"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
snake_case_ = None
snake_case_ = logging.get_logger(__name__)
snake_case_ = "▁"
snake_case_ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
snake_case_ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
snake_case_ = {
"google/pegasus-xsum": 512,
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PegasusTokenizer
__UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self :Any , lowercase_ :Any=None , lowercase_ :List[Any]=None , lowercase_ :Dict="<pad>" , lowercase_ :List[Any]="</s>" , lowercase_ :Dict="<unk>" , lowercase_ :Tuple="<mask_2>" , lowercase_ :Union[str, Any]="<mask_1>" , lowercase_ :Union[str, Any]=None , lowercase_ :List[str]=1_03 , **lowercase_ :str , ) -> List[str]:
UpperCAmelCase = offset
if additional_special_tokens is not None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(SCREAMING_SNAKE_CASE_ )}, but is"""
f""" {type(SCREAMING_SNAKE_CASE_ )}""" )
UpperCAmelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(SCREAMING_SNAKE_CASE_ ) , self.offset - 1 )
]
if len(set(SCREAMING_SNAKE_CASE_ ) ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
UpperCAmelCase = additional_special_tokens_extended
else:
UpperCAmelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , mask_token_sent=SCREAMING_SNAKE_CASE_ , offset=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCAmelCase = vocab_file
UpperCAmelCase = False if not self.vocab_file else True
def UpperCAmelCase__ ( self :str , lowercase_ :Optional[Any] ) -> Optional[Any]:
UpperCAmelCase = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Optional[int] , lowercase_ :List[Any] = None , lowercase_ :Any = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(SCREAMING_SNAKE_CASE_ )
elif token_ids_a is None:
return self._special_token_mask(SCREAMING_SNAKE_CASE_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :int , lowercase_ :int=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Any , lowercase_ :Union[str, Any] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
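# ---------------------------------------------------------------------------
# Usage sketch for the tokenizer above (illustrative addition, not part of the
# original file). Assumes `transformers` is installed and the
# `google/pegasus-xsum` checkpoint can be downloaded.
from transformers import PegasusTokenizerFast

tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
batch = tokenizer(["PEGASUS is a summarization model."], return_tensors="pt")
# build_inputs_with_special_tokens appends a single EOS token to each sequence
assert batch["input_ids"][0, -1].item() == tokenizer.eos_token_id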
"""Whisper model configuration."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
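# ---------------------------------------------------------------------------
# Usage sketch for the config above (illustrative addition, not part of the
# original file). Assumes `transformers` is installed.
from transformers import WhisperConfig

config = WhisperConfig(d_model=384, encoder_attention_heads=6)
# attribute_map aliases `hidden_size` to `d_model` and `num_attention_heads`
# to `encoder_attention_heads`, so generic model code can use the common names.
assert config.hidden_size == 384
assert config.num_attention_heads == 6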
"""Tests for the ONNX Stable Diffusion text-to-image pipeline."""
import tempfile
import unittest

import numpy as np

from diffusers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionPipeline,
    PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
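# ---------------------------------------------------------------------------
# How these tests are typically run (illustrative; the exact test-file path is
# an assumption about the diffusers repository layout):
#
#   python -m pytest tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py -k "pndm or lms"
#
# The fast tests above run on CPUExecutionProvider against a tiny hub
# checkpoint; the @nightly integration class additionally needs a CUDA build
# of onnxruntime and a GPU.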
"""Tests for the CANINE tokenizer."""
import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    # cannot use the default `test_save_and_load_tokenizer` because the tokenizer has no vocab
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(
                    getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id]
                )

    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    # CanineTokenizer is character-based, so lower-casing of added tokens does not apply
    def test_added_tokens_do_lower_case(self):
        pass
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase_ = 'src/diffusers'
UpperCamelCase_ = '.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase_ = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase_ = spec.loader.load_module()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , UpperCAmelCase ) is not None
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
a_ = object_name.split("." )
a_ = 0
# First let's find the module where our object lives.
a_ = parts[i]
while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , F'''{module}.py''' ) ):
i += 1
if i < len(UpperCAmelCase ):
a_ = os.path.join(UpperCAmelCase , parts[i] )
if i >= len(UpperCAmelCase ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(UpperCAmelCase , F'''{module}.py''' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
a_ = f.readlines()
# Now let's find the class / func in the code!
a_ = ""
a_ = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase ) and re.search(rF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
a_ = line_index
while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a_ = lines[start_index:line_index]
return "".join(UpperCAmelCase )
UpperCamelCase_ = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase_ = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase_ = re.compile(R'<FILL\s+[^>]*>')
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = code.split("\n" )
a_ = 0
while idx < len(UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = len(get_indent(UpperCAmelCase ) ) > 0
if has_indent:
a_ = F'''class Bla:\n{code}'''
a_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCAmelCase )
a_ = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
a_ , a_ = style_docstrings_in_code(UpperCAmelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->str:
"""simple docstring"""
with open(UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
a_ = f.readlines()
a_ = []
a_ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase ):
a_ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
a_ , a_ , a_ = search.groups()
a_ = find_code_in_diffusers(UpperCAmelCase )
a_ = get_indent(UpperCAmelCase )
a_ = line_index + 1 if indent == theoretical_indent else line_index + 2
a_ = theoretical_indent
a_ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
a_ = True
while line_index < len(UpperCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase ):
break
a_ = lines[line_index]
a_ = _should_continue(UpperCAmelCase , UpperCAmelCase ) and re.search(F'''^{indent}# End copy''' , UpperCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a_ = lines[start_index:line_index]
a_ = "".join(UpperCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
a_ = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(UpperCAmelCase ) is None]
a_ = "\n".join(UpperCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase ) > 0:
a_ = replace_pattern.replace("with" , "" ).split("," )
a_ = [_re_replace_pattern.search(UpperCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
a_ , a_ , a_ = pattern.groups()
a_ = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if option.strip() == "all-casing":
a_ = re.sub(obja.lower() , obja.lower() , UpperCAmelCase )
a_ = re.sub(obja.upper() , obja.upper() , UpperCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
a_ = blackify(lines[start_index - 1] + theoretical_code )
a_ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
a_ = lines[:start_index] + [theoretical_code] + lines[line_index:]
a_ = start_index + 1
if overwrite and len(UpperCAmelCase ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(UpperCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(UpperCAmelCase )
return diffs
def UpperCamelCase ( UpperCAmelCase = False ) ->int:
"""simple docstring"""
a_ = glob.glob(os.path.join(UpperCAmelCase , "**/*.py" ) , recursive=UpperCAmelCase )
a_ = []
for filename in all_files:
a_ = is_copy_consistent(UpperCAmelCase , UpperCAmelCase )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(UpperCAmelCase ) > 0:
a_ = "\n".join(UpperCAmelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase_ = parser.parse_args()
check_copies(args.fix_and_overwrite) | 243 |
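# ---------------------------------------------------------------------------
# Example of the annotation this script enforces (illustrative, not part of
# the original file). A duplicated definition is marked with a `# Copied from`
# comment, and `check_copies` verifies its body still matches the original:
#
#   # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
#   def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
#       ...
#
# A rename can be applied on the fly with a replace pattern, which
# `_re_replace_pattern` parses, e.g.:
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock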
"""simple docstring"""
import baseaa
def UpperCamelCase ( UpperCAmelCase ) ->bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode("utf-8" ) )
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
return baseaa.baadecode(UpperCAmelCase ).decode("utf-8" )
if __name__ == "__main__":
UpperCamelCase_ = 'Hello World!'
UpperCamelCase_ = baseaa_encode(test)
print(encoded)
UpperCamelCase_ = baseaa_decode(encoded)
print(decoded) | 243 | 1 |
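# Expected output of the demo above: base64-encoding "Hello World!" yields
# b'SGVsbG8gV29ybGQh', and decoding restores the original string:
#
#   b'SGVsbG8gV29ybGQh'
#   Hello World!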
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
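# ---------------------------------------------------------------------------
# Worked example for the properties above (illustrative addition, not part of
# the original file). With the defaults sampling_rate=24000 and
# upsampling_ratios=[8, 5, 4, 2], the hop length is 8 * 5 * 4 * 2 = 320, so
# frame_rate = ceil(24000 / 320) = 75; with target_bandwidths ending at 24.0,
# num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.
from transformers import EncodecConfig

config = EncodecConfig()
assert config.frame_rate == 75
assert config.num_quantizers == 32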
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
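# ---------------------------------------------------------------------------
# Usage sketch for the ONNX config above (illustrative addition, not part of
# the original file). Assumes `transformers` is installed.
from transformers import BeitConfig
from transformers.models.beit.configuration_beit import BeitOnnxConfig

onnx_config = BeitOnnxConfig(BeitConfig())
assert list(onnx_config.inputs.keys()) == ["pixel_values"]  # single 4-D image input
assert onnx_config.atol_for_validation == 1e-4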
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase_ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
lowerCAmelCase_ : Union[str, Any] = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase_ : Union[str, Any] = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase_ : Any = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase_ : Optional[int] = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase_ : Union[str, Any] = re.compile(R'\[([^\]]+)\]')
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Any:
_a = _re_indent.search(lowercase )
return "" if search is None else search.groups()[0]
def _lowerCamelCase ( lowercase : Dict , lowercase : Union[str, Any]="" , lowercase : Tuple=None , lowercase : List[Any]=None ) -> str:
_a = 0
_a = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(lowercase ):
index += 1
_a = ["\n".join(lines[:index] )]
else:
_a = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_a = [lines[index]]
index += 1
while index < len(lowercase ) and (end_prompt is None or not lines[index].startswith(lowercase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(lowercase ) )
if index < len(lowercase ) - 1:
_a = [lines[index + 1]]
index += 1
else:
_a = []
else:
blocks.append("\n".join(lowercase ) )
_a = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase ) > 0:
blocks.append("\n".join(lowercase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def _lowerCamelCase ( lowercase : str ) -> int:
def _inner(lowercase : Dict ):
return key(lowercase ).lower().replace("_" , "" )
return _inner
def _lowerCamelCase ( lowercase : str , lowercase : Tuple=None ) -> Optional[int]:
# If no key is provided, we use a noop.
def noop(lowercase : List[str] ):
return x
if key is None:
_a = noop
# Constants are all uppercase, they go first.
_a = [obj for obj in objects if key(lowercase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_a = [obj for obj in objects if key(lowercase )[0].isupper() and not key(lowercase ).isupper()]
# Functions begin with a lowercase, they go last.
_a = [obj for obj in objects if not key(lowercase )[0].isupper()]
_a = ignore_underscore(lowercase )
return sorted(lowercase , key=lowercase ) + sorted(lowercase , key=lowercase ) + sorted(lowercase , key=lowercase )
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
# This inner function sort imports between [ ].
def _replace(lowercase : List[str] ):
_a = match.groups()[0]
if "," not in imports:
return F'[{imports}]'
_a = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_a = keys[:-1]
return "[" + ", ".join([F'"{k}"' for k in sort_objects(lowercase )] ) + "]"
_a = import_statement.split("\n" )
if len(lowercase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_a = 2 if lines[1].strip() == "[" else 1
_a = [(i, _re_strip_line.search(lowercase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_a = sort_objects(lowercase , key=lambda lowercase : x[1] )
_a = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowercase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_a = _re_bracket_content.sub(_replace , lines[1] )
else:
_a = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_a = keys[:-1]
_a = get_indent(lines[1] ) + ", ".join([F'"{k}"' for k in sort_objects(lowercase )] )
return "\n".join(lowercase )
else:
# Finally we have to deal with imports fitting on one line
_a = _re_bracket_content.sub(_replace , lowercase )
return import_statement
def sort_imports ( file : str , check_only : bool=True ):
    with open(file , encoding="utf-8" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure` entries to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n" )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reordering the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F'Overwriting {file}.' )
            with open(file , "w" , encoding="utf-8" ) as f:
                f.write("\n".join(main_blocks ) )
def sort_imports_in_all_inits ( check_only : bool=True ):
    failures = []
    # PATH_TO_TRANSFORMERS is the source-tree constant expected to be defined near the top of this script.
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , "__init__.py" ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , "__init__.py" )]
    if len(failures ) > 0:
        raise ValueError(F'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
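# Hedged usage note (the module path is an illustrative assumption, not taken from
# this file): the script is meant to be run standalone over a source tree, e.g.
#
#     python utils/custom_init_isort.py               # rewrite badly sorted __init__.py files
#     python utils/custom_init_isort.py --check_only  # raise instead of rewriting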
| 63 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a__ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
"""simple docstring"""
    model_class = AutoencoderKL
    main_input_name = 'sample'
    base_precision = 1e-2
@property
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = 4
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase )
return {"sample": image}
@property
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
@property
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
A__ = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ , A__ = self.prepare_init_args_and_inputs_for_common()
A__ = self.model_class(**lowercase )
model.to(lowercase )
assert not model.is_gradient_checkpointing and model.training
A__ = model(**lowercase ).sample
        # run the backward pass on the model. For simplicity, instead of computing
        # a real loss we backprop on the mean difference to random targets.
model.zero_grad()
A__ = torch.randn_like(lowercase )
A__ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
A__ = self.model_class(**lowercase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowercase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
A__ = model_a(**lowercase ).sample
        # run the backward pass on the second model with the same dummy loss
        # (mean difference to the random targets generated above).
model_a.zero_grad()
A__ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
A__ = dict(model.named_parameters() )
A__ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ , A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowercase )
A__ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
A__ = model.to(lowercase )
model.eval()
if torch_device == "mps":
A__ = torch.manual_seed(0 )
else:
A__ = torch.Generator(device=lowercase ).manual_seed(0 )
A__ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
A__ = image.to(lowercase )
with torch.no_grad():
A__ = model(lowercase , sample_posterior=lowercase , generator=lowercase ).sample
A__ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
A__ = torch.tensor(
[
-4.00_78e-01,
-3.83_23e-04,
-1.26_81e-01,
-1.14_62e-01,
2.00_95e-01,
1.08_93e-01,
-8.82_47e-02,
-3.03_61e-01,
-9.86_44e-03,
] )
elif torch_device == "cpu":
A__ = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
A__ = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2 ) )
@slow
class a__ ( unittest.TestCase ):
"""simple docstring"""
    def UpperCamelCase ( self , seed , shape ) -> str:
        '''simple docstring'''
        return F'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def UpperCamelCase ( self , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ) -> Optional[int]:
        '''simple docstring'''
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
def UpperCamelCase ( self , lowercase="CompVis/stable-diffusion-v1-4" , lowercase=False ) -> Any:
'''simple docstring'''
A__ = "fp16" if fpaa else None
A__ = torch.floataa if fpaa else torch.floataa
A__ = AutoencoderKL.from_pretrained(
lowercase , subfolder="vae" , torch_dtype=lowercase , revision=lowercase , )
model.to(lowercase ).eval()
return model
def UpperCamelCase ( self , lowercase=0 ) -> List[str]:
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(lowercase )
return torch.Generator(device=lowercase ).manual_seed(lowercase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def UpperCamelCase ( self , seed , expected_slice , expected_slice_mps ) -> int:
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def UpperCamelCase ( self , seed , expected_slice ) -> List[Any]:
        '''simple docstring'''
        model = self.get_sd_vae_model(fpaa=True )
        image = self.get_sd_image(seed , fpaa=True )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def UpperCamelCase ( self , seed , expected_slice , expected_slice_mps ) -> Dict:
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        with torch.no_grad():
            sample = model(image ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def UpperCamelCase ( self , seed , expected_slice ) -> Tuple:
        '''simple docstring'''
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def UpperCamelCase ( self , seed , expected_slice ) -> Union[str, Any]:
        '''simple docstring'''
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def UpperCamelCase ( self , lowercase ) -> Optional[Any]:
'''simple docstring'''
A__ = self.get_sd_vae_model(fpaa=lowercase )
A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) , fpaa=lowercase )
with torch.no_grad():
A__ = model.decode(lowercase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
A__ = model.decode(lowercase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase , lowercase , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
A__ = self.get_sd_vae_model()
A__ = self.get_sd_image(lowercase , shape=(3, 4, 64, 64) )
with torch.no_grad():
A__ = model.decode(lowercase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
A__ = model.decode(lowercase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase , lowercase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def UpperCamelCase ( self , seed , expected_slice ) -> str:
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
| 68 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
lowerCAmelCase__ = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
lowerCAmelCase__ = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def get_pairs ( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
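# Hedged illustration (added for clarity, not original code): get_pairs yields the
# adjacent symbol pairs BPE considers for merging, e.g.
#
#     get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}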
class __snake_case ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , merges_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        """simple docstring"""
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[:-1]
        merges = [tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    def build_inputs_with_special_tokens ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe ( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
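    # Hedged walk-through (comment added for clarity, not original code): for a
    # cache-miss token such as "lowest", the loop above repeatedly applies the
    # highest-ranked adjacent merge from bpe.codes, roughly
    #   ("l","o","w","e","s","t</w>") -> ("lo","w","e","s","t</w>") -> ... -> "low@@ est"
    # until no ranked pair remains, then the result is memoized in self.cache.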
    def _tokenize ( self , text ):
        """simple docstring"""
        split_tokens = []
        words = re.findall(R'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict ):
"""simple docstring"""
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int ):
"""simple docstring"""
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
        out_string = ''' '''.join(__lowerCAmelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
    def save_vocabulary ( self , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file ( self , f ):
        """simple docstring"""
        if isinstance(f , str ):
            try:
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
| 175 |
"""simple docstring"""
import argparse
lowerCAmelCase__ = '''docs/source/_static/js/custom.js'''
def update_custom_js ( version : str ):
    '''simple docstring'''
    with open(lowerCAmelCase__, encoding='''utf-8''', newline='''\n''' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('''const stableVersion =''' ):
        index += 1
    lines[index] = F'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith('''const versionMapping = {''' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('''}''' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F'''    "v{version}": "v{version}",\n'''
    with open(lowerCAmelCase__, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--version''', help='''Release version.''')
    args = parser.parse_args()
update_custom_js(args.version)
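# Hedged usage sketch (script name and version string are illustrative assumptions):
#
#     python update_custom_js.py --version 4.30.0
#
# which pins `const stableVersion` and appends the new entry to `versionMapping`.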
| 175 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
A : Union[str, Any] = logging.getLogger()
def get_results ( output_dir ):
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'''can\'t find {path}''' )
    return results
A : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCamelCase (TestCasePlus ):
"""simple docstring"""
def __A ( self : List[str] ) -> Union[str, Any]:
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(__magic_name__ , "argv" , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = time()
xla_spawn.main()
SCREAMING_SNAKE_CASE_ = time()
SCREAMING_SNAKE_CASE_ = get_results(__magic_name__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def __A ( self : Union[str, Any] ) -> Any:
import xla_spawn
SCREAMING_SNAKE_CASE_ = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(__magic_name__ , "argv" , __magic_name__ ):
xla_spawn.main()
 | 118 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester :
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
def __A ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, input_values, labels
def __A ( self : Any ) -> Dict:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def __A ( self , config , input_values , labels ) -> Union[str, Any]:
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Tuple ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_values , labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def __A ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Tuple:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def __A ( self : Optional[Any] ) -> Optional[Any]:
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
def __A ( self : Union[str, Any] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def __A ( self : Optional[Any] ) -> Tuple:
pass
def __A ( self : int ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["input_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
@slow
def __A ( self : int ) -> int:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = ASTModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def prepare_audio ( ):
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : List[Any] ) -> List[Any]:
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def __A ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.default_feature_extractor
SCREAMING_SNAKE_CASE_ = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_feature_extractor
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_audio()
SCREAMING_SNAKE_CASE_ = audio.squeeze().numpy()
SCREAMING_SNAKE_CASE_ = feature_extractor(__magic_name__ , sampling_rate=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
| 118 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 5_0),)
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
        config = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowercase )
return config
    def lowerCAmelCase_ ( self , lowercase=0 , **kwargs ):
"""simple docstring"""
A_ : Union[str, Any] = dict(self.forward_default_kwargs )
A_ : int = kwargs.pop('num_inference_steps' , lowercase )
A_ : int = self.dummy_sample
A_ : Optional[Any] = 0.1 * sample
A_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ : Union[str, Any] = self.get_scheduler_config(**lowercase )
A_ : Any = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
A_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
A_ : Optional[int] = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
A_ : str = dummy_past_residuals[:]
A_ : Tuple = scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : int = new_scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A_ : Tuple = scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : Optional[Any] = new_scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
    def lowerCAmelCase_ ( self , lowercase=0 , **kwargs ):
"""simple docstring"""
A_ : Dict = dict(self.forward_default_kwargs )
A_ : Optional[Any] = kwargs.pop('num_inference_steps' , lowercase )
A_ : List[str] = self.dummy_sample
A_ : Union[str, Any] = 0.1 * sample
A_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ : List[Any] = self.get_scheduler_config()
A_ : Any = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
A_ : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
A_ : Any = scheduler_class.from_pretrained(lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
A_ : Union[str, Any] = dummy_past_residuals[:]
A_ : Any = scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : List[Any] = new_scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A_ : List[Any] = scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample
A_ : str = new_scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config(**lowercase )
A_ : Optional[int] = scheduler_class(**lowercase )
A_ : str = 1_0
A_ : str = self.dummy_model()
A_ : Any = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.prk_timesteps ):
A_ : Optional[Any] = model(lowercase , lowercase )
A_ : Any = scheduler.step_prk(lowercase , lowercase , lowercase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A_ : List[str] = model(lowercase , lowercase )
A_ : List[str] = scheduler.step_plms(lowercase , lowercase , lowercase ).prev_sample
return sample
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = dict(self.forward_default_kwargs )
A_ : Union[str, Any] = kwargs.pop('num_inference_steps' , lowercase )
for scheduler_class in self.scheduler_classes:
A_ : Any = self.get_scheduler_config()
A_ : Any = scheduler_class(**lowercase )
A_ : Any = self.dummy_sample
A_ : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , 'set_timesteps' ):
scheduler.set_timesteps(lowercase )
elif num_inference_steps is not None and not hasattr(lowercase , 'set_timesteps' ):
A_ : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ : int = dummy_past_residuals[:]
A_ : int = scheduler.step_prk(lowercase , 0 , lowercase , **lowercase ).prev_sample
A_ : str = scheduler.step_prk(lowercase , 1 , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ : Tuple = scheduler.step_plms(lowercase , 0 , lowercase , **lowercase ).prev_sample
A_ : int = scheduler.step_plms(lowercase , 1 , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase )
A_ : List[str] = self.scheduler_classes[0]
A_ : str = self.get_scheduler_config(steps_offset=1 )
A_ : Dict = scheduler_class(**lowercase )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = 2_7
for scheduler_class in self.scheduler_classes:
A_ : List[Any] = self.dummy_sample
A_ : Optional[Any] = 0.1 * sample
A_ : List[str] = self.get_scheduler_config()
A_ : Union[str, Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A_ : List[str] = scheduler.step_prk(lowercase , lowercase , lowercase ).prev_sample
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase ):
A_ : str = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**lowercase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.full_loop()
A_ : List[Any] = torch.sum(torch.abs(lowercase ) )
A_ : Union[str, Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = self.full_loop(prediction_type='v_prediction' )
A_ : Optional[int] = torch.sum(torch.abs(lowercase ) )
A_ : int = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase , beta_start=0.01 )
A_ : Tuple = torch.sum(torch.abs(lowercase ) )
A_ : Union[str, Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.full_loop(set_alpha_to_one=lowercase , beta_start=0.01 )
A_ : Union[str, Any] = torch.sum(torch.abs(lowercase ) )
A_ : Tuple = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
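# Hedged sketch (not part of the test suite): outside the harness, the same PRK/PLMS
# split drives a plain sampling loop. `model` and the tensor shape are dummies here.
#
#     scheduler = PNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.prk_timesteps:
#         sample = scheduler.step_prk(model(sample, t), t, sample).prev_sample
#     for t in scheduler.plms_timesteps:
#         sample = scheduler.step_plms(model(sample, t), t, sample).prev_sample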
 | 351 |
import darl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
_UpperCAmelCase = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
_UpperCAmelCase = """hopper-medium-v2"""
_UpperCAmelCase = gym.make(env_name)
_UpperCAmelCase = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
_UpperCAmelCase = env.reset()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1000
_UpperCAmelCase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 192 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure)
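# Hedged note (added for clarity): with this _LazyModule indirection, a statement like
#
#     from transformers.models.xlm_roberta_xl import XLMRobertaXLConfig
#
# resolves the submodule on first attribute access rather than at package import
# time, so importing the top-level package stays cheap even when the torch models
# are registered in _import_structure.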
| 10 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str ):
    '''simple docstring'''
    config = TaConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
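# Hedged CLI sketch (script name and paths are illustrative placeholders, not from
# the original file):
#
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/t5/model.ckpt \
#         --config_file /path/to/t5/config.json \
#         --pytorch_dump_path /path/to/output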
| 158 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A: Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( BeitImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 356 |
"""simple docstring"""
def set_bit ( number : int , position : int ):
    return number | (1 << position)
def clear_bit ( number : int , position : int ):
    return number & ~(1 << position)
def flip_bit ( number : int , position : int ):
    return number ^ (1 << position)
def is_bit_set ( number : int , position : int ):
    return ((number >> position) & 1) == 1
def get_bit ( number : int , position : int ):
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 | 0 |
"""simple docstring"""
from math import ceil
def assert_device_map ( device_map , num_blocks ):
    '''simple docstring'''
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks ) )
def get_device_map ( n_layers , devices ):
    '''simple docstring'''
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
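# Hedged example (added for clarity): splitting 8 layers across 2 devices gives
#
#     get_device_map(8, [0, 1]) == {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}
#
# ceil() keeps the per-device block count high enough that every layer is assigned.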
| 57 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
class UpperCamelCase_ ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''encoder-decoder'''
    is_composition = True
    def __init__( self , **kwargs ) ->List[Any]:
        '''simple docstring'''
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''')
        encoder_model_type = encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''decoder''')
        decoder_model_type = decoder_config.pop('''model_type''')
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config)
        self.is_encoder_decoder = True
@classmethod
    def SCREAMING_SNAKE_CASE ( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs) ->PretrainedConfig:
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs)
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 14 | 0 |
"""simple docstring"""
from math import factorial
SCREAMING_SNAKE_CASE : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def digit_factorial_sum ( number : int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError("""Parameter number must be int""" )
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""" )
    # Convert the number to a string to iterate over its digits and sum their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution ( chain_length : int = 60 , number_limit : int = 1000000 ) -> int:
    """simple docstring"""
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError("""Parameters chain_length and number_limit must be int""" )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""" )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
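# Hedged worked example (comment added for clarity): starting from 69,
#   69 -> 6!+9! = 363600 -> 1454 -> 169 -> 363601 -> 1454 (repeats)
# giving a chain of five non-repeating terms before the loop closes.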
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(F'{solution()}')
 | 369 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE : List[str] = False
class __lowerCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
    def A__ (self ):
        '''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
 | 317 | 0 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class a__ ( SequenceFeatureExtractor ):
A = ['input_features', 'attention_mask']
    def __init__( self , feature_size=80 , sampling_rate=1_6000 , num_mel_bins=80 , padding_value=0.0 , do_ceptral_normalize=True , normalize_means=True , normalize_vars=True , **kwargs , ):
        """simple docstring"""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features ( self , waveform : np.ndarray , ):
        """simple docstring"""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform ).unsqueeze(0 )
        features = ta_kaldi.fbank(waveform , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()
@staticmethod
    def utterance_cmvn ( x : np.ndarray , input_length : int , normalize_means : bool = True , normalize_vars : bool = True , padding_value : float = 0.0 , ):
        """simple docstring"""
        if normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize ( self , input_features : List[np.ndarray] , attention_mask : Optional[np.ndarray] = None ):
        """simple docstring"""
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x , n , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(input_features , lengths )
        ]
    def __call__( self , raw_speech , padding=False , max_length=None , truncation=False , pad_to_multiple_of=None , return_tensors=None , sampling_rate=None , return_attention_mask=None , **kwargs , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
# extract fbank features
SCREAMING_SNAKE_CASE_ : Dict = [self._extract_fbank_features(__lowercase ) for waveform in raw_speech]
# convert into correct format for padding
SCREAMING_SNAKE_CASE_ : List[Any] = BatchFeature({"input_features": features} )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.pad(
__lowercase,padding=__lowercase,max_length=__lowercase,truncation=__lowercase,pad_to_multiple_of=__lowercase,return_attention_mask=__lowercase,**__lowercase,)
# make sure list is in array format
SCREAMING_SNAKE_CASE_ : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__lowercase ):
SCREAMING_SNAKE_CASE_ : Tuple = [np.asarray(__lowercase,dtype=np.floataa ) for feature in input_features]
SCREAMING_SNAKE_CASE_ : Tuple = padded_inputs.get("attention_mask" )
if attention_mask is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = [np.asarray(__lowercase,dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
SCREAMING_SNAKE_CASE_ : Tuple = (
np.array(__lowercase,dtype=np.intaa )
if self._get_padding_strategies(__lowercase,max_length=__lowercase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE_ : str = self.normalize(
padded_inputs["input_features"],attention_mask=__lowercase )
if return_tensors is not None:
SCREAMING_SNAKE_CASE_ : List[str] = padded_inputs.convert_to_tensors(__lowercase )
return padded_inputs
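# Hypothetical usage sketch (not part of the original module; the demo waveform and
# shapes are assumptions): extract padded log-mel features from one second of audio.
#     extractor = Speech2TextFeatureExtractor()
#     waveform = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
#     inputs = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="np")
#     inputs["input_features"].shape  # (1, num_frames, 80) with the defaults above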
| 18 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096,
        num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1,
        attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02,
        scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
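# Hypothetical usage sketch (values are arbitrary, not from the original): the
# attribute_map above lets generic code read `hidden_size`/`num_hidden_layers`
# while the config actually stores d_model/num_layers.
#     config = XGLMConfig(num_layers=2, d_model=128, ffn_dim=256)
#     config.hidden_size, config.num_hidden_layers  # -> (128, 2) via attribute_map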
| 114 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
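# Hypothetical invocation sketch (the script name and all paths are placeholders,
# not from the original):
#     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./funnel/model.ckpt \
#         --config_file ./funnel/config.json \
#         --pytorch_dump_path ./funnel/pytorch_model.bin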
| 369 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 142 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
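# Hypothetical usage sketch (device handling and the dummy batch are assumptions;
# the checkpoint paths default to the ones hard-coded above):
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     vqgan = load_vqgan(device)
#     xrec = reconstruct_with_vqgan(torch.randn(1, 3, 256, 256, device=device), vqgan)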
| 250 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
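# Hypothetical usage sketch (the story count is arbitrary): each story dict carries
# at least "title" and "url", which the markdown formatter above relies on.
#     print(hackernews_top_stories_as_markdown(3))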
| 250 | 1 |
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )
# Pass the arguments to main method
main(parser.parse_args())
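# Hypothetical input sketch (file contents are an assumption consistent with the
# parsing above): each line names two nodes and the distance between them, e.g.
#     a b 20
#     a c 18
#     b c 10
# Example run: python tabu_search.py -f cities.txt -i 100 -s 5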
| 140 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
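# Hypothetical usage sketch (the backbone name is an assumption, not from the original):
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#     config.use_timm_backbone  # always True for this config class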
| 140 | 1 |
"""simple docstring"""
import operator as op
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
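# Hypothetical usage sketch (not in the original): the operator table turns a
# comparison string into a callable, which is handy for version gating.
#     compare = STR_OPERATION_TO_FUNC[">="]
#     compare((2, 0, 1), (1, 9, 0))  # True, tuple comparison for "2.0.1" >= "1.9.0"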
| 84 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None, projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
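# Hypothetical invocation sketch (the RUN_SLOW switch follows the usual Transformers
# test convention; the file path is an assumption): the @slow tests above are skipped
# unless slow tests are enabled, e.g.
#     RUN_SLOW=1 python -m pytest tests/models/dpr/test_modeling_tf_dpr.py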
| 221 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
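# Hypothetical quick check (not part of the original tests): "gelu_10" matches gelu
# for small inputs but saturates at 10 for large ones.
#     get_activation("gelu_10")(torch.tensor([0.5, 100.0]))  # second entry clips to 10.0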
| 351 |
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # keeps the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
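# Worked example (follows directly from the functions above): for node_count = 3,
# binomial_coefficient(6, 3) = 20, so catalan_number(3) = 20 // 4 = 5 binary search
# trees, and binary_tree_count(3) = 5 * 3! = 30 distinct binary trees.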
| 224 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None)

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
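# Hypothetical shape check (not part of the original module; sizes are assumptions):
# with one non-downsampling block and double_z=True, spatial size is preserved and
# the output carries 2 * out_channels moment channels, e.g.
#     enc = Encoder(block_out_channels=(32,), norm_num_groups=32)
#     enc(torch.randn(1, 3, 64, 64)).shape  # torch.Size([1, 6, 64, 64])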
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels)

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
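# Hypothetical usage sketch (shapes are assumptions): the distribution implements the
# reparameterization trick, so sample() stays differentiable w.r.t. the parameters.
#     dist = DiagonalGaussianDistribution(torch.randn(1, 8, 4, 4))  # 4 mean + 4 logvar channels
#     dist.sample().shape  # torch.Size([1, 4, 4, 4])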
| 314 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
print(sort)
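# Worked trace (follows from the graph defined above): starting at "a" the search
# visits "c", then "b" -> "d" -> "e", appending each node after its children, so
# the printed order is ['c', 'd', 'e', 'b', 'a'].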
| 333 | 0 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
    comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_CITATION = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 368 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Calculate the u term of Newton's forward difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"""the value at {value} is {summ}""")
if __name__ == "__main__":
main()
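# Worked example (hand-checked against the formula above): with x = [0, 1, 2, 3],
# y-values 1, 2, 4, 8 and value = 4, the leading forward differences are 1, 1, 1,
# u = 4, and the extrapolated value is 1 + 4*1 + (4*3/2!)*1 + (4*3*2/3!)*1 = 15.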
| 88 | 0 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
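# Why the 6k +/- 1 step in is_prime works: any integer can be written as 6k + r
# with r in {0,...,5}; r = 0, 2, 3, 4 is divisible by 2 or 3, so every prime
# greater than 3 has the form 6k + 1 or 6k + 5, and only those candidates need
# trial division.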
| 36 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configpath = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configpath)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
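# Hypothetical usage sketch (BertConfig is only an example config class, not part of
# this module): a model test suite typically does
#     self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#     self.config_tester.run_common_tests()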
| 57 | 0 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="attention" ):
'''simple docstring'''
UpperCAmelCase = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
UpperCAmelCase = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
UpperCAmelCase = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
UpperCAmelCase = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
UpperCAmelCase = state_dict["""shared.weight"""]
return state_dict
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = checkpoints.load_tax_checkpoint(lowerCAmelCase )
UpperCAmelCase = convert_tax_to_pytorch(lowerCAmelCase , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase )
UpperCAmelCase = make_state_dict(lowerCAmelCase , lowerCAmelCase )
model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False ):
'''simple docstring'''
UpperCAmelCase = TaConfig.from_json_file(lowerCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCAmelCase = TaEncoderModel(lowerCAmelCase )
else:
UpperCAmelCase = TaForConditionalGeneration(lowerCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase )
print("""Done""" )
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
lowerCAmelCase_ : Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
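# Hypothetical command line for the converter above (the script filename and all
# paths are illustrative; the flags are the ones declared by the parser):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_file /path/to/t5_config.json \
#       --pytorch_dump_path /path/to/pytorch_dump \
#       [--is_encoder_only]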
| 371 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return (data["data"], data["target"])
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase , lowerCAmelCase )
# Predict target for test data
UpperCAmelCase = xgb.predict(lowerCAmelCase )
UpperCAmelCase = predictions.reshape(len(lowerCAmelCase ) , 1 )
return predictions
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = fetch_california_housing()
UpperCAmelCase , UpperCAmelCase = data_handling(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = train_test_split(
lowerCAmelCase , lowerCAmelCase , test_size=0.25 , random_state=1 )
UpperCAmelCase = xgboost(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase , lowerCAmelCase )}''' )
print(F'''Mean Square Error : {mean_squared_error(lowerCAmelCase , lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
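# Optional extension (not part of the original script): recent scikit-learn
# versions can report the root of the squared error printed above directly, e.g.
#   rmse = mean_squared_error(y_test, predictions, squared=False)
# with y_test / predictions as produced in main().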
| 248 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = 4_2
UpperCAmelCase__ : List[Any] = 4_2
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ ) -> Optional[Any]:
__UpperCamelCase =[[] for _ in range(A_ )]
__UpperCamelCase =size
def __getitem__( self , A_ ) -> Any:
return iter(self._graph[vertex] )
@property
def _a ( self ) -> Optional[Any]:
return self._size
def _a ( self , A_ , A_ , A_ ) -> str:
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(A_ , A_ ) )
def _a ( self , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =deque([start_vertex] )
__UpperCamelCase =[None] * self.size
__UpperCamelCase =0
while queue:
__UpperCamelCase =queue.popleft()
__UpperCamelCase =distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__UpperCamelCase =current_distance + edge.weight
__UpperCamelCase =distances[edge.destination_vertex]
if (
isinstance(A_ , A_ )
and new_distance >= dest_vertex_distance
):
continue
__UpperCamelCase =new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
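# Hypothetical usage of the 0-1 BFS graph above, written against the upstream names
# (AdjacencyList / add_edge / get_shortest_path); the identifiers in this file are
# mangled placeholders:
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)  # weight-0 edge: explored first via appendleft (front of deque)
#   g.add_edge(1, 2, 1)  # weight-1 edge: explored via append (back of deque)
#   assert g.get_shortest_path(0, 2) == 1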
| 62 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase ):
_lowerCamelCase : Any = data
_lowerCamelCase : Node | None = None
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self ):
_lowerCamelCase : str = None
_lowerCamelCase : str = None
def __iter__( self ):
_lowerCamelCase : List[str] = self.head
while self.head:
yield node.data
_lowerCamelCase : Optional[int] = node.next
if node == self.head:
break
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join(str(lowercase ) for item in iter(self ) )
def A_ ( self , lowercase ):
self.insert_nth(len(self ) , lowercase )
def A_ ( self , lowercase ):
self.insert_nth(0 , lowercase )
def A_ ( self , lowercase , lowercase ):
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
_lowerCamelCase : List[Any] = Node(lowercase )
if self.head is None:
_lowerCamelCase : str = new_node # first node points itself
_lowerCamelCase : Union[str, Any] = new_node
elif index == 0: # insert at head
_lowerCamelCase : List[str] = self.head
_lowerCamelCase : str = new_node
else:
_lowerCamelCase : Union[str, Any] = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : Union[str, Any] = temp.next
_lowerCamelCase : List[str] = new_node
if index == len(self ) - 1: # insert at tail
_lowerCamelCase : Any = new_node
def A_ ( self ):
return self.delete_nth(0 )
def A_ ( self ):
return self.delete_nth(len(self ) - 1 )
def A_ ( self , lowercase = 0 ):
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
_lowerCamelCase : Any = self.head
if self.head == self.tail: # just one node
_lowerCamelCase : List[str] = None
elif index == 0: # delete head node
_lowerCamelCase : List[str] = self.tail.next.next
_lowerCamelCase : Optional[int] = self.head.next
else:
_lowerCamelCase : Dict = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : int = temp.next
_lowerCamelCase : Optional[int] = temp.next.next
if index == len(self ) - 1: # delete at tail
_lowerCamelCase : List[Any] = temp
return delete_node.data
def A_ ( self ):
return len(self ) == 0
def _snake_case ( ):
_lowerCamelCase : Union[str, Any] = CircularLinkedList()
assert len(lowercase__ ) == 0
assert circular_linked_list.is_empty() is True
assert str(lowercase__ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(lowercase__ ) == i
circular_linked_list.insert_nth(lowercase__ , i + 1 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 96 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase : List[str] = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_lowerCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
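# Intended effect of the lazy structure above: importing this subpackage stays
# cheap, and a heavy backend (torch / TensorFlow / Flax) is only imported when one
# of its symbols, e.g. EncoderDecoderModel, is first accessed.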
| 371 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_lowerCAmelCase : Optional[Any] = numpy.array([0, 0])
_lowerCAmelCase : Dict = numpy.array([0.5, 0.8660254])
_lowerCAmelCase : Any = numpy.array([1, 0])
_lowerCAmelCase : int = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCamelCase_( _snake_case : list[numpy.ndarray] , _snake_case : int ):
"""simple docstring"""
__a =initial_vectors
for _ in range(_snake_case ):
__a =iteration_step(_snake_case )
return vectors
def UpperCamelCase_( _snake_case : list[numpy.ndarray] ):
"""simple docstring"""
__a =[]
for i, start_vector in enumerate(vectors[:-1] ):
__a =vectors[i + 1]
new_vectors.append(_snake_case )
__a =end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCamelCase_( _snake_case : numpy.ndarray , _snake_case : float ):
"""simple docstring"""
__a =numpy.radians(_snake_case )
__a , __a =numpy.cos(_snake_case ), numpy.sin(_snake_case )
__a =numpy.array(((c, -s), (s, c)) )
return numpy.dot(_snake_case , _snake_case )
def UpperCamelCase_( _snake_case : list[numpy.ndarray] ):
"""simple docstring"""
__a =plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
__a , __a =zip(*_snake_case )
plt.plot(_snake_case , _snake_case )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : List[Any] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
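# Sanity check one could add (simple geometry, not part of the original script):
# each iteration replaces every segment with four, so after n steps the curve has
# 3 * 4**n segments, i.e. len(iterate(INITIAL_VECTORS, 5)) == 3 * 4**5 + 1 == 3073.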
| 308 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = RoCBertTokenizer
_UpperCAmelCase :Tuple = None
_UpperCAmelCase :Optional[int] = False
_UpperCAmelCase :str = True
_UpperCAmelCase :Union[str, Any] = filter_non_english
def _snake_case ( self ):
super().setUp()
lowercase__: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
lowercase__: Optional[Any] = {}
lowercase__: Optional[Any] = {}
for i, value in enumerate(__lowerCamelCase ):
lowercase__: Any = i
lowercase__: Optional[Any] = i
lowercase__: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowercase__: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(__lowerCamelCase , __lowerCamelCase , ensure_ascii=__lowerCamelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(__lowerCamelCase , __lowerCamelCase , ensure_ascii=__lowerCamelCase )
def _snake_case ( self ):
lowercase__: List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowercase__: Optional[Any] = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(__lowerCamelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__lowerCamelCase ) , [5, 6, 2, 5, 7, 8] )
def _snake_case ( self ):
lowercase__: Tuple = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _snake_case ( self ):
lowercase__: Dict = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _snake_case ( self ):
lowercase__: List[Any] = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _snake_case ( self ):
lowercase__: List[str] = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _snake_case ( self ):
lowercase__: int = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _snake_case ( self ):
lowercase__: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _snake_case ( self ):
lowercase__: Tuple = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _snake_case ( self ):
lowercase__: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _snake_case ( self ):
lowercase__: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _snake_case ( self ):
lowercase__: Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowercase__: Optional[int] = {}
for i, token in enumerate(__lowerCamelCase ):
lowercase__: List[str] = i
lowercase__: Union[str, Any] = RoCBertWordpieceTokenizer(vocab=__lowerCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def _snake_case ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _snake_case ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _snake_case ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def _snake_case ( self ):
lowercase__: Dict = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowercase__: List[Any] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def _snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
lowercase__: int = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowercase__: Optional[int] = tokenizer_r.encode_plus(
__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase , )
lowercase__: Tuple = tokenizer_r.do_lower_case if hasattr(__lowerCamelCase , '''do_lower_case''' ) else False
lowercase__: Tuple = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def _snake_case ( self ):
lowercase__: Tuple = ["的", "人", "有"]
lowercase__: int = "".join(__lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__: Union[str, Any] = True
lowercase__: Tuple = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
lowercase__: str = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
lowercase__: List[Any] = tokenizer_p.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
lowercase__: Tuple = tokenizer_r.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
lowercase__: Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__lowerCamelCase )
lowercase__: Dict = tokenizer_p.convert_ids_to_tokens(__lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowercase__: Union[str, Any] = False
lowercase__: List[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
lowercase__: str = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
lowercase__: Dict = tokenizer_r.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
lowercase__: Optional[Any] = tokenizer_p.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
lowercase__: Dict = tokenizer_r.convert_ids_to_tokens(__lowerCamelCase )
lowercase__: int = tokenizer_p.convert_ids_to_tokens(__lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowercase__: Tuple = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(__lowerCamelCase )
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self ):
lowercase__: Optional[int] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowercase__: Optional[int] = tokenizer.encode('''你好''' , add_special_tokens=__lowerCamelCase )
lowercase__: List[Any] = tokenizer.encode('''你是谁''' , add_special_tokens=__lowerCamelCase )
lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
lowercase__: int = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _snake_case ( self ):
lowercase__: Tuple = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowercase__: Any = "你好,你是谁"
lowercase__: Dict = tokenizer.tokenize(__lowerCamelCase )
lowercase__: Any = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
lowercase__: Optional[Any] = tokenizer.convert_tokens_to_shape_ids(__lowerCamelCase )
lowercase__: str = tokenizer.convert_tokens_to_pronunciation_ids(__lowerCamelCase )
lowercase__: Tuple = tokenizer.prepare_for_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , add_special_tokens=__lowerCamelCase )
lowercase__: List[Any] = tokenizer.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
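# Hypothetical quick start mirroring what these tests exercise (the three
# vocabulary file paths are illustrative):
#   tokenizer = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
#   tokenizer.tokenize("你好[SEP]你是谁")  # -> ["你", "好", "[SEP]", "你", "是", "谁"]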
| 177 |
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
_A : int = (boundary[1] - boundary[0]) / steps
_A : Any = boundary[0]
_A : List[Any] = boundary[1]
_A : str = make_points(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : str = 0.0
y += (h / 2.0) * f(UpperCamelCase__ )
for i in x_i:
# print(i)
y += h * f(UpperCamelCase__ )
y += (h / 2.0) * f(UpperCamelCase__ )
return y
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
_A : Optional[int] = a + h
while x < (b - h):
yield x
_A : Dict = x + h
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # enter your function here
_A : Any = (x - 0) * (x - 0)
return y
def _UpperCAmelCase ():
_A : Optional[Any] = 0.0 # Lower bound of integration
_A : Optional[int] = 1.0 # Upper bound of integration
_A : List[Any] = 10.0 # define number of steps or resolution
_A : Any = [a, b] # define boundary of integration
_A : Tuple = method_a(UpperCamelCase__ , UpperCamelCase__ )
print(f"y = {y}" )
if __name__ == "__main__":
main()
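# Rough expected output for the defaults above (plain arithmetic, assuming the
# interior points 0.1 ... 0.9): the extended trapezoidal rule on f(x) = x**2 over
# [0, 1] with 10 steps gives y = 0.335, close to the exact integral 1/3.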
| 11 | 0 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__A = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__A = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
__A = "zero2"
__A = "zero3"
__A = [ZEROa, ZEROa]
def lowerCAmelCase_ ( __a , __a , __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: List[Any] =parameterized.to_safe_name("_".join(str(__a ) for x in param.args ) )
return F"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
__A = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int) ->str:
'''simple docstring'''
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str]) ->str:
'''simple docstring'''
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any) ->List[str]:
'''simple docstring'''
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]) ->Dict:
'''simple docstring'''
self.run_and_check(
stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : str) ->Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[Any] =models[model]
lowerCamelCase__: Dict =self.run_trainer(
stage=UpperCAmelCase_ , model_name=UpperCAmelCase_ , eval_steps=UpperCAmelCase_ , num_train_epochs=1 , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , )
self.do_checks(UpperCAmelCase_)
return output_dir
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =self.get_auto_remove_tmp_dir("./xxx" , after=UpperCAmelCase_)
lowerCamelCase__: Dict =F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(UpperCAmelCase_)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["--fp16"])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
lowerCamelCase__: Dict =F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
lowerCamelCase__: List[Any] =[F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
lowerCamelCase__: Dict =self.get_launcher(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCAmelCase_ , env=self.get_env())
return output_dir
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Tuple=False) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =min(2 , get_gpu_count()) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
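# Hypothetical command assembled from get_launcher + run_trainer above for stage
# "zero2" on 2 GPUs (the trailing arguments follow the patterns built in run_trainer;
# the placeholder directories are illustrative):
#   deepspeed --num_nodes 1 --num_gpus 2 <examples_dir>/research_projects/wav2vec2/run_asr.py \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json --model_name_or_path ...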
| 273 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__A = ["text", "image", "audio"]
def lowerCAmelCase_ ( __a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =[]
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__a , __a ):
inputs.append(create_inputs(__a ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def lowerCAmelCase_ ( __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =[]
for output in outputs:
if isinstance(__a , (str, AgentText) ):
output_types.append("text" )
elif isinstance(__a , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(__a , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs"))
self.assertTrue(hasattr(self.tool , "outputs"))
lowerCamelCase__: Tuple =self.tool.inputs
for _input in inputs:
if isinstance(_input , UpperCAmelCase_):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
lowerCamelCase__: Optional[Any] =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: List[str] =create_inputs(self.tool.inputs)
lowerCamelCase__: str =self.tool(*UpperCAmelCase_)
# There is a single output
if len(self.tool.outputs) == 1:
lowerCamelCase__: Optional[Any] =[outputs]
self.assertListEqual(output_types(UpperCAmelCase_) , self.tool.outputs)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description"))
self.assertTrue(hasattr(self.tool , "default_checkpoint"))
self.assertTrue(self.tool.description.startswith("This is a tool that"))
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =create_inputs(self.tool.inputs)
lowerCamelCase__: Dict =self.tool(*UpperCAmelCase_)
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Tuple =[outputs]
self.assertEqual(len(UpperCAmelCase_) , len(self.tool.outputs))
for output, output_type in zip(UpperCAmelCase_ , self.tool.outputs):
lowerCamelCase__: Any =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: Any =create_inputs(self.tool.inputs)
lowerCamelCase__: int =[]
for _input, input_type in zip(UpperCAmelCase_ , self.tool.inputs):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
lowerCamelCase__: Union[str, Any] =self.tool(*UpperCAmelCase_)
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: str =[outputs]
self.assertEqual(len(UpperCAmelCase_) , len(self.tool.outputs))
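# Hypothetical concrete use of the mixin above: a tool test sets `self.tool` in
# setUp and inherits all of these checks, e.g.
#   class TranslationToolTest(ToolTesterMixin, unittest.TestCase):
#       def setUp(self):
#           self.tool = load_tool("translation")
# (ToolTesterMixin / load_tool are the upstream names; identifiers here are mangled.)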
| 273 | 1 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = "PoolFormerConfig"
# Base docstring
lowercase_ = "sail/poolformer_s12"
lowercase_ = [1, 512, 7, 7]
# Image classification docstring
lowercase_ = "sail/poolformer_s12"
lowercase_ = "tabby, tabby cat"
lowercase_ = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : float = 0.0 , __lowerCamelCase : bool = False ) ->Dict:
if drop_prob == 0.0 or not training:
return input
_SCREAMING_SNAKE_CASE = 1 - drop_prob
_SCREAMING_SNAKE_CASE = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_SCREAMING_SNAKE_CASE = keep_prob + torch.rand(__lowerCamelCase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
_SCREAMING_SNAKE_CASE = input.div(__lowerCamelCase ) * random_tensor
return output
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A = None ) -> None:
super().__init__()
_SCREAMING_SNAKE_CASE = drop_prob
def snake_case_( self , A ) -> torch.Tensor:
return drop_path(a__ , self.drop_prob , self.training )
def snake_case_( self ) -> str:
return "p={}".format(self.drop_prob )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A , A , A=None ) -> Tuple:
super().__init__()
_SCREAMING_SNAKE_CASE = patch_size if isinstance(a__ , collections.abc.Iterable ) else (patch_size, patch_size)
_SCREAMING_SNAKE_CASE = stride if isinstance(a__ , collections.abc.Iterable ) else (stride, stride)
_SCREAMING_SNAKE_CASE = padding if isinstance(a__ , collections.abc.Iterable ) else (padding, padding)
_SCREAMING_SNAKE_CASE = nn.Convad(a__ , a__ , kernel_size=a__ , stride=a__ , padding=a__ )
_SCREAMING_SNAKE_CASE = norm_layer(a__ ) if norm_layer else nn.Identity()
def snake_case_( self , A ) -> str:
_SCREAMING_SNAKE_CASE = self.projection(a__ )
_SCREAMING_SNAKE_CASE = self.norm(a__ )
return embeddings
class a_ ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , A , **A ) -> List[str]:
super().__init__(1 , a__ , **a__ )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.AvgPoolad(a__ , stride=1 , padding=pool_size // 2 , count_include_pad=a__ )
def snake_case_( self , A ) -> Dict:
return self.pool(a__ ) - hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.Convad(a__ , a__ , 1 )
_SCREAMING_SNAKE_CASE = nn.Convad(a__ , a__ , 1 )
_SCREAMING_SNAKE_CASE = PoolFormerDropPath(a__ )
if isinstance(config.hidden_act , a__ ):
_SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
_SCREAMING_SNAKE_CASE = config.hidden_act
def snake_case_( self , A ) -> Dict:
_SCREAMING_SNAKE_CASE = self.conva(a__ )
_SCREAMING_SNAKE_CASE = self.act_fn(a__ )
_SCREAMING_SNAKE_CASE = self.drop(a__ )
_SCREAMING_SNAKE_CASE = self.conva(a__ )
_SCREAMING_SNAKE_CASE = self.drop(a__ )
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A , A , A ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE = PoolFormerPooling(a__ )
_SCREAMING_SNAKE_CASE = PoolFormerOutput(a__ , a__ , a__ , a__ )
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(a__ )
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(a__ )
# Useful for training neural nets
_SCREAMING_SNAKE_CASE = PoolFormerDropPath(a__ ) if drop_path > 0.0 else nn.Identity()
_SCREAMING_SNAKE_CASE = config.use_layer_scale
if config.use_layer_scale:
_SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((a__) ) , requires_grad=a__ )
_SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((a__) ) , requires_grad=a__ )
def snake_case_( self , A ) -> List[Any]:
if self.use_layer_scale:
_SCREAMING_SNAKE_CASE = self.pooling(self.before_norm(a__ ) )
_SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(a__ )
_SCREAMING_SNAKE_CASE = ()
_SCREAMING_SNAKE_CASE = self.output(self.after_norm(a__ ) )
_SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(a__ )
_SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
else:
_SCREAMING_SNAKE_CASE = self.drop_path(self.pooling(self.before_norm(a__ ) ) )
# First residual connection
_SCREAMING_SNAKE_CASE = pooling_output + hidden_states
_SCREAMING_SNAKE_CASE = ()
# Second residual connection inside the PoolFormerOutput block
_SCREAMING_SNAKE_CASE = self.drop_path(self.output(self.after_norm(a__ ) ) )
_SCREAMING_SNAKE_CASE = hidden_states + layer_output
_SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> Tuple:
super().__init__()
_SCREAMING_SNAKE_CASE = config
# stochastic depth decay rule
_SCREAMING_SNAKE_CASE = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_SCREAMING_SNAKE_CASE = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_SCREAMING_SNAKE_CASE = nn.ModuleList(a__ )
# Transformer blocks
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_SCREAMING_SNAKE_CASE = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
a__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(a__ ) )
_SCREAMING_SNAKE_CASE = nn.ModuleList(a__ )
def snake_case_( self , A , A=False , A=True ) -> Tuple:
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
_SCREAMING_SNAKE_CASE = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = layers
# Get patch embeddings from hidden_states
_SCREAMING_SNAKE_CASE = embedding_layer(a__ )
# Send the embeddings through the blocks
for _, blk in enumerate(a__ ):
_SCREAMING_SNAKE_CASE = blk(a__ )
_SCREAMING_SNAKE_CASE = layer_outputs[0]
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__ )
class a_ ( lowercase_ ):
'''simple docstring'''
UpperCamelCase = PoolFormerConfig
UpperCamelCase = "poolformer"
UpperCamelCase = "pixel_values"
UpperCamelCase = True
def snake_case_( self , A ) -> List[Any]:
if isinstance(a__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(a__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def snake_case_( self , A , A=False ) -> Dict:
if isinstance(a__ , a__ ):
_SCREAMING_SNAKE_CASE = value
lowercase_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , lowercase_ , )
class a_ ( lowercase_ ):
'''simple docstring'''
def __init__( self , A ) -> str:
super().__init__(a__ )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = PoolFormerEncoder(a__ )
# Initialize weights and apply final processing
self.post_init()
def snake_case_( self ) -> Dict:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case_( self , A = None , A = None , A = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ , )
_SCREAMING_SNAKE_CASE = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=a__ , hidden_states=encoder_outputs.hidden_states , )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.hidden_size )
def snake_case_( self , A ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.dense(a__ )
return output
@add_start_docstrings(
'''\n PoolFormer Model transformer with an image classification head on top\n ''' , lowercase_ , )
class a_ ( lowercase_ ):
'''simple docstring'''
def __init__( self , A ) -> List[Any]:
super().__init__(a__ )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = PoolFormerModel(a__ )
# Final norm
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_SCREAMING_SNAKE_CASE = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case_( self , A = None , A = None , A = None , A = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE = self.poolformer(
a__ , output_hidden_states=a__ , return_dict=a__ , )
_SCREAMING_SNAKE_CASE = outputs[0]
_SCREAMING_SNAKE_CASE = self.classifier(self.norm(a__ ).mean([-2, -1] ) )
_SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
_SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
_SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_SCREAMING_SNAKE_CASE = loss_fct(a__ , a__ )
elif self.config.problem_type == "single_label_classification":
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
_SCREAMING_SNAKE_CASE = loss_fct(a__ , a__ )
if not return_dict:
_SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states )
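# Hypothetical end-to-end sketch against the upstream API this file mirrors (the
# class names in this file are mangled placeholders; `image` is any PIL image):
#   from transformers import AutoImageProcessor, PoolFormerForImageClassification
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   logits = model(**processor(images=image, return_tensors="pt")).logits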
| 58 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase_( snake_case : list[int] ):
    '''
    Return True iff every element of the list is distinct.

    >>> UpperCamelCase_([1, 2, 3, 4])
    True
    >>> UpperCamelCase_([1, 2, 2, 4])
    False
    '''
return len(set(snake_case ) ) == len(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 0 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[str]:
A : List[str] = torch.nn.Linear(10 , 10 )
A : Optional[Any] = torch.optim.SGD(model.parameters() , 0.1 )
A : Optional[int] = Accelerator()
A : Tuple = accelerator.prepare(__lowerCamelCase )
try:
pickle.loads(pickle.dumps(__lowerCamelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 369 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
__SCREAMING_SNAKE_CASE = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
__SCREAMING_SNAKE_CASE = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_INIT_CONFIGURATION
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ElectraTokenizer
def __init__( self : int , __lowerCamelCase : str=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Tuple=True , __lowerCamelCase : int="[UNK]" , __lowerCamelCase : Any="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : str="[CLS]" , __lowerCamelCase : Tuple="[MASK]" , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=None , **__lowerCamelCase : str , ) -> List[str]:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
A : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __lowerCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __lowerCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __lowerCamelCase ) != tokenize_chinese_chars
):
A : Union[str, Any] = getattr(__lowerCamelCase , normalizer_state.pop("type" ) )
A : List[Any] = do_lower_case
A : Tuple = strip_accents
A : Any = tokenize_chinese_chars
A : Tuple = normalizer_class(**__lowerCamelCase )
A : Optional[Any] = do_lower_case
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=None ) -> List[Any]:
A : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE__ ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
A : int = [self.sep_token_id]
A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
A : List[Any] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
        return tuple(__lowerCamelCase )
| 256 | 0 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
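# Illustrative sketch (an assumption, not part of diffusers): any object that
# exposes a `register_subcommand(parser)` hook and a `run()` method can plug
# into the dispatch loop above. `HelloCommand` is hypothetical.
#
#     class HelloCommand:
#         @staticmethod
#         def register_subcommand(commands_parser):
#             hello_parser = commands_parser.add_parser("hello", help="Print a greeting")
#             # `set_defaults(func=...)` is what makes `hasattr(args, "func")` succeed.
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("Hello from diffusers-cli!")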
| 196 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels, reduced_channels):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's X layer: a 1x1 -> grouped 3x3 -> 1x1 bottleneck with a residual shortcut."""

    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with a Squeeze-and-Excitation block."""

    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
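# Minimal usage sketch (assumption: these classes are exposed publicly as
# `transformers.RegNetConfig` / `transformers.RegNetModel`; the relative
# imports above mean this module itself cannot be executed directly):
#
#     import torch
#     from transformers import RegNetConfig, RegNetModel
#
#     config = RegNetConfig()
#     model = RegNetModel(config).eval()
#     pixel_values = torch.randn(1, config.num_channels, 224, 224)
#     with torch.no_grad():
#         outputs = model(pixel_values)
#     print(outputs.pooler_output.shape)  # (1, config.hidden_sizes[-1], 1, 1)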
| 196 | 1 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 351 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX as
    the original implementation uses Python variables and control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
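# Quick illustrative check of `custom_unfold` against `torch.Tensor.unfold`
# (assumes torch is installed; kept as a comment so importing this config
# module stays side-effect free):
#
#     import torch
#     x = torch.arange(10).reshape(1, 10)
#     assert torch.equal(custom_unfold(x, dimension=1, size=4, step=2), x.unfold(1, 4, 2))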
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
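# Minimal usage sketch (assumption: this class is exposed publicly as
# `transformers.GPTNeoConfig`):
#
#     config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
#     assert config.attention_layers == ["global", "local", "global", "local"]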
| 295 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

""",
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
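# Minimal usage sketch (assumptions: network access and the public `pipeline`
# factory; `distilroberta-base` is one checkpoint whose mask token is `<mask>`):
#
#     from transformers import pipeline
#
#     unmasker = pipeline("fill-mask", model="distilroberta-base")
#     print(unmasker("Paris is the <mask> of France.", top_k=3))
#     # Restrict scoring to explicit candidates via `targets`:
#     print(unmasker("Paris is the <mask> of France.", targets=[" capital"]))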
| 35 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and the columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array, via binary search."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives using binary search; the search bound shrinks row by row."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every number in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Like the brute force above, but stops at the first negative number in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Benchmark the three counting strategies against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
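# A small worked example of the three equivalent strategies (runs only when the
# module is executed directly):
if __name__ == "__main__":
    sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    validate_grid(sample)
    # All three strategies agree: the sample grid holds 8 negative numbers.
    assert (
        count_negatives_binary_search(sample)
        == count_negatives_brute_force(sample)
        == count_negatives_brute_force_with_break(sample)
        == 8
    )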
| 285 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 297 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # By Bolzano's theorem a root can only be bracketed when f(a) and f(b) differ in sign.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
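# A generalized sketch (an assumption, not part of the original): the same
# method works for any continuous `f` with a sign change on [a, b], and the
# tolerance hard-coded as 0.01 above becomes a parameter.
def bisection_general(f, a: float, b: float, tol: float = 1e-6) -> float:
    if f(a) * f(b) >= 0:
        raise ValueError("Wrong space!")
    while (b - a) >= tol:
        c = (a + b) / 2
        if f(c) == 0.0:
            return c
        if f(c) * f(a) < 0:
            b = c
        else:
            a = c
    return (a + b) / 2


if __name__ == "__main__":
    # The positive root of 10 - x*x is sqrt(10) ~= 3.1623
    print(bisection_general(equation, 0, 6, tol=1e-9))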
| 297 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
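# Minimal usage sketch (assumption: these classes are exposed publicly as
# `transformers.SegformerConfig`; kept as a comment so importing this config
# module stays side-effect free):
#
#     config = SegformerConfig()            # defaults correspond to the MiT-b0 sizes
#     onnx_config = SegformerOnnxConfig(config)
#     print(onnx_config.inputs)             # OrderedDict with a "pixel_values" entry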
| 78 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
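# Minimal usage sketch (assumption: the classes imported above are available
# from an installed `transformers` with torch; kept as a comment so importing
# this test module stays side-effect free):
#
#     config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
#     backbone = MaskFormerSwinBackbone(config).eval()
#     feature_maps = backbone(torch.randn(1, 3, 32, 32)).feature_maps
#     print([tuple(f.shape) for f in feature_maps])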
| 39 | 0 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports )
| 355 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = 'luke'
def __init__( self : Dict ,__lowerCamelCase : Optional[Any]=5_02_67 ,__lowerCamelCase : str=50_00_00 ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : int=2_56 ,__lowerCamelCase : Optional[int]=12 ,__lowerCamelCase : Tuple=12 ,__lowerCamelCase : Any=30_72 ,__lowerCamelCase : Any="gelu" ,__lowerCamelCase : Any=0.1 ,__lowerCamelCase : Tuple=0.1 ,__lowerCamelCase : Tuple=5_12 ,__lowerCamelCase : int=2 ,__lowerCamelCase : Optional[int]=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Tuple=None ,__lowerCamelCase : Any=1 ,__lowerCamelCase : Dict=0 ,__lowerCamelCase : Any=2 ,**__lowerCamelCase : str ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase ,**__lowerCamelCase )
a = vocab_size
a = entity_vocab_size
a = hidden_size
a = entity_emb_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = initializer_range
a = layer_norm_eps
a = use_entity_aware_attention
a = classifier_dropout
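        # Usage sketch (this class mirrors transformers' LukeConfig; the repeated
        # local assignments above stand in for `self.<attr> = <attr>`):
        # config = LukeConfig(vocab_size=50_267, entity_vocab_size=500_000, entity_emb_size=256)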
| 330 | 0 |
def stooge_sort(arr ):
    '''simple docstring'''
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge(arr , i , h ):
    '''simple docstring'''
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr , i , h - t )
        # Recursively sort the last 2/3 of the elements
        stooge(arr , i + t , h )
        # Sort the first 2/3 again to finish
        stooge(arr , i , h - t )
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(stooge_sort(unsorted))
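    # Quick sanity check (hypothetical input, not from the original script):
    # stooge_sort([8, 3, 5, 1]) -> [1, 3, 5, 8]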
| 296 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys , default ):
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key , default=False ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key , default="no" ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return value
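# Usage sketch (hypothetical environment variables; function names reconstructed
# from the bodies above):
# os.environ["MY_WORLD_SIZE"] = "4"
# get_int_from_env(["MY_WORLD_SIZE"], default=1)  # -> 4
# parse_flag_from_env("MY_DEBUG")                 # -> False unless MY_DEBUG is set truthy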
| 296 | 1 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Tuple ,_a : Tuple ,_a : Optional[Any]=13 ,_a : List[str]=7 ,_a : str=True ,_a : List[str]=True ,_a : Optional[int]=True ,_a : Union[str, Any]=True ,_a : Union[str, Any]=True ,_a : Union[str, Any]=False ,_a : Union[str, Any]=False ,_a : List[Any]=False ,_a : str=2 ,_a : List[str]=99 ,_a : Tuple=0 ,_a : List[str]=32 ,_a : Dict=5 ,_a : Union[str, Any]=4 ,_a : Union[str, Any]=0.1 ,_a : Tuple=0.1 ,_a : Union[str, Any]=512 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : List[str]=2 ,_a : Dict=4 ,_a : int="last" ,_a : Optional[Any]=True ,_a : str=None ,_a : str=0 ,):
'''simple docstring'''
_a : Dict = parent
_a : Tuple = batch_size
_a : List[Any] = seq_length
_a : Any = is_training
_a : List[Any] = use_input_lengths
_a : Optional[Any] = use_token_type_ids
_a : Tuple = use_labels
_a : Dict = gelu_activation
_a : int = sinusoidal_embeddings
_a : int = causal
_a : Any = asm
_a : Dict = n_langs
_a : str = vocab_size
_a : Optional[int] = n_special
_a : List[Any] = hidden_size
_a : Any = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : Optional[Any] = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = max_position_embeddings
_a : List[str] = type_sequence_label_size
_a : List[str] = initializer_range
_a : Any = num_labels
_a : Optional[int] = num_choices
_a : List[str] = summary_type
_a : Optional[Any] = use_proj
_a : Dict = scope
_a : List[Any] = bos_token_id
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_a : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_a : int = None
if self.use_input_lengths:
_a : Union[str, Any] = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_a : Optional[Any] = None
if self.use_token_type_ids:
_a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
_a : Any = None
_a : List[Any] = None
_a : Any = None
if self.use_labels:
_a : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_a : Optional[Any] = ids_tensor([self.batch_size] ,2 ).float()
_a : List[Any] = ids_tensor([self.batch_size] ,self.num_choices )
_a : str = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def __lowercase ( self : Tuple ,_a : Any ,_a : str ,_a : str ,_a : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : List[Any] ,_a : Optional[Any] ,_a : List[Any] ,):
'''simple docstring'''
_a : List[str] = XLMModel(config=_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = model(_a ,lengths=_a ,langs=_a )
_a : List[str] = model(_a ,langs=_a )
_a : Any = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Any ,_a : Any ,_a : int ,_a : str ,_a : Dict ,_a : Union[str, Any] ,_a : Optional[int] ,_a : List[str] ,_a : Dict ,_a : str ,):
'''simple docstring'''
_a : Union[str, Any] = XLMWithLMHeadModel(_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : List[Any] ,_a : Tuple ,_a : List[str] ,_a : Optional[int] ,_a : Optional[Any] ,_a : Any ,_a : int ,_a : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,):
'''simple docstring'''
_a : str = XLMForQuestionAnsweringSimple(_a )
model.to(_a )
model.eval()
_a : int = model(_a )
_a : int = model(_a ,start_positions=_a ,end_positions=_a )
_a : Optional[int] = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : Optional[int] ,_a : Optional[Any] ,_a : Optional[int] ,_a : Any ,_a : Tuple ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : str ,_a : List[Any] ,):
'''simple docstring'''
_a : Tuple = XLMForQuestionAnswering(_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
_a : Tuple = model(
_a ,start_positions=_a ,end_positions=_a ,cls_index=_a ,is_impossible=_a ,p_mask=_a ,)
_a : Tuple = model(
_a ,start_positions=_a ,end_positions=_a ,cls_index=_a ,is_impossible=_a ,)
((_a), ) : str = result_with_labels.to_tuple()
_a : int = model(_a ,start_positions=_a ,end_positions=_a )
((_a), ) : int = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def __lowercase ( self : List[str] ,_a : List[str] ,_a : List[str] ,_a : Union[str, Any] ,_a : Dict ,_a : Optional[Any] ,_a : Any ,_a : Union[str, Any] ,_a : Tuple ,_a : str ,):
'''simple docstring'''
_a : Dict = XLMForSequenceClassification(_a )
model.to(_a )
model.eval()
_a : Any = model(_a )
_a : Optional[int] = model(_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : str ,_a : Optional[Any] ,_a : int ,_a : Optional[int] ,_a : Dict ,_a : int ,_a : int ,_a : Optional[int] ,_a : Optional[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
_a : List[str] = self.num_labels
_a : Optional[int] = XLMForTokenClassification(_a )
model.to(_a )
model.eval()
_a : Tuple = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : Any ,_a : List[Any] ,_a : Tuple ,_a : List[Any] ,_a : Optional[Any] ,_a : List[str] ,_a : Any ,_a : Optional[int] ,_a : int ,_a : Tuple ,):
'''simple docstring'''
_a : Tuple = self.num_choices
_a : Optional[Any] = XLMForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_a : Tuple = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_a : int = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_a : List[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_a : Optional[int] = model(
_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Dict = self.prepare_config_and_inputs()
(
(
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
), (
_a
),
) : List[str] = config_and_inputs
_a : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Any = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__UpperCAmelCase : Any = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowercase ( self : int ,_a : str ,_a : Union[str, Any] ,_a : Optional[int] ,_a : Any ,_a : Any ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowercase ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Optional[Any]=False ):
'''simple docstring'''
_a : Union[str, Any] = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_a : Tuple = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
_a : Any = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Dict = XLMModelTester(self )
_a : Union[str, Any] = ConfigTester(self ,config_class=_a ,emb_dim=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_a )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_a )
def __lowercase ( self : Tuple ,_a : Tuple ,_a : Dict ,_a : Union[str, Any] ,_a : List[Any] ,_a : Dict ,_a : List[str]=False ,_a : str=1 ):
'''simple docstring'''
self.assertIsInstance(_a ,_a )
self.assertListEqual(
[isinstance(_a ,_a ) for iter_attentions in attentions] ,[True] * len(_a ) )
self.assertEqual(len(_a ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_a ):
# adds PAD dummy token
_a : Optional[Any] = min_length + idx + 1
_a : Optional[Any] = min_length + idx + 1
_a : List[Any] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(_a ) )
def __lowercase ( self : Any ,_a : int ,_a : int ,_a : List[str] ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[int]=False ,_a : List[Any]=1 ):
'''simple docstring'''
self.assertIsInstance(_a ,_a )
self.assertListEqual(
[isinstance(_a ,_a ) for iter_hidden_states in hidden_states] ,[True] * len(_a ) ,)
self.assertEqual(len(_a ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_a ):
# adds PAD dummy token
_a : Dict = min_length + idx + 1
_a : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(_a ) ,)
pass
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Union[str, Any] = XLMModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(_a )
_a : Optional[int] = torch.tensor([[14, 447]] ,dtype=torch.long ,device=_a ) # the president
_a : int = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_a : Dict = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,_a )
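        # Standalone sketch of the same check (assumes network access to the
        # xlm-mlm-en-2048 checkpoint; greedy decoding simply repeats "the president"):
        # model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        # output_ids = model.generate(torch.tensor([[14, 447]]), do_sample=False)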
| 5 |
'''simple docstring'''
import sys
def matrix_chain_order(array ):
    """simple docstring"""
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution , i , j ):
    """simple docstring"""
    if i == j:
        print('A' + str(i ) , end=' ' )
    else:
        print('(' , end=' ' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(')' , end=' ' )
def main():
    """simple docstring"""
    array = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , sol = matrix_chain_order(array )
    print('No. of operations required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(sol , 1 , n - 1 )
if __name__ == "__main__":
main()
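# Worked check for the dimensions above (30x35, 35x15, 15x5, 5x10, 10x20, 20x25):
# the minimum is 15125 scalar multiplications, parenthesized as
# ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).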
| 5 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A__ : Optional[Any] =logging.get_logger(__name__)
A__ : str ={
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCAmelCase ( snake_case_ , snake_case_ ):
_lowercase: str = '''resnet'''
_lowercase: str = ['''basic''', '''bottleneck''']
def __init__( self : Optional[int] , __snake_case : Tuple=3 , __snake_case : List[str]=64 , __snake_case : Optional[Any]=[2_56, 5_12, 10_24, 20_48] , __snake_case : str=[3, 4, 6, 3] , __snake_case : int="bottleneck" , __snake_case : Optional[Any]="relu" , __snake_case : int=False , __snake_case : int=None , __snake_case : Union[str, Any]=None , **__snake_case : Any , ) -> int:
super().__init__(**__snake_case )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
_lowerCAmelCase = num_channels
_lowerCAmelCase = embedding_size
_lowerCAmelCase = hidden_sizes
_lowerCAmelCase = depths
_lowerCAmelCase = layer_type
_lowerCAmelCase = hidden_act
_lowerCAmelCase = downsample_in_first_stage
_lowerCAmelCase = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__snake_case ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=__snake_case , out_indices=__snake_case , stage_names=self.stage_names )
class UpperCAmelCase ( snake_case_ ):
_lowercase: Optional[int] = version.parse('''1.11''' )
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowercase__ ( self : Optional[int] ) -> float:
return 1E-3
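# Usage sketch (assuming these classes mirror transformers' ResNetConfig and its
# ONNX config; values are illustrative):
# config = ResNetConfig(layer_type="bottleneck", depths=[3, 4, 6, 3])
# The ONNX config exposes "pixel_values" with dynamic batch/channel/height/width
# axes and a validation tolerance of 1e-3.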
| 70 |
import argparse
import os
import re
import packaging.version
A__ : Dict = '''examples/'''
A__ : Any = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
A__ : Any = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
A__ : Any = '''README.md'''
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ):
with open(__UpperCamelCase ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
lowerCAmelCase_ : Tuple = f.read()
lowerCAmelCase_ , lowerCAmelCase_ : Dict = REPLACE_PATTERNS[pattern]
lowerCAmelCase_ : Tuple = replace.replace('''VERSION''' ,__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = re_pattern.sub(__UpperCamelCase ,__UpperCamelCase )
with open(__UpperCamelCase ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(__UpperCamelCase )
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ):
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase ,pattern='''examples''' )
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : List[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def UpperCamelCase( ):
lowerCAmelCase_ : List[str] = '''🤗 Transformers currently provides the following architectures'''
lowerCAmelCase_ : List[Any] = '''1. Want to contribute a new model?'''
with open(__UpperCamelCase ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
lowerCAmelCase_ : Union[str, Any] = f.readlines()
# Find the start of the list.
lowerCAmelCase_ : int = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCAmelCase_ : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
lowerCAmelCase_ : int = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,)
index += 1
with open(__UpperCamelCase ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(__UpperCamelCase )
def UpperCamelCase( ):
with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
lowerCAmelCase_ : Optional[Any] = f.read()
lowerCAmelCase_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def UpperCamelCase( __UpperCamelCase : Dict=False ):
lowerCAmelCase_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
lowerCAmelCase_ : List[str] = default_version.base_version
elif patch:
lowerCAmelCase_ : int = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
lowerCAmelCase_ : int = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
lowerCAmelCase_ : Optional[Any] = input(f"""Which version are you releasing? [{default_version}]""" )
if len(__UpperCamelCase ) == 0:
lowerCAmelCase_ : List[str] = default_version
print(f"""Updating version to {version}.""" )
global_version_update(__UpperCamelCase ,patch=__UpperCamelCase )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def UpperCamelCase( ):
lowerCAmelCase_ : Any = get_version()
lowerCAmelCase_ : int = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
lowerCAmelCase_ : Optional[Any] = current_version.base_version
# Check with the user we got that right.
lowerCAmelCase_ : Optional[Any] = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(__UpperCamelCase ) == 0:
lowerCAmelCase_ : int = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(__UpperCamelCase )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
A__ : Optional[int] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
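# CLI sketch (per the argparse flags above):
#   python release.py                 # bump to the release version before tagging
#   python release.py --patch         # same, but as a patch release
#   python release.py --post_release  # move the version forward to the next .dev0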
| 103 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = '''time_series_transformer'''
UpperCamelCase_ : Optional[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : str = "student_t" , lowerCAmelCase__ : str = "nll" , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowerCAmelCase__ : Optional[Union[str, bool]] = "mean" , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : str = "gelu" , lowerCAmelCase__ : int = 6_4 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : int = 1_0_0 , lowerCAmelCase__ : float = 0.02 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : Tuple , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = prediction_length
_UpperCAmelCase : Optional[Any] = context_length or prediction_length
_UpperCAmelCase : Optional[Any] = distribution_output
_UpperCAmelCase : Union[str, Any] = loss
_UpperCAmelCase : Dict = input_size
_UpperCAmelCase : int = num_time_features
_UpperCAmelCase : Any = lags_sequence
_UpperCAmelCase : Dict = scaling
_UpperCAmelCase : Tuple = num_dynamic_real_features
_UpperCAmelCase : Dict = num_static_real_features
_UpperCAmelCase : Union[str, Any] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase : Optional[int] = cardinality
else:
_UpperCAmelCase : Optional[Any] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase : List[Any] = embedding_dimension
else:
_UpperCAmelCase : Optional[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase : str = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase : Union[str, Any] = input_size * len(lowerCAmelCase__ ) + self._number_of_features
_UpperCAmelCase : str = d_model
_UpperCAmelCase : Optional[Any] = encoder_attention_heads
_UpperCAmelCase : Dict = decoder_attention_heads
_UpperCAmelCase : List[Any] = encoder_ffn_dim
_UpperCAmelCase : str = decoder_ffn_dim
_UpperCAmelCase : Dict = encoder_layers
_UpperCAmelCase : str = decoder_layers
_UpperCAmelCase : Any = dropout
_UpperCAmelCase : str = attention_dropout
_UpperCAmelCase : List[Any] = activation_dropout
_UpperCAmelCase : Dict = encoder_layerdrop
_UpperCAmelCase : Any = decoder_layerdrop
_UpperCAmelCase : Optional[Any] = activation_function
_UpperCAmelCase : Tuple = init_std
_UpperCAmelCase : List[str] = use_cache
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
 | 363 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def __UpperCAmelCase ( a_: List[str] ):
_UpperCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith("module.encoder" ):
_UpperCAmelCase : Optional[int] = key.replace("module.encoder", "glpn.encoder" )
if key.startswith("module.decoder" ):
_UpperCAmelCase : List[Any] = key.replace("module.decoder", "decoder.stages" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_UpperCAmelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )]
_UpperCAmelCase : Union[str, Any] = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(a_ )-1}""" )
if "norm" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("norm", "layer_norm" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_UpperCAmelCase : str = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
_UpperCAmelCase : Optional[Any] = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(a_ )-1}""" )
if "layer_norm1" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("layer_norm1", "layer_norm_1" )
if "layer_norm2" in key:
_UpperCAmelCase : List[Any] = key.replace("layer_norm2", "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_UpperCAmelCase : Optional[Any] = key[key.find("block" ) + len("block" )]
_UpperCAmelCase : List[str] = key.replace(f"""block{idx}""", f"""block.{int(a_ )-1}""" )
if "attn.q" in key:
_UpperCAmelCase : Optional[int] = key.replace("attn.q", "attention.self.query" )
if "attn.proj" in key:
_UpperCAmelCase : List[str] = key.replace("attn.proj", "attention.output.dense" )
if "attn" in key:
_UpperCAmelCase : Dict = key.replace("attn", "attention.self" )
if "fc1" in key:
_UpperCAmelCase : List[Any] = key.replace("fc1", "dense1" )
if "fc2" in key:
_UpperCAmelCase : List[Any] = key.replace("fc2", "dense2" )
if "linear_pred" in key:
_UpperCAmelCase : Any = key.replace("linear_pred", "classifier" )
if "linear_fuse" in key:
_UpperCAmelCase : Dict = key.replace("linear_fuse.conv", "linear_fuse" )
_UpperCAmelCase : List[str] = key.replace("linear_fuse.bn", "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_UpperCAmelCase : List[Any] = key[key.find("linear_c" ) + len("linear_c" )]
_UpperCAmelCase : Tuple = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(a_ )-1}""" )
if "bot_conv" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("bot_conv", "0.convolution" )
if "skip_conv1" in key:
_UpperCAmelCase : Optional[int] = key.replace("skip_conv1", "1.convolution" )
if "skip_conv2" in key:
_UpperCAmelCase : Optional[int] = key.replace("skip_conv2", "2.convolution" )
if "fusion1" in key:
_UpperCAmelCase : List[str] = key.replace("fusion1", "1.fusion" )
if "fusion2" in key:
_UpperCAmelCase : List[str] = key.replace("fusion2", "2.fusion" )
if "fusion3" in key:
_UpperCAmelCase : Optional[Any] = key.replace("fusion3", "3.fusion" )
if "fusion" in key and "conv" in key:
_UpperCAmelCase : List[Any] = key.replace("conv", "convolutional_layer" )
if key.startswith("module.last_layer_depth" ):
_UpperCAmelCase : Optional[int] = key.replace("module.last_layer_depth", "head.head" )
_UpperCAmelCase : int = value
return new_state_dict
def __UpperCAmelCase ( a_: str, a_: List[Any] ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_UpperCAmelCase : Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_UpperCAmelCase : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
_UpperCAmelCase : Dict = kv_bias[: config.hidden_sizes[i]]
_UpperCAmelCase : Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
_UpperCAmelCase : Optional[Any] = kv_bias[config.hidden_sizes[i] :]
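            # (assumed shapes) kv_weight is (2 * hidden_size, dim): the first
            # hidden_size rows feed the key projection and the remaining rows the
            # value projection, matching the slices above.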
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : List[Any] = Image.open(requests.get(a_, stream=a_ ).raw )
return image
@torch.no_grad()
def __UpperCAmelCase ( a_: Tuple, a_: Any, a_: Optional[Any]=False, a_: List[Any]=None ):
_UpperCAmelCase : Optional[Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_UpperCAmelCase : Dict = GLPNImageProcessor()
# prepare image
_UpperCAmelCase : List[Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(images=a_, return_tensors="pt" ).pixel_values
logger.info("Converting model..." )
# load original state dict
_UpperCAmelCase : Union[str, Any] = torch.load(a_, map_location=torch.device("cpu" ) )
# rename keys
_UpperCAmelCase : List[str] = rename_keys(a_ )
# key and value matrices need special treatment
read_in_k_v(a_, a_ )
# create HuggingFace model and load state dict
_UpperCAmelCase : List[str] = GLPNForDepthEstimation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
_UpperCAmelCase : Dict = model(a_ )
_UpperCAmelCase : List[str] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
_UpperCAmelCase : Tuple = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
_UpperCAmelCase : Dict = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3], a_, atol=1e-4 )
print("Looks ok!" )
# finally, push to hub if required
if push_to_hub:
logger.info("Pushing model and image processor to the hub..." )
model.push_to_hub(
repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add model", use_temp_dir=a_, )
image_processor.push_to_hub(
repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add image processor", use_temp_dir=a_, )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
__a = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
 | 17 | 0 |
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    '''simple docstring'''
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
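    # Worked example: kinetic_energy(10, 10) -> 0.5 * 10 * 10 * 10 = 500.0 joules.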
| 17 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
lowercase_ , lowercase_ = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
lowercase_ = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
lowercase_ = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowercase_ = args.example.rsplit("""/""", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 58 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def get_user_input():
    '''simple docstring'''
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('config' , description=description )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=description )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command(args ):
    '''simple docstring'''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F'accelerate configuration saved at {config_file}' )
def main():
    '''simple docstring'''
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
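# CLI sketch (this module backs the `accelerate config` subcommand):
#   accelerate config                          # interactive prompts, saved to the default path
#   accelerate config --config_file my.yaml    # save to an explicit file instead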
| 304 |
def solution(power = 1_0_0_0 ) -> int:
    '''simple docstring'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
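    # Sanity check: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) == 26.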
| 304 | 1 |
"""simple docstring"""
from __future__ import annotations
def shear_stress( stress : float , tangential_force : float , area : float , ) ->tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
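    # Worked example: with stress unknown (passed as 0), a 100 N tangential force
    # over a 2 m^2 area gives shear_stress(0, 100, 2) == ("stress", 50.0).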
| 105 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = (DDIMParallelScheduler,)
SCREAMING_SNAKE_CASE__ : Optional[Any] = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : int = scheduler_class(**lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : str = 10, 0.0
UpperCAmelCase_ : Optional[int] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for t in scheduler.timesteps:
UpperCAmelCase_ : Dict = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Dict = scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : List[str] = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
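        # With 1000 training steps and 5 inference steps the stride is 200, giving
        # timesteps [800, 600, 400, 200, 0]; steps_offset=1 shifts each by one.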
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowercase_ , num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowercase_ , eta=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : Optional[int] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = 10, 0.0
scheduler.set_timesteps(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_model()
UpperCAmelCase_ : List[str] = self.dummy_sample_deter
UpperCAmelCase_ : Any = self.dummy_sample_deter + 0.1
UpperCAmelCase_ : int = self.dummy_sample_deter - 0.1
UpperCAmelCase_ : List[Any] = samplea.shape[0]
UpperCAmelCase_ : int = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCAmelCase_ : int = torch.arange(lowercase_ )[0:3, None].repeat(1 , lowercase_ )
UpperCAmelCase_ : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCAmelCase_ : Optional[Any] = scheduler.batch_step_no_noise(lowercase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowercase_ )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : str = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
assert abs(result_mean.item() - 0.49_82 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.full_loop()
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : List[str] = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1E-2
assert abs(result_mean.item() - 0.06_84 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : List[str] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : Dict = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
assert abs(result_mean.item() - 0.19_51 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : int = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
assert abs(result_mean.item() - 0.19_41 ) < 1E-3
| 61 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : str = '''switch_transformers'''
__lowercase : str = ['''past_key_values''']
__lowercase : Optional[int] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , lowerCAmelCase__=3_2_1_2_8 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=6_4 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=6_4 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3 , lowerCAmelCase__=1_2 , lowerCAmelCase__=8 , lowerCAmelCase__=False , lowerCAmelCase__=0.01 , lowerCAmelCase__="float32" , lowerCAmelCase__=False , lowerCAmelCase__=3_2 , lowerCAmelCase__=1_2_8 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1E-6 , lowerCAmelCase__=0.0_01 , lowerCAmelCase__=0.0_01 , lowerCAmelCase__=1.0 , lowerCAmelCase__="relu" , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0 , lowerCAmelCase__=1 , **lowerCAmelCase__ , ):
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = d_kv
__SCREAMING_SNAKE_CASE = d_ff
__SCREAMING_SNAKE_CASE = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE = num_layers
__SCREAMING_SNAKE_CASE = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE = num_sparse_decoder_layers
        # This tells us after how many encoder layers a sparse layer is inserted.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers a sparse layer is inserted.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE = self.num_decoder_layers # HACK: this will create 0 sparse layers
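        # e.g. the defaults (12 layers, 3 sparse layers) make every 4th layer sparse.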
__SCREAMING_SNAKE_CASE = num_heads
__SCREAMING_SNAKE_CASE = num_experts
__SCREAMING_SNAKE_CASE = expert_capacity
__SCREAMING_SNAKE_CASE = router_bias
__SCREAMING_SNAKE_CASE = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
__SCREAMING_SNAKE_CASE = router_dtype
__SCREAMING_SNAKE_CASE = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE = relative_attention_max_distance
__SCREAMING_SNAKE_CASE = dropout_rate
__SCREAMING_SNAKE_CASE = layer_norm_epsilon
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = feed_forward_proj
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = add_router_probs
__SCREAMING_SNAKE_CASE = router_z_loss_coef
__SCREAMING_SNAKE_CASE = router_aux_loss_coef
__SCREAMING_SNAKE_CASE = self.feed_forward_proj.split("""-""")
__SCREAMING_SNAKE_CASE = act_info[-1]
__SCREAMING_SNAKE_CASE = act_info[0] == """gated"""
if len(lowerCAmelCase__) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE = """gelu_new"""
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
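

# Usage sketch (run via the public transformers API, since this module relies on
# package-relative imports; values below are the defaults documented above):
#
#     from transformers import SwitchTransformersConfig
#     config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
#     assert config.encoder_sparse_step == 4  # a sparse MLP block every 4th layer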
| 255 |
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, do_resize=True, size=None, size_divisor=32, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, do_center_crop=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_pad=True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width of the output after resizing."""
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
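

# Standalone usage sketch (outside the test harness; the input image is synthetic):
#
#     from transformers import BridgeTowerImageProcessor
#     from PIL import Image
#     processor = BridgeTowerImageProcessor()
#     pixel_values = processor(Image.new("RGB", (640, 480)), return_tensors="pt").pixel_values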
| 255 | 1 |
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
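

# Round-trip sanity check against the standard library (illustrative payload only):
if __name__ == "__main__":
    import base64 as stdlib_base64

    _payload = b"Many hands make light work."
    assert base64_encode(_payload) == stdlib_base64.b64encode(_payload)
    assert base64_decode(base64_encode(_payload)) == _payload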
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", trained_betas=None, variance_type="fixed_small", clip_sample=True, prediction_type="epsilon", dtype=jnp.float32):
        self.dtype = dtype
    def create_state(self, common=None):
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps
        )

    def scale_model_input(self, state, sample, timestep=None):
        """DDPM does not rescale model inputs; the sample is returned unchanged."""
        return sample

    def set_timesteps(self, state, num_inference_steps, shape=()):
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def _get_variance(self, state, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, state, model_output, timestep, sample, key=None, return_dict=True):
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0)
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state, sample, noise, timesteps):
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
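

# Minimal denoising-loop sketch (illustrative; random noise stands in for a real
# Flax UNet prediction):
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=10)
#     sample = jax.random.normal(jax.random.PRNGKey(0), (1, 3, 8, 8))
#     for t in state.timesteps:
#         noise_pred = jax.random.normal(jax.random.PRNGKey(1), sample.shape)
#         sample, state = scheduler.step(state, noise_pred, t, sample, return_dict=False)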
| 93 | 0 |
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the issuer prefix of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over a given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate length, prefix, and Luhn checksum of a credit card number."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
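

# Companion sketch (hypothetical helper, not part of the original module): derive
# the Luhn check digit that completes a numeric prefix.
def luhn_check_digit(prefix: str) -> str:
    for candidate in "0123456789":
        if luhn_validation(prefix + candidate):
            return candidate
    raise AssertionError("exactly one digit must satisfy the Luhn check")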
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323") | 240 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Map a ParlAI state-dict key to its Hugging Face equivalent."""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    """Rename layernorm_embedding keys in place to layer_norm."""
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into transformers format."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
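

# Example invocation (file and folder names are illustrative):
#
#     python convert_blenderbot_checkpoint.py \
#         --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot \
#         --hf_config_json blenderbot-3b-config.json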
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 240 | 1 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Solves the classic "max subarray sum" problem (Kadane's algorithm)."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
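

# With allow_empty_subarrays=True the empty subarray (sum 0) is admissible, so an
# all-negative input yields 0 instead of its largest element:
if __name__ == "__main__":
    print(max_subarray_sum([-5, -2, -9], allow_empty_subarrays=True))  # 0
    print(max_subarray_sum([-5, -2, -9]))  # -2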
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"{max_subarray_sum(nums) = }")
| 82 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[float], iterations: int) -> list[float]:
    """Jacobi iteration: iteratively solve a strictly diagonally dominant linear system."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that each diagonal entry dominates the sum of the rest of its row."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
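

# Worked example on a strictly diagonally dominant 3x3 system (illustrative values;
# the iterates converge toward the exact solution of the system):
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=25))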
# Test Cases
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 312 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_generation(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 196 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
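

# Quick self-check of the "init" pattern (illustrative version strings; runs at
# import time and is side-effect free):
_pat, _repl = REPLACE_PATTERNS["init"]
assert _pat.sub(_repl.replace("VERSION", "4.29.0"), '__version__ = "4.29.0.dev0"\n') == '__version__ = "4.29.0"\n'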
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 196 | 1 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney"
        )
    def _np_extract_fbank_features(self, waveform):
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10"
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None, return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None, do_normalize=None, **kwargs):
        """Featurize one or several sequence(s) of raw speech into log-mel input features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self):
        """Serializes this instance to a dict, dropping the (large) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
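

# Minimal usage sketch (illustrative; the audio is synthetic silence):
#
#     from transformers import WhisperFeatureExtractor
#     import numpy as np
#     fe = WhisperFeatureExtractor()
#     feats = fe(np.zeros(16_000), sampling_rate=16_000, return_tensors="np").input_features
#     # feats.shape == (1, 80, 3000): 80 mel bins over 30 s of padded frames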
| 55 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 290 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Load the class metadata JSON from the Hub and build the metadata dict."""
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width of the output after resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width))
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]
        inputs = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), annotations, return_tensors="pt", instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True
        )
        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo"
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo"
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo"
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
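

# Post-processing usage sketch (illustrative; mirrors the tests above):
#
#     processor = OneFormerImageProcessor(
#         num_labels=10,
#         class_info_file="ade20k_panoptic.json",
#         num_text=10,
#         repo_path="shi-labs/oneformer_demo",
#     )
#     semantic_map = processor.post_process_semantic_segmentation(model_outputs)[0]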
| 204 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
    """simple docstring"""
    images = (__UpperCamelCase / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil ( __UpperCamelCase : int ) -> int:
    """simple docstring"""
    images = __UpperCamelCase
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 2_55).round().astype("""uint8""" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
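# Hedged usage sketch (added for illustration; the random tensor is an assumption):
# import torch
# fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1      # model-style output in [-1, 1]
# pil_images = __SCREAMING_SNAKE_CASE(fake_batch)    # denormalize, then convert to PIL
# pil_images[0].size                                 # -> (64, 64)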
| 204 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model : List[str] ) -> List[str]:
    """simple docstring"""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer ( nn.Module ):
    """simple docstring"""
    def __init__(self , module , rank ):
        """simple docstring"""
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward(self , input , *args , **kwargs ):
        """simple docstring"""
        return self.module(input , *args , **kwargs ) + self.adapter(input )
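# Note (comment added for clarity): for a frozen weight W the wrapped layer computes
# W x + B(A x), where A is (rank x in_features) and B is (out_features x rank), so only
# the low-rank adapter receives gradients while the base module stays untouched.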
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest ( unittest.TestCase ):
"""simple docstring"""
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
__A = "bigscience/bloom-1b7"
# Constant values
__A = 2.109_659_552_692_574
__A = "Hello my name is"
__A = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
__A = 10
def UpperCamelCase_ (self ):
"""simple docstring"""
a = AutoTokenizer.from_pretrained(self.model_name )
class Bnb4BitTest ( Base4bitTest ):
"""simple docstring"""
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="auto" )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map="auto" )
    def tearDown(self ):
        """simple docstring"""
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase_ (self ):
        """simple docstring"""
        config = self.model_abit.config
        self.assertTrue(hasattr(config , "quantization_config" ) )
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def UpperCamelCase_ (self ):
        """simple docstring"""
        from bitsandbytes.nn import Paramsabit
        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        linear = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCamelCase_ (self ):
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
            if isinstance(module , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
    def UpperCamelCase_ (self ):
        """simple docstring"""
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" )
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
    def UpperCamelCase_ (self ):
        """simple docstring"""
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_abit = True
        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=quantization_config , device_map="auto" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" )
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
def UpperCamelCase_ (self ):
"""simple docstring"""
        with self.assertRaises(NotImplementedError ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname )
def UpperCamelCase_ (self ):
"""simple docstring"""
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError ):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=quantization_config , load_in_abit=True , device_map="auto" , bnb_abit_quant_type="nf4" , )
def UpperCamelCase_ (self ):
"""simple docstring"""
        with self.assertRaises(ValueError ):
            # Tries with `str`
            self.model_abit.to("cpu" )
        with self.assertRaises(ValueError ):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa )
        with self.assertRaises(ValueError ):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0" ) )
        with self.assertRaises(ValueError ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(ValueError ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" )
        self.model_fpaa = self.model_fpaa.to(torch.floataa )
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu" )
        # Check this does not throw an error
        _ = self.model_fpaa.half()
        # Check this does not throw an error
        _ = self.model_fpaa.float()
def UpperCamelCase_ (self ):
"""simple docstring"""
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=True , device_map="auto" )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test ( unittest.TestCase ):
"""simple docstring"""
@classmethod
    def setUpClass(cls ):
        """simple docstring"""
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
        cls.input_text = "Translate in German: Hello, my dog is cute"
    def tearDown(self ):
        """simple docstring"""
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase_ (self ):
        """simple docstring"""
        from transformers import TaForConditionalGeneration
        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None
        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map="auto" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map="auto" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _ = model.generate(**encoded_input )
        TaForConditionalGeneration._keep_in_fpaa_modules = modules
    def UpperCamelCase_ (self ):
        """simple docstring"""
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map="auto" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map="auto" )
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        _ = model.generate(**encoded_input )
class Classes4BitModelTest ( Base4bitTest ):
    """simple docstring"""
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name , load_in_abit=True , device_map="auto" )
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=True , device_map="auto" )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map="auto" )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=True , device_map="auto" )
    def tearDown(self ):
        """simple docstring"""
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
def UpperCamelCase_ (self ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest ( Base4bitTest ):
    """simple docstring"""
    def setUp(self ):
        """simple docstring"""
        super().setUp()
    def tearDown(self ):
        """simple docstring"""
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase_ (self ):
        """simple docstring"""
        self.pipe = pipeline(
            "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu ( Base4bitTest ):
    """simple docstring"""
    def setUp(self ):
        """simple docstring"""
        super().setUp()
    def UpperCamelCase_ (self ):
        """simple docstring"""
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=True , device_map="balanced" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text , return_tensors="pt" )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
class Bnb4BitTestTraining ( Base4bitTest ):
    """simple docstring"""
    def setUp(self ):
        """simple docstring"""
        self.model_name = "facebook/opt-350m"
        super().setUp()
    def UpperCamelCase_ (self ):
        """simple docstring"""
        if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test ( Bnb4BitTestTraining ):
    """simple docstring"""
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3_191_854_854_152_187
| 227 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase: Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class _lowercase ( TokenizerTesterMixin, unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(_lowercase , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def UpperCamelCase_ (self ):
        """simple docstring"""
        token = "[PAD]"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def UpperCamelCase_ (self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "[PAD]" )
        self.assertEqual(vocab_keys[1] , "[CLS]" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1012 )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def UpperCamelCase_ (self ):
"""simple docstring"""
        tokenizer = XLMProphetNetTokenizer(_lowercase , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
    def big_tokenizer(self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "Hello World!"
a = [35389, 6672, 49, 2]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=a , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 227 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    '''configuration_pix2struct''': [
        '''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''Pix2StructConfig''',
        '''Pix2StructTextConfig''',
        '''Pix2StructVisionConfig''',
    ],
    '''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_pix2struct'''] = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_pix2struct'''] = [
        '''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Pix2StructPreTrainedModel''',
        '''Pix2StructForConditionalGeneration''',
        '''Pix2StructVisionModel''',
        '''Pix2StructTextModel''',
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
        '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FocalNetForImageClassification''',
        '''FocalNetForMaskedImageModeling''',
        '''FocalNetBackbone''',
        '''FocalNetModel''',
        '''FocalNetPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
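        # e.g. with the defaults above (image_size=10, patch_size=2, num_frames=2):
        # (10 // 2) ** 2 = 25 patches per frame, and 2 * 25 + 1 = 51 tokens in total
        # (comment added for clarity)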
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TimesformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_video_classification( self , config , pixel_values , labels ):
        model = TimesformerForVideoClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , expected_shape )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
    def setUp( self ):
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def A_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def A_ ( self ):
pass
    def A_ ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def A_ ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def A_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def A_ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs )
@slow
def A_ ( self ):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def A_ ( self ):
        if not self.has_attentions:
            pass
        else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                out_len = len(outputs )
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
    def A_ ( self ):
        def check_hidden_states_output(inputs_dict , model_class , config ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , model_class , config )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , model_class , config )
def prepare_video( ):
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A_ ( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def A_ ( self ):
        model = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8] , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 96 |
class SCREAMING_SNAKE_CASE__ :
    def __init__( self , set_counts ):
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self , src , dst ):
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent( self , disj_set ):
        if self.parents[disj_set] == disj_set:
            return disj_set
        # path compression: point directly at the root
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
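# Hedged usage sketch (driver added for illustration, not part of the original sample):
# ds = SCREAMING_SNAKE_CASE__([1, 1, 1])   # three singleton sets of size 1
# ds.merge(1, 2)                           # union by rank -> ds.max_set == 2
# ds.merge(0, 2)                           # -> ds.max_set == 3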
| 193 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a ( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__( self: Tuple , image_processor: Union[str, Any]=None , tokenizer: Tuple=None , **kwargs: Dict ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self: Optional[Any] , images: Any , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs: List[str] , ):
        """simple docstring"""
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("""pixel_values""" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["""overflow_to_sample_mapping"""] )
        encoded_inputs["""pixel_values"""] = images
        return encoded_inputs
    def get_overflowing_images( self: str , images: List[str] , overflow_to_sample_mapping: List[str] ):
        """simple docstring"""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                f""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode( self: int , *args: Tuple , **kwargs: Tuple ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self: Any , *args: Union[str, Any] , **kwargs: Dict ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self: Optional[Any] ):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self: int ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self: Any ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
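# Hedged usage sketch (added for illustration; the checkpoint id and `image` variable
# are assumptions, not part of the original sample):
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
# sorted(encoding.keys())  # -> ['attention_mask', 'bbox', 'input_ids', 'pixel_values']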
| 369 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class a ( Pipeline ):
    """simple docstring"""
    def __init__( self: Union[str, Any] , *args: List[str] , **kwargs: Union[str, Any] ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self: List[str] , top_k: Any=None ):
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params
    def __call__( self: Union[str, Any] , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs: Dict ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def preprocess( self: Any , image: int ):
        """simple docstring"""
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self: List[Any] , model_inputs: Any ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self: Any , model_outputs: Optional[Any] , top_k: int=5 ):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(scores , ids )]
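# Hedged usage sketch (added for illustration; the checkpoint id and file name are
# assumptions, not part of the original sample):
# classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
# classifier("cat.png", top_k=2)  # -> [{"score": ..., "label": ...}, {...}]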
| 69 | 0 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class _lowercase ( unittest.TestCase ):
    def get_model_optimizer( self: int , resolution: Dict=32 ):
        set_seed(0 )
        model = UNetaDModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        return model, optimizer
@slow
    def lowerCamelCase_ ( self: Dict ):
        device = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_000 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model , optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model , optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1e-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1e-5 ) )
| 41 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( SchedulerCommonTest ):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **snake_case__ ) -> str:
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**snake_case__ )
        return config
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2
assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0002 ) < 1e-3
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
| 348 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ) -> None:
    """simple docstring"""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ) -> list:
    """simple docstring"""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def lowercase_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*__a )
snake_case = odd_even_transposition(__a )
print("Sorted List\n" )
print(*__a )
if __name__ == "__main__":
main()
| 355 |
from collections import defaultdict
class AssignmentUsingBitmask :
    def __init__(self : Tuple , task_performed : list , total : int ) -> None:
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until(self : str , mask : int , task_no : int ) -> int:
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self : Any , task_performed : list ) -> int:
        # Store the list of persons for each task
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
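    # For the input above the driver prints 10: with person 0 choosing from tasks
    # {1, 3, 4}, person 1 from {1, 2, 5} and person 2 from {3, 4}, exactly 10 distinct
    # complete assignments exist (comment added for clarity).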
| 137 | 0 |
"""simple docstring"""
def UpperCAmelCase ( arr ) -> int:
    n = len(arr )
    s = sum(arr )
    # dp[i][j] is True when some subset of the first i elements sums to exactly j
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    # a sum of 0 is always reachable by picking no elements
    for i in range(n + 1 ):
        dp[i][0] = True
    # with no elements, no positive sum is reachable
    for j in range(1 , s + 1 ):
        dp[0][j] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # either skip arr[i - 1] ...
            dp[i][j] = dp[i - 1][j]
            # ... or include it when it fits
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # the closest reachable subset sum to s / 2 gives the minimum difference
    diff = s
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
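# Hedged sanity check (added for illustration): [1, 6, 11, 5] splits into {1, 5, 6}
# and {11}, so the minimum subset-sum difference is |12 - 11| = 1.
# print(UpperCAmelCase([1, 6, 11, 5]))  # -> 1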
| 69 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
def cosine_distance( image_embeds , text_embeds ) -> int:
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
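# Note (comment added for clarity): with both operands L2-normalized, the matmul
# returns pairwise cosine similarities, sim[i][j] = <x_i / |x_i| , y_j / |y_j|>.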
class UpperCamelCase ( PreTrainedModel ):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__( self, config ) -> Optional[int]:
        super().__init__(config )
        self.vision_model = CLIPVisionModel(config.vision_config )
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False )
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim ), requires_grad=False )
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim ), requires_grad=False )
        self.concept_embeds_weights = nn.Parameter(torch.ones(17 ), requires_grad=False )
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3 ), requires_grad=False )
    @torch.no_grad()
    def a_ ( self, clip_input, images ) -> Tuple:
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds ).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds ).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size ):
            result_img = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img['special_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3 )
                if result_img['special_scores'][concept_idx] > 0:
                    result_img['special_care'].append({concept_idx, result_img['special_scores'][concept_idx]} )
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img['concept_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3 )
                if result_img['concept_scores'][concept_idx] > 0:
                    result_img['bad_concepts'].append(concept_idx )
            result.append(result_img )
        has_nsfw_concepts = [len(res['bad_concepts'] ) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
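# Shape sketch (illustrative, not part of the original module): cosine_distance
# maps [batch, dim] image embeds and [n_concepts, dim] concept embeds to a
# [batch, n_concepts] similarity matrix, e.g.
#     cosine_distance(torch.randn(2, 768), torch.randn(17, 768)).shape == (2, 17)
# and thresholding those scores against the per-concept weights is what flags
# an image as containing a filtered concept.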
| 69 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 | """simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_a : Optional[int]= logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self
@property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
'''simple docstring'''
print(F"Converting {name}..." )
with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)
if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
print(F"Pushed {checkpoint_name}" )
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
'''simple docstring'''
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
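    # Example invocation (hypothetical paths; the script file name is assumed):
    #   python convert_resnet_to_pytorch.py --model_name resnet50 \
    #       --pytorch_dump_folder_path ./converted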
| 95 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 209 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i: int) -> int:  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' ,[2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
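# Usage sketch (not in the original tests; assumes pyspark and joblibspark are
# installed): the context manager simply routes map_nested through joblib's
# spark backend.
#     with parallel_backend("spark"):
#         assert map_nested(add_one, [1, 2, 3], num_proc=2) == [2, 3, 4]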
| 209 | 1 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
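    # Cross-check sketch (not in the original): Python's re module agrees on
    # this restricted pattern language (literals, '.', '*') under full-match
    # semantics, so it can serve as an oracle for the DP.
    import re

    assert bool(re.fullmatch(pattern, input_string)) == match_pattern(input_string, pattern)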
| 229 |
'''simple docstring'''
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 229 | 1 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
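# Worked example (illustrative): with the default MoVQ scale factor of 8, a
# requested 512x512 image maps to a 64x64 latent grid, and sizes that are not
# multiples of scale_factor**2 round up:
#     get_new_h_w(512, 512, 8) == (64, 64)
#     get_new_h_w(513, 513, 8) == (72, 72)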
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
"""simple docstring"""
lowercase__ = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
lowercase__ = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , truncation=UpperCamelCase_ , max_length=77 , return_attention_mask=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors='''pt''' , )
lowercase__ = text_inputs.input_ids
lowercase__ = self.tokenizer(UpperCamelCase_ , padding='''longest''' , return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
lowercase__ = text_input_ids.to(UpperCamelCase_ )
lowercase__ = text_inputs.attention_mask.to(UpperCamelCase_ )
lowercase__ = self.text_encoder(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
lowercase__ = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = text_encoder_hidden_states.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = text_mask.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [""] * batch_size
elif type(UpperCamelCase_ ) is not type(UpperCamelCase_ ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase_ )} !='
f' {type(UpperCamelCase_ )}.' )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = [negative_prompt]
elif batch_size != len(UpperCamelCase_ ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase_ )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=77 , truncation=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors='''pt''' , )
lowercase__ = uncond_input.input_ids.to(UpperCamelCase_ )
lowercase__ = uncond_input.attention_mask.to(UpperCamelCase_ )
lowercase__ = self.text_encoder(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = negative_prompt_embeds.shape[1]
lowercase__ = negative_prompt_embeds.repeat(1 , UpperCamelCase_ )
lowercase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ )
lowercase__ = uncond_text_encoder_hidden_states.shape[1]
lowercase__ = uncond_text_encoder_hidden_states.repeat(1 , UpperCamelCase_ , 1 )
lowercase__ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
lowercase__ = uncond_text_mask.repeat_interleave(UpperCamelCase_ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase__ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase__ = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
    def enable_model_cpu_offload(self, gpu_id=0):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase__ = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
if self.safety_checker is not None:
lowercase__ = cpu_offload_with_hook(self.safety_checker , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = len(UpperCamelCase_ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' )
lowercase__ = self._execution_device
lowercase__ = batch_size * num_images_per_prompt
lowercase__ = guidance_scale > 1.0
lowercase__ = self._encode_prompt(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = torch.cat(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.unet.config.in_channels
lowercase__ = get_new_h_w(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
lowercase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase__ = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ = noise_pred.chunk(2 )
lowercase__ = variance_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , ).prev_sample
# post-processing
lowercase__ = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowercase__ = image * 0.5 + 0.5
lowercase__ = image.clamp(0 , 1 )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 110 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
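# Migration sketch (checkpoint name illustrative, not guaranteed): new code
# should use the image processor directly instead of this deprecated wrapper.
#     from transformers import ImageGPTImageProcessor
#     processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")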
| 304 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
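# Worked example (illustrative): gauss_easter(2023) evaluates to
# datetime(2023, 4, 9); Western Easter 2023 fell on April 9.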
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 304 | 1 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 144 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 93 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)
@slow
    def test_models_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 367 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
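    # Example invocation (script path assumed; the error above suggests it is
    # normally run via `make style`):
    #   python utils/check_doc_toc.py --fix_and_overwrite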
| 186 | 0 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    # keep track of all the visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
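    # Quick demonstration (not in the original): the edge 2 -> 0 closes a cycle.
    assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
    assert check_cycle({0: [1], 1: [2], 2: []}) is False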
| 172 | """simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
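    # Example CLI usage once the subcommand is registered (paths hypothetical):
    #   transformers-cli train --train_data ./train.csv --task text_classification \
    #       --model bert-base-uncased --output ./trained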
| 172 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : torch.Tensor # [batch_size x 3]
lowerCAmelCase : torch.Tensor # [batch_size x 3]
lowerCAmelCase : torch.Tensor # [batch_size x 3]
lowerCAmelCase : torch.Tensor # [batch_size x 3]
lowerCAmelCase : int
lowerCAmelCase : int
lowerCAmelCase : float
lowerCAmelCase : float
lowerCAmelCase : Tuple[int]
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] ,dtype=np.floataa ) )
def UpperCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] ,dtype=np.floataa ) )
def UpperCAmelCase ( self : Dict ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Optional[Any] = torch.arange(self.height * self.width )
lowercase__ : Optional[int] = torch.stack(
[
pixel_indices % self.width,
torch.div(_snake_case ,self.width ,rounding_mode='''trunc''' ),
] ,axis=1 ,)
return coords
@property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1], then to tangent-plane fractions.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)
def UpperCAmelCase ( self : int ,_snake_case : int ,_snake_case : int ) -> "DifferentiableProjectiveCamera":
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin ,x=self.x ,y=self.y ,z=self.z ,width=_snake_case ,height=_snake_case ,x_fov=self.x_fov ,y_fov=self.y_fov ,)
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
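

# --- Added usage sketch (not part of the original module): exercising the
# camera above; the image size 64 is an arbitrary choice for illustration.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)  # 20 poses circling the origin
    rays = cameras.camera_rays        # one (origin, direction) pair per pixel
    print(rays.shape)                 # torch.Size([1, 20 * 64 * 64, 2, 3])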
| 302 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 302 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 100 |
"""simple docstring"""
__magic_name__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the recurrence x' = (a * x + c) % m."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        # The smallest number that can be generated is zero,
        # the largest is modulo - 1.
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__magic_name__ = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
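# --- Added note: with a fixed seed the sequence is fully reproducible, e.g.
# LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0).next_number()
# returns (1664525 * 0 + 1013904223) % (2 << 31) == 1013904223.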
| 100 | 1 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
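# --- Added worked example: 4150 is one of the numbers counted by solution(),
# since 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150.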
| 111 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"
    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
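
# --- Added note: `pos_att_type` may also be passed as a single string for
# backwards compatibility, e.g. DebertaV2Config(pos_att_type="c2p|p2c") is
# normalized to ["c2p", "p2c"] by the __init__ branch above.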
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 111 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 325 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n    except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n    except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
"references": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
}) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]") | 269 | 0 |
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
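

# --- Added usage example: 15 is the modular inverse of 7 (mod 26),
# since (7 * 15) % 26 == 105 % 26 == 1.
if __name__ == "__main__":
    print(find_mod_inverse(7, 26))  # 15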
| 260 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
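
# --- Added usage note: run from the shell, e.g.
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ... --mobilebert_config_file ... --pytorch_dump_path ...
# (paths are left elided on purpose).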
| 260 | 1 |
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
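

# --- Added usage sketch (not part of the original module): a tiny directed
# cycle; integer node labels are assumed.
if __name__ == "__main__":
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)
    print(dg.all_nodes())  # [0, 1, 2]
    print(dg.dfs(0))       # depth-first visit order starting at node 0
    print(dg.bfs(0))       # breadth-first visit order starting at node 0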
| 7 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """
    Counts the triangle words in words.txt (word value equals a triangular number).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
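# --- Added worked example: the word value of "SKY" is 19 + 11 + 25 == 55,
# the 10th triangular number (10 * 11 / 2 == 55), so "SKY" is a triangle word.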
| 7 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
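
# --- Added usage sketch: instantiating a down-scaled config for local
# experiments; every field left unspecified keeps its default above.
# config = NllbMoeConfig(num_experts=8, expert_capacity=32, encoder_layers=2, decoder_layers=2)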
| 152 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 152 | 1 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
lowercase = "RMDJXFUWGISLHVTCQNKYPBEZOA"
lowercase = "SGLCPQWZHKXAREONTFBVIYJUDM"
lowercase = "HVSICLTYKQUBXDWAJZOMFGPREN"
lowercase = "RZWQHFMVDBKICJLNTUXAGYPSOE"
lowercase = "LFKIJODBEGAMQPXVUHYSTCZRWN"
lowercase = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def __UpperCAmelCase ( a_ , a_ , a_ = (rotora, rotora, rotora) , a_ = "" , ):
snake_case_ = text.upper()
snake_case_ , snake_case_ , snake_case_ = _validator(
a_ , a_ , plugb.upper())
snake_case_ , snake_case_ , snake_case_ = rotor_position
snake_case_ , snake_case_ , snake_case_ = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
snake_case_ = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
snake_case_ = plugboard[symbol]
# rotor ra --------------------------
snake_case_ = abc.index(a_) + rotorposa
snake_case_ = rotora[index % len(a_)]
# rotor rb --------------------------
snake_case_ = abc.index(a_) + rotorposa
snake_case_ = rotora[index % len(a_)]
# rotor rc --------------------------
snake_case_ = abc.index(a_) + rotorposa
snake_case_ = rotora[index % len(a_)]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
snake_case_ = reflector[symbol]
# 2nd rotors
snake_case_ = abc[rotora.index(a_) - rotorposa]
snake_case_ = abc[rotora.index(a_) - rotorposa]
snake_case_ = abc[rotora.index(a_) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
snake_case_ = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(a_):
snake_case_ = 0
rotorposa += 1
if rotorposa >= len(a_):
snake_case_ = 0
rotorposa += 1
if rotorposa >= len(a_):
snake_case_ = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(a_)
return "".join(a_)
if __name__ == "__main__":
lowercase = "This is my Python script that emulates the Enigma machine from WWII."
lowercase = (1, 1, 1)
lowercase = "pictures"
lowercase = (rotora, rotora, rotora)
lowercase = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 178 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's `batch_decode`.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's `decode`.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
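

# --- Added usage sketch (the checkpoint id is illustrative, not from this file):
# from PIL import Image
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")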
| 178 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 347 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
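

# --- Added worked example: ids are raw UTF-8 bytes shifted by the 3 special
# tokens (pad=0, eos=1, unk=2), so "hi" maps to [ord("h") + 3, ord("i") + 3]
# == [107, 108] before </s> (id 1) is appended.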
| 347 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main ():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
    # Set seed
    set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
    return results
def _mp_fn (index : Optional[int] ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
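# Hedged invocation sketch (script name, checkpoint and paths below are
# illustrative assumptions, not taken from the original file):
#   python run_multiple_choice.py \
#     --model_name_or_path bert-base-uncased \
#     --task_name swag \
#     --data_dir ./data/swag \
#     --output_dir ./swag_out \
#     --do_train --do_eval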
| 34 |
'''simple docstring'''
def count_set_bits ( number : int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ) or number < 0:
        raise ValueError("Input must be a non-negative integer" )
    count = 0
    while number:
        # Clearing the lowest set bit jumps straight to the next 1, so the
        # loop runs once per set bit instead of once per bit position.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
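    # Illustrative spot-checks (assumed values, not part of the original file):
    assert count_set_bits(0 ) == 0
    assert count_set_bits(0b101101 ) == 4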
| 22 | 0 |
from math import log2
def lowest_set_bit_index ( number : int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    if number < 0:
        raise ValueError('''Input value must be a positive integer''' )
    return 0 if (number == 0) else int(log2(number & -number ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
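    # Illustrative spot-checks (assumed values, not part of the original file):
    assert lowest_set_bit_index(0b100100 ) == 2
    assert lowest_set_bit_index(8 ) == 3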
| 75 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__A = logging.get_logger(__name__)
class DPTFeatureExtractor ( DPTImageProcessor ):
    """simple docstring"""
    def __init__( self: List[Any] , *args: Union[str, Any] , **kwargs: Optional[Any] ) -> None:
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
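# Hedged migration sketch (replacement class named in the warning above; the
# checkpoint id is an illustrative assumption):
#   from transformers import DPTImageProcessor
#   image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")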
| 75 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
class A ( unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down (self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url (self ):
        # This test is for deprecated behavior and can be removed in v5
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class A ( unittest.TestCase ):
@classmethod
    def setUpClass (cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass (cls ):
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
    def test_push_to_hub (self ):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-feature-extractor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='test-feature-extractor' , push_to_hub=True , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_in_organization (self ):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=True , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_dynamic_feature_extractor (self ):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 295 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum ( x ) ->Tuple: # picklable for multiprocessing
    """simple docstring"""
    return x.sum()
def add_one ( i ) ->int: # picklable for multiprocessing
    """simple docstring"""
    return i + 1
@dataclass
class A :
    '''simple docstring'''
    x : int
    y : str
class PyUtilsTest ( TestCase ):
    '''simple docstring'''
def A (self : Tuple ):
A = {}
A = []
A = 1
A = [1, 2]
A = {"""a""": 1, """b""": 2}
A = {"""a""": [1, 2], """b""": [3, 4]}
A = {"""a""": {"""1""": 1}, """b""": 2}
A = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
A = {}
A = []
A = 2
A = [2, 3]
A = {"""a""": 2, """b""": 3}
A = {"""a""": [2, 3], """b""": [4, 5]}
A = {"""a""": {"""1""": 2}, """b""": 3}
A = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
A = 2
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
A = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
A = {"""a""": 2, """b""": 0, """c""": 2}
A = {
"""a""": np.eye(2 ).astype(_lowerCAmelCase ),
"""b""": np.zeros(3 ).astype(_lowerCAmelCase ),
"""c""": np.ones(2 ).astype(_lowerCAmelCase ),
}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowerCAmelCase ): # can't pickle a local lambda
map_nested(lambda _lowerCAmelCase : x + 1 , _lowerCAmelCase , num_proc=_lowerCAmelCase )
    def test_zip_dict (self : List[Any] ):
        da = {"""a""": 1, """b""": 2}
        db = {"""a""": 3, """b""": 4}
        dc = {"""a""": 5, """b""": 6}
        expected_zip_dict_result = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(da , db , dc ) ) , expected_zip_dict_result )
    def test_temporary_assignment (self : Union[str, Any] ):
        class Foo :
            '''simple docstring'''
            my_attr = '''bar'''
        foo = Foo()
        self.assertEqual(foo.my_attr , """bar""" )
        with temporary_assignment(foo , """my_attr""" , """BAR""" ):
            self.assertEqual(foo.my_attr , """BAR""" )
        self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __a ( iterable_length , num_proc , expected_num_proc ) ->Any:
    """simple docstring"""
    with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
        """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
        data_struct = {f"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest ( TestCase ):
    '''simple docstring'''
    @require_tf
    def test_tensorflow (self : Dict ):
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    @require_torch
    def test_torch (self : Tuple ):
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    def test_numpy (self : str ):
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def __a ( input_data ) ->List[str]:
    """simple docstring"""
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def __a ( data , expected_output ) ->List[Any]:
    """simple docstring"""
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def __a ( ) ->Optional[Any]:
    """simple docstring"""
    input_ = A(x=1 , y="""foobar""" )
    expected_output = {"""x""": 1, """y""": """foobar"""}
    assert asdict(input_ ) == expected_output
    nested = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
    expected_output = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
    assert asdict(nested ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="""foo""" )] )
def _split_text ( text ) ->Tuple:
    """simple docstring"""
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content ) ->List[str]:
    """simple docstring"""
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def __a ( ) ->Optional[int]:
    """simple docstring"""
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
            out.append(content )
        assert out.count("""a""" ) == 2
        assert out.count("""b""" ) == 2
        assert len(out ) == 4
| 258 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> None:
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
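# Hedged usage sketch (model id taken from the pretrained map above; the input
# sentence is illustrative):
#   tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#   input_ids = tokenizer("Hello world")["input_ids"]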
| 371 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList :
    def __init__( self , initial_capacity : int = 6 ) -> None:
        '''simple docstring'''
        self.front : Node | None = None
        self.rear : Node | None = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self , initial_capacity : int ) -> None:
        '''simple docstring'''
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) -> bool:
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
    def first( self ) -> Any | None:
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
    def enqueue( self , data ) -> None:
        '''simple docstring'''
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ) -> Any:
        '''simple docstring'''
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) -> None:
'''simple docstring'''
if self.is_empty():
raise Exception("""Empty Queue""" )
    def check_is_full( self ) -> None:
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class Node :
    def __init__( self ) -> None:
        '''simple docstring'''
        self.data : Any | None = None
        self.next : Node | None = None
        self.prev : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
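    # Illustrative round-trip (assumed behavior, not part of the original tests):
    queue = CircularQueueLinkedList(2 )
    queue.enqueue("a" )
    queue.enqueue("b" )
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"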
| 76 | 0 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key( k ):
    """simple docstring"""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
def convert_pegasus( tf_weights , cfg_updates ):
    """simple docstring"""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["""shared.weight"""][cfg.pad_token_id] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
    mapping["""encoder.embed_tokens.weight"""] = mapping['''shared.weight''']
    mapping["""decoder.embed_tokens.weight"""] = mapping['''shared.weight''']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy( path="./ckpt/aeslc/model.ckpt-32000" ):
    """simple docstring"""
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''Adafactor''', '''global_step''']
    for name, shape in tqdm(init_vars , desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch( ckpt_path , save_dir ):
    """simple docstring"""
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]['''max_position_embeddings''']
    tok = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["""task_specific_params"""] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('''model.decoder.embed_positions.weight''' )
    sd.pop('''model.encoder.embed_positions.weight''' )
    torch.save(sd , Path(save_dir ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('''pegasus''', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
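# Hedged invocation sketch (script name and checkpoint path are illustrative,
# mirroring the default used by get_tf_weights_as_numpy above):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc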
| 313 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_wavlm'''] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
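# Hedged usage note: once _LazyModule replaces this module in sys.modules, the
# torch-backed classes above are only imported on first attribute access, e.g.
#   from transformers.models.wavlm import WavLMModel  # assumed import path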
| 313 | 1 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_DESCRIPTION = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_KWARGS_DESCRIPTION = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy (preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1 (preds , labels , f1_avg="binary" ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc (ids_preds , labels ):
    '''simple docstring'''
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average="""macro""" )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case (datasets.Metric ):
    def _info( self : int ) -> Union[str, Any]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute( self : Dict , predictions : Optional[Any] , references : List[str] ) -> Optional[int]:
        '''simple docstring'''
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg="""macro""" )
        elif self.config_name == "record":
            dataset = [
                {
                    """qas""": [
                        {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 359 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_lowerCamelCase : Tuple = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_lowerCamelCase : List[str] = get_tests_dir("fixtures/vocab.json")
_lowerCamelCase : str = get_tests_dir("fixtures")
class __snake_case (unittest.TestCase ):
lowerCAmelCase__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp( self : Dict ) -> Any:
        '''simple docstring'''
        # Assumed from the upstream test suite: disable the remote-code prompt timeout.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_processor_from_model_shortcut( self : Tuple ) -> Any:
        '''simple docstring'''
        processor = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(processor , WavaVecaProcessor )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : List[Any] = WavaVecaConfig()
_lowerCAmelCase : str = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
_lowerCAmelCase : Any = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
copyfile(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """vocab.json""" ) )
_lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Any = WavaVecaFeatureExtractor()
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
_lowerCAmelCase : List[str] = WavaVecaProcessor(_UpperCAmelCase , _UpperCAmelCase )
# save in new folder
processor.save_pretrained(_UpperCAmelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """r""" ) as f:
_lowerCAmelCase : Union[str, Any] = json.load(_UpperCAmelCase )
config_dict.pop("""processor_class""" )
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """w""" ) as f:
f.write(json.dumps(_UpperCAmelCase ) )
_lowerCAmelCase : List[Any] = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Dict = WavaVecaFeatureExtractor()
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
_lowerCAmelCase : str = WavaVecaProcessor(_UpperCAmelCase , _UpperCAmelCase )
# save in new folder
processor.save_pretrained(_UpperCAmelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """r""" ) as f:
_lowerCAmelCase : str = json.load(_UpperCAmelCase )
config_dict.pop("""processor_class""" )
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """w""" ) as f:
f.write(json.dumps(_UpperCAmelCase ) )
_lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Tuple = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(_UpperCAmelCase )
# copy relevant files
copyfile(_UpperCAmelCase , os.path.join(_UpperCAmelCase , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , """w""" ) as f:
f.write("""{}""" )
_lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
with self.assertRaises(_UpperCAmelCase ):
_lowerCAmelCase : Any = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase ):
_lowerCAmelCase : List[str] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_UpperCAmelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
_lowerCAmelCase : Optional[int] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
_lowerCAmelCase : Union[str, Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase )
_lowerCAmelCase : List[str] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
'''simple docstring'''
try:
AutoConfig.register("""custom""" , _UpperCAmelCase )
AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase )
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase )
AutoProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase ):
AutoProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCAmelCase : List[str] = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Tuple = os.path.join(_UpperCAmelCase , """vocab.txt""" )
with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_lowerCAmelCase : str = CustomTokenizer(_UpperCAmelCase )
_lowerCAmelCase : List[str] = CustomProcessor(_UpperCAmelCase , _UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
'''simple docstring'''
class __snake_case (_a ):
lowerCAmelCase__ = False
class __snake_case (_a ):
lowerCAmelCase__ = False
class __snake_case (_a ):
lowerCAmelCase__ = "AutoFeatureExtractor"
lowerCAmelCase__ = "AutoTokenizer"
lowerCAmelCase__ = False
try:
AutoConfig.register("""custom""" , _UpperCAmelCase )
AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase )
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase )
AutoProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
# If remote code is not set, the default is to use local classes.
_lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_lowerCAmelCase : str = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_UpperCAmelCase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_UpperCAmelCase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class __snake_case (unittest.TestCase ):
lowerCAmelCase__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int ) -> Any:
'''simple docstring'''
_lowerCAmelCase : List[str] = TOKEN
HfFolder.save_token(_UpperCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple ) -> Optional[int]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = WavaVecaProcessor.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_UpperCAmelCase , """test-processor""" ) , push_to_hub=_UpperCAmelCase , use_auth_token=self._token )
_lowerCAmelCase : str = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(new_processor.feature_extractor , _UpperCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
'''simple docstring'''
_lowerCAmelCase : int = WavaVecaProcessor.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_UpperCAmelCase , """test-processor-org""" ) , push_to_hub=_UpperCAmelCase , use_auth_token=self._token , organization="""valid_org""" , )
_lowerCAmelCase : str = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_UpperCAmelCase , getattr(new_processor.feature_extractor , _UpperCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
_lowerCAmelCase : Any = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : int = os.path.join(_UpperCAmelCase , """vocab.txt""" )
with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_lowerCAmelCase : List[str] = CustomTokenizer(_UpperCAmelCase )
_lowerCAmelCase : List[str] = CustomProcessor(_UpperCAmelCase , _UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"{USER}/test-dynamic-processor" , token=self._token )
_lowerCAmelCase : Union[str, Any] = Repository(_UpperCAmelCase , clone_from=f"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(_UpperCAmelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_UpperCAmelCase , """tokenizer_config.json""" ) ) as f:
_lowerCAmelCase : str = json.load(_UpperCAmelCase )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , """custom_processing.py""" ) ) )
repo.push_to_hub()
_lowerCAmelCase : Tuple = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor" , trust_remote_code=_UpperCAmelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 159 | 0 |
'''simple docstring'''
def fizz_buzz ( number : int , iterations : int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("""iterations must be defined as integers""" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            """starting number must be an integer and be more than 0""" )
    if not iterations >= 1:
        raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
    out = """"""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
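    # Illustrative check (assumed expected string, following the rules above):
    assert fizz_buzz(1 , 7 ) == "1 2 Fizz 4 Buzz Fizz 7 "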
| 309 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE_ = open # noqa: we just need to have a builtin inside this module to test it properly
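# Hedged usage sketch (assumed shape of the checks in test_patching.py; the
# patch_submodule import path and the mock below are illustrative):
#   from datasets.utils.patching import patch_submodule
#   import _test_patching
#   with patch_submodule(_test_patching, "os.path.join", lambda *a: "mock"):
#       assert _test_patching.os.path.join("a", "b") == "mock"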
| 301 | 0 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowercase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
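# The class below implements the Banker's algorithm: given each process's
# current allocation and its maximum claim, it repeatedly looks for a process
# whose remaining need fits inside the currently available resources, "runs"
# it, and reclaims its allocation. If every process can finish this way, the
# system is in a safe state; otherwise it is unsafe.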
class BankersAlgorithm:
    def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table ):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation( self ):
        """Sum the currently allocated resources, per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def __available_resources( self ):
        """Resources still available: claim vector minus what is allocated."""
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def __need( self ):
        """Remaining need of each process: maximum claim minus allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def __need_index_manager( self ):
        """Map each need vector back to its original process index."""
        return {self.__need().index(i ): i for i in self.__need()}

    def main( self , **kwargs ):
        """Check whether the system is in a safe state and simulate execution."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("""_""" * 50 + """\n""" )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing." )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        """Updated available resource stack for processes: """
                        + """ """.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print("""The process is in a safe state.\n""" )
            else:
                print("""System in unsafe state. Aborting...\n""" )
                break

    def __pretty_data( self ):
        """Properly align and display the algorithm's input tables."""
        print(""" """ * 9 + """Allocated Resource Table""" )
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item ) + 1}"
                + """ """.join(f"{it:>8}" for it in item )
                + """\n""" )
        print(""" """ * 9 + """System Resource Table""" )
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item ) + 1}"
                + """ """.join(f"{it:>8}" for it in item )
                + """\n""" )
        print(
            """Current Usage by Active Processes: """
            + """ """.join(str(x ) for x in self.__claim_vector ) )
        print(
            """Initial Available Resources: """
            + """ """.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
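# Minimal usage sketch (assumes the module-level tables above; the doctest in
# the main guard only exercises docstrings, so this shows the intended entry
# point):
#     BankersAlgorithm(claim_vector, allocated_resources_table,
#                      maximum_claim_table).main(describe=True)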
if __name__ == "__main__":
import doctest
doctest.testmod()
| 285 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000 , 1_000 ) for i in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)


dataset = make_dataset()
def triplet_sum1(arr: list[int] , target: int ) -> tuple[int, ...]:
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2(arr: list[int] , target: int ) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
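# Worked example of the two-pointer pass above: for arr = [1, 2, 4, 8] and
# target = 14, i = 0 gives 1 + 2 + 8 = 11 < 14 so `left` moves right, then
# 1 + 4 + 8 = 13 < 14 and the pointers cross; i = 1 gives 2 + 4 + 8 = 14,
# so (2, 4, 8) is returned in O(n^2) instead of the O(n^3) permutation scan.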
def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10_000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10_000 )
    return (min(times1 ), min(times2 ))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 285 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self, equal_length=False, numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = TvltFeatureExtractor

    def setUp( self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor, """spectrogram_length""" ) )
        self.assertTrue(hasattr(feature_extractor, """feature_size""" ) )
        self.assertTrue(hasattr(feature_extractor, """num_audio_channels""" ) )
        self.assertTrue(hasattr(feature_extractor, """hop_length""" ) )
        self.assertTrue(hasattr(feature_extractor, """chunk_length""" ) )
        self.assertTrue(hasattr(feature_extractor, """sampling_rate""" ) )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("""mel_filters""" )
        mel_second = dict_second.pop("""mel_filters""" )
        self.assertTrue(np.allclose(mel_first, mel_second ) )
        self.assertEqual(dict_first, dict_second )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("""mel_filters""" )
        mel_second = dict_second.pop("""mel_filters""" )
        self.assertTrue(np.allclose(mel_first, mel_second ) )
        self.assertEqual(dict_first, dict_second )
    def test_call( self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="""np""", sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="""np""", sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="""np""", sampling_rate=44100, mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="""np""", sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self, num_samples ):
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""", """clean""", split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4 ) )
| 116 |
from datetime import datetime as dt
import os
from github import Github
__A : Dict = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i : i.created_at, reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
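# Typical invocation (a sketch; the script only needs the GITHUB_TOKEN
# environment variable it reads above, and in practice it is run from a
# scheduled CI workflow rather than by hand):
#     GITHUB_TOKEN=<token> python scripts/stale.py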
| 138 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__snake_case : Optional[int] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__( self , args ):
        """simple docstring"""
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )

    def forward( self , input_modal ):
        """simple docstring"""
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        """simple docstring"""
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__( self ):
        """simple docstring"""
        return len(self.data )

    def __getitem__( self , index ):
        """simple docstring"""
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=True ) )
        start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]['label']]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
        image = self.transforms(image )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies( self ):
        """simple docstring"""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'] )
        return label_freqs
def collate_fn( batch ):
    """simple docstring"""
    lens = [len(row['sentence'] ) for row in batch]
    bsz , max_seq_len = len(batch ), max(lens )

    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row['image'] for row in batch] )
    tgt_tensor = torch.stack([row['label'] for row in batch] )
    img_start_token = torch.stack([row['image_start_token'] for row in batch] )
    img_end_token = torch.stack([row['image_end_token'] for row in batch] )

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
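# Shape sketch for collate_fn: with a batch of three rows whose tokenized
# sentences have lengths 5, 9 and 7, text_tensor and mask_tensor come out as
# (3, 9) LongTensors, the mask holding 1 over each row's real tokens and 0
# over the right-padding; the image and label tensors are simply stacked.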
def get_mmimdb_labels():
    """simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    """simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
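# Usage sketch (hypothetical path; the pipeline resizes to 256, center-crops
# to 224 and normalizes with the dataset-specific channel statistics above):
#     transforms = get_image_transforms()
#     image_tensor = transforms(Image.open("poster.jpg").convert("RGB"))  # 3x224x224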
| 122 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : Dict = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
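# The _LazyModule indirection above defers importing the heavy torch-backed
# submodules until one of the listed names is first accessed, so merely
# importing this package stays cheap when only the configuration is needed.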
| 122 | 1 |
"""simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}
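# Representation note: throughout this module a term of the sequence is kept
# as a little-endian list of decimal digits (digits[j] is the 10**j place),
# `base` caches powers of ten, and `memo` caches previously computed jumps
# keyed by the digit sum of the high-order part of the number.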
def next_term(a_i , k , i , n ):
    '''simple docstring'''
    # ds_b -> digit sum of the high part b; c -> the low-order digits as an integer
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )

    diff , dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b )

    if sub_memo is not None:
        jumps = sub_memo.get(c )

        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff , dn , _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c , a_i[j] = divmod(new_c , 1_0 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff , terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff , terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute(a_i , k , i , n ):
    '''simple docstring'''
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b , ds_c , diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend , a_i[j] = divmod(s , 1_0 )
            ds_c += a_i[j]
        if addend > 0:
            break

    if addend > 0:
        add(a_i , k , addend )
    return diff, i - start_i
def add(digits , k , addend ):
    '''simple docstring'''
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 1_0:
            quotient , digits[j] = divmod(s , 1_0 )
            addend = addend // 1_0 + quotient
        else:
            digits[j] = s
            addend = addend // 1_0

        if addend == 0:
            break

    while addend > 0:
        addend , digit = divmod(addend , 1_0 )
        digits.append(digit )
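# Tiny worked example of add(): digits = [1, 2] encodes 21; add(digits, 0, 5)
# puts 1 + 5 = 6 in the ones place with no carry, leaving [6, 2], i.e. 26.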
def solution(n: int = 1_0**1_5 ) -> int:
    '''simple docstring'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff , terms_jumped = next_term(digits , 2_0 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 1_0**j
    return a_n
if __name__ == "__main__":
print(F"{solution() = }")
| 108 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase ):
    """simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase : Dict = os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname , "bart_tokenizer" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_dpr_tokenizer( self ):
        """simple docstring"""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )

    def get_bart_tokenizer( self ):
        """simple docstring"""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )

    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained( self ):
        """simple docstring"""
        save_dir = os.path.join(self.tmpdirname , "rag_tokenizer" )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer( self ):
        """simple docstring"""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer( self ):
        """simple docstring"""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 108 | 1 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter (year : int ):
    """simple docstring"""
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
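# Worked example for 2023: metonic_cycle = 9, days_to_add = 15 and
# days_from_phm_to_sunday = 3, so the result is March 22 + 18 days = April 9,
# 2023, which matches the actual Gregorian Easter Sunday that year.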
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
__lowercase = '''will be''' if year > datetime.now().year else '''was'''
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
| 85 | """simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def longformer_dict_integration_testing( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=True ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=True ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        sequence = '''Encode this sequence.'''
        space_encoding = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )

        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )

        # Testing spaces after special tokens
        mask = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = '''Encode <mask> sequence'''
        sequence_nospace = '''Encode <mask>sequence'''
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs( self ):
        '''simple docstring'''
        pass
    def test_embeded_special_tokens( self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
    def test_change_add_prefix_space_and_trim_offsets_args( self ):
        '''simple docstring'''
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''trim_offsets'''] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments( self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"""{text_of_1_token} {text_of_1_token}"""

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                text = f""" {text}"""

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
| 85 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , do_normalize=True , num_mel_bins=80 , hop_length=16 , win_length=64 , win_function="hann_window" , fmin=80 , fmax=7600 , mel_floor=1e-10 , return_attention_mask=True , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict( self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target( self , equal_length=False , numpify=False ):
        """simple docstring"""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp( self ):
        """simple docstring"""
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        """simple docstring"""
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )
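    # The helper above is the normalization oracle used by the padding tests
    # below: for every feature dimension it checks that the mean is ~0 and the
    # variance is ~1, i.e. that zero-mean unit-variance normalization was
    # applied over the un-padded portion of each example.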
    def test_call( self ):
        """simple docstring"""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors="""np""" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors="""np""" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_zero_mean_unit_variance_normalization_np( self ):
        """simple docstring"""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]

        paddings = ["""longest""", """max_length""", """do_not_pad"""]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , padding=padding , max_length=max_length , return_tensors="""np""" )
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def test_zero_mean_unit_variance_normalization( self ):
        """simple docstring"""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lengths = range(800 , 1400 , 200 )
        speech_inputs = [floats_list((1, x) )[0] for x in lengths]

        paddings = ["""longest""", """max_length""", """do_not_pad"""]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , max_length=max_length , padding=padding )
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length( self ):
        """simple docstring"""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ):
        """simple docstring"""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )

        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )
    def test_double_precision_pad( self ):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def test_call_target( self ):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs , padding=True , return_tensors="""np""" ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_batch_feature_target( self ):
        """simple docstring"""
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs} )

        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def test_batch_feature_target_pt( self ):
        """simple docstring"""
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03,
3.05_18e-04, 9.15_53e-05, 3.35_69e-04, 9.76_56e-04, 1.83_11e-03,
2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04,
4.57_76e-04, 1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03,
7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03, 8.85_01e-04,
4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
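
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original test file: minimal end-to-end
# use of the feature extractor exercised above. It assumes `transformers` is
# installed; shapes follow the assertions in the tests.
#
#   import numpy as np
#   from transformers import SpeechT5FeatureExtractor
#
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.random.rand(16_000).astype(np.float64)  # one second at 16 kHz
#   inputs = extractor(waveform, sampling_rate=16_000, return_tensors="np")
#   inputs.input_values.shape   # (1, 16000): raw waveform for the encoder
#   targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
#   targets.input_values.shape  # (1, n_frames, 80): log-mel target for the decoder
# ---------------------------------------------------------------------------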
| 122 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
return out_vocab_file, out_monolingual_vocab_file
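
# Illustrative sketch, not part of the original file: round-tripping Vietnamese
# text through the tokenizer defined above. Requires network access plus the
# `sentencepiece` package; the checkpoint name comes from
# PRETRAINED_VOCAB_FILES_MAP.
#
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#   tokenizer.convert_ids_to_tokens(ids)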
| 122 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
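
# Illustrative sketch, not part of the original test file: the registration
# pattern exercised above, reduced to its essentials. `MyConfig` and
# `MyFeatureExtractor` are hypothetical names used only for this example.
#
#   from transformers import PretrainedConfig
#   from transformers.feature_extraction_utils import FeatureExtractionMixin
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   class MyFeatureExtractor(FeatureExtractionMixin):
#       pass
#
#   AutoConfig.register("my-model", MyConfig)
#   AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
#   # AutoFeatureExtractor now resolves checkpoints whose config is MyConfig.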
| 366 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system Ax = b with Gaussian elimination and back substitution."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the matrix and the vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Fit a polynomial through the points (1, y_1), (2, y_2), ... and return it as a callable."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
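
# Quick sanity check (an addition, not part of the original solution):
# interpolating the first three values of f(x) = x**2 yields a polynomial that
# reproduces f everywhere, which is why `solution` only adds a term once a
# fitted polynomial first disagrees with `question_function`.
#
#   f = interpolate([1, 4, 9])  # y-values of x**2 at x = 1, 2, 3
#   assert [f(x) for x in (1, 2, 3, 4)] == [1, 4, 9, 16]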
| 343 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
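
# Illustrative sketch, not part of the original file: the fairseq/spm
# bookkeeping above reserves ids 0-14 (five specials plus ten [unusedN] slots)
# and shifts every sentencepiece id by `fairseq_offset`. Requires network
# access and `sentencepiece`:
#
#   tokenizer = XLMProphetNetTokenizer.from_pretrained(
#       "microsoft/xprophetnet-large-wiki100-cased")
#   spm_id = tokenizer.sp_model.PieceToId("▁de")
#   assert tokenizer._convert_token_to_id("▁de") == spm_id + tokenizer.fairseq_offset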
| 336 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph, root):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def _A ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
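
# Small worked example (an addition, not part of the original module): a
# triangle 0-1 (weight 1), 1-2 (weight 2), 0-2 (weight 4); both Prim variants
# keep the two cheapest edges. `connect` takes 1-based vertex positions.
#
#   g = [Vertex(n) for n in range(3)]
#   connect(g, 1, 2, 1)
#   connect(g, 2, 3, 2)
#   connect(g, 1, 3, 4)
#   prim(g, g[0])             # [(2, 1), (3, 2)]
#   list(prim_heap(g, g[0]))  # same MST edges, heap-based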
| 164 | 0 |
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
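
# Explanatory note (an addition): the line above is a printf-style quine. The
# string argument contains %r, so `quine % quine` substitutes the string's own
# repr into itself, and printing the result reproduces the source exactly.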
| 359 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
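
# Example invocation (an addition, not part of the original script); the paths
# are placeholders:
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path /path/to/vae.ckpt \
#       --dump_path ./converted-vae
#
# The dump directory can then be loaded with
# `AutoencoderKL.from_pretrained("./converted-vae")`.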
| 26 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs,
        )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `name` and `factor`, '
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
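
# Illustrative sketch, not part of the original file: a config that passes the
# RoPE-scaling validation above, and one that it rejects.
if __name__ == "__main__":
    ok = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(ok.max_position_embeddings)  # 2048 base positions, scaled 2x at run time
    try:
        LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})
    except ValueError as err:
        print(err)  # name field must be one of ['linear', 'dynamic']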
| 285 |
def dodecahedron_surface_area(edge):
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError('Length must be a positive.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge):
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError('Length must be a positive.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
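
# Worked example (an addition, not part of the original module): for edge
# length 1 the closed forms above give the unit dodecahedron's measurements.
#
#   dodecahedron_surface_area(1)  # 3 * sqrt(25 + 10*sqrt(5))  ~ 20.64573
#   dodecahedron_volume(1)        # (15 + 7*sqrt(5)) / 4       ~ 7.66312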
| 285 | 1 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
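
# Example invocation (an addition, not part of the original CLI): fire exposes
# `calculate_rouge_path`'s signature as command-line flags, so extra keyword
# flags are forwarded to `calculate_rouge`:
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json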
| 223 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
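
# Illustrative sketch, not part of the original file: `attribute_map` lets the
# CTRL-specific names be read through the generic config aliases.
if __name__ == "__main__":
    config = CTRLConfig()
    assert config.hidden_size == config.n_embd == 1280
    assert config.num_hidden_layers == config.n_layer == 48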
| 223 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
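
# Illustrative sketch, not part of the original test file: the tiny BPE
# fixture written in setUp() tokenizes "lower" into one merge plus a
# word-final piece.
#
#   tokenizer = OpenAIGPTTokenizer(vocab_file, merges_file)  # fixture paths from setUp
#   tokenizer.tokenize("lower")  # ["low", "er</w>"]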
| 157 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
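
# Illustrative sketch, not part of the original file: loading an ONNX pipeline
# component and running it directly. The repo id and input name are
# assumptions made for the example.
#
#   model = OnnxRuntimeModel.from_pretrained(
#       "some-org/some-onnx-model", provider="CPUExecutionProvider")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))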
| 157 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
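
# Worked example (an addition, not part of the original module): merging stays
# sorted because the constructor re-sorts the concatenated values.
#
#   print(merge_lists(SSL((3, 1, 2)), SSL((0, 4))))  # 0 -> 1 -> 2 -> 3 -> 4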
| 272 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = 'SpeechT5FeatureExtractor'
    tokenizer_class = 'SpeechT5Tokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop('''audio''', None)
        text = kwargs.pop('''text''', None)
        text_target = kwargs.pop('''text_target''', None)
        audio_target = kwargs.pop('''audio_target''', None)
        sampling_rate = kwargs.pop('''sampling_rate''', None)

        if audio is not None and text is not None:
            raise ValueError(
                '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets['''input_values''']
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets['''input_ids''']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''')
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask

        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop('''input_values''', None)
        input_ids = kwargs.pop('''input_ids''', None)
        labels = kwargs.pop('''labels''', None)

        if input_values is not None and input_ids is not None:
            raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets['''input_ids''']
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['''input_values''']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''')
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask

        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
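
# Illustrative sketch, not part of the original file: the processor routes
# `text`/`audio` to model inputs and `text_target`/`audio_target` to labels.
# Assumes `transformers` is installed and `waveform` is a 1-D float array.
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   batch = processor(text="Hello world", audio_target=waveform,
#                     sampling_rate=16_000, return_tensors="pt")
#   batch["input_ids"]  # tokenized text for the encoder
#   batch["labels"]     # mel-spectrogram targets for the decoder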
| 272 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.", FutureWarning, )
        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens)))
    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
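# Usage sketch (illustrative; assumes access to the `t5-small` checkpoint).
# Sentinel tokens are the <extra_id_N> placeholders T5 uses for span corruption:
# tok = T5TokenizerFast.from_pretrained("t5-small")
# print(tok.get_sentinel_tokens()[:2])      # e.g. ['<extra_id_0>', '<extra_id_1>']
# print(tok.get_sentinel_token_ids()[:2])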
| 13 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte value to a printable unicode character, so byte-level BPE is reversible."""
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
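# Quick sanity checks (illustrative): the byte map covers all 256 byte values
# bijectively, and get_pairs enumerates the candidate BPE merges of a word.
# assert len(bytes_to_unicode()) == 256
# assert get_pairs(('h', 'e', 'l', 'l', 'o')) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}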
class LongformerTokenizer(PreTrainedTokenizer):
    '''Byte-level BPE tokenizer for Longformer (GPT-2/RoBERTa style).'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    def vocab_size(self):
        return len(self.encoder )
    def get_vocab(self):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self, index):
        return self.decoder.get(index )
    def convert_tokens_to_string(self, tokens):
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
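# Round-trip sketch (illustrative; assumes local vocab.json/merges.txt files):
# _tokenize maps text to byte-level BPE symbols and convert_tokens_to_string
# inverts that mapping exactly, thanks to the bijective byte map above.
# tok = LongformerTokenizer('vocab.json', 'merges.txt')
# toks = tok._tokenize('Hello world')
# assert tok.convert_tokens_to_string(toks) == 'Hello world'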
| 319 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/pegasus-xsum': 512,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"""additional_special_tokens should be of type {type(list )}, but is"""
                    f""" {type(additional_special_tokens )}""" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                } )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model ) + self.offset
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
            return token
    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False):
        return 1
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
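# Offset sketch (illustrative; assumes a Pegasus spiece.model is available):
# ids 0/1 map to <pad>/</s>, ids 2/3 to the pretraining mask tokens, the next
# ids to the <unk_x> placeholders, and every sentencepiece id is shifted by
# `offset` (103 by default), exactly as _convert_token_to_id implements above.
# tok = PegasusTokenizer('spiece.model')
# sp_id = tok.sp_model.piece_to_id('▁the')
# assert tok._convert_token_to_id('▁the') == sp_id + tok.offset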
| 371 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of input_num (all divisors up to input_num // 2)."""
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer' )
    if input_num <= 0:
        raise ValueError('Input must be positive' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
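# Quick check (illustrative): the proper divisors of 6 are 1, 2, 3 (summing to
# 6, a perfect number), and for 12 they are 1, 2, 3, 4, 6, summing to 16.
# assert sum_of_divisors(6) == 6 and sum_of_divisors(12) == 16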
if __name__ == "__main__":
import doctest
doctest.testmod()
| 120 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 27 |
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('<n>', '', x )  # remove pegasus newline char (keep the substituted result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 284 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self )
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_flip_channel_order''' ) )
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 116 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    '''Repeat the key until it matches the length of the message.'''
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    '''Encrypt the message with the repeated key; spaces pass through unchanged.'''
    cipher = ''
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher += dict2[x]
    return cipher
def original_text(cipher_text: str, key_new: str) -> str:
    '''Decrypt the cipher text with the repeated key (inverse of cipher_text).'''
    or_txt = ''
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
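# Worked letter (illustrative): for message letter 'T' (19) and key letter 'S'
# (18), encryption gives (19 - 18) % 26 = 1 -> 'B', and decryption recovers
# (1 + 18 + 26) % 26 = 19 -> 'T'.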
def main() -> None:
    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    key_new = generate_key(message , key )
    s = cipher_text(message , key_new )
    print(F"""Encrypted Text = {s}""" )
    print(F"""Original Text = {original_text(s , key_new )}""" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 116 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 212 |
def heaps(arr: list) -> list:
    """Generate all permutations of arr with Heap's algorithm."""
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
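# Illustrative trace: heaps([1, 2, 3]) yields all 3! = 6 orderings, each one a
# single swap away from the previous:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]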
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
| 212 | 1 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''Quick sort `a[start:end + 1]` in place; returns the number of comparisons.'''
    count = 0
    if start < end:
        pivot = randint(start, end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end )
        count += _in_place_quick_sort(a, start, p - 1 )
        count += _in_place_quick_sort(a, p + 1, end )
    return count
def _in_place_partition(a, start, end):
    '''Partition around a random pivot; returns (pivot index, comparison count).'''
    count = 0
    pivot = randint(start, end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
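# Usage sketch (illustrative): sort a list in place and get the comparison count.
# data = [3, 1, 2]
# comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
# assert data == [1, 2, 3]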
outfile = TemporaryFile()
p = 1_0_0  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    'is :'
)
print(z)
| 222 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''.''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 1.0,
            '''sag_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=7_68 , height=5_12 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
        image = output.images
        assert image.shape == (1, 5_12, 7_68, 3)
| 222 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("""google/mt5-small""" )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
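# Note (illustrative): the `.loss` returned here is a per-token cross-entropy,
# so `mtf_score` above is a negative mean log-likelihood of the target tokens;
# a longer or shorter target changes the mean, not the comparison logic.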
| 246 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32 ).newbyteorder(""">""" )
    return numpy.frombuffer(bytestream.read(4 ), dtype=dt )[0]
@deprecated(_lowerCAmelCase, """Please use tf.data to implement this functionality.""" )
def UpperCamelCase ( _lowerCAmelCase : int ) -> Optional[Any]:
print("""Extracting""", f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
_UpperCAmelCase : Tuple = _readaa(_lowerCAmelCase )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
_UpperCAmelCase : Optional[int] = _readaa(_lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = _readaa(_lowerCAmelCase )
_UpperCAmelCase : List[str] = _readaa(_lowerCAmelCase )
_UpperCAmelCase : List[str] = bytestream.read(rows * cols * num_images )
_UpperCAmelCase : Optional[Any] = numpy.frombuffer(_lowerCAmelCase, dtype=numpy.uinta )
_UpperCAmelCase : Any = data.reshape(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, 1 )
return data
@deprecated(_lowerCAmelCase, """Please use tf.one_hot on tensors.""" )
def UpperCamelCase ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple ) -> Union[str, Any]:
_UpperCAmelCase : int = labels_dense.shape[0]
_UpperCAmelCase : Any = numpy.arange(_lowerCAmelCase ) * num_classes
_UpperCAmelCase : Tuple = numpy.zeros((num_labels, num_classes) )
_UpperCAmelCase : Dict = 1
return labels_one_hot
@deprecated(_lowerCAmelCase, """Please use tf.data to implement this functionality.""" )
def UpperCamelCase ( _lowerCAmelCase : Optional[int], _lowerCAmelCase : Optional[int]=False, _lowerCAmelCase : Optional[Any]=10 ) -> Union[str, Any]:
print("""Extracting""", f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
_UpperCAmelCase : Tuple = _readaa(_lowerCAmelCase )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
_UpperCAmelCase : str = _readaa(_lowerCAmelCase )
_UpperCAmelCase : Dict = bytestream.read(_lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = numpy.frombuffer(_lowerCAmelCase, dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_lowerCAmelCase, _lowerCAmelCase )
return labels
class _DataSet:
    @deprecated(
        None , """Please use alternatives such as official/mnist/_DataSet.py"""
        """ from tensorflow/models.""" , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        seed1, seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
        if fake_data:
            self._num_examples = 1_00_00
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images
    @property
    def labels(self):
        return self._labels
    @property
    def num_examples(self):
        return self._num_examples
    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self , batch_size , fake_data=False , shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 7_84
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples )
            numpy.random.shuffle(perm0 )
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
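# Epoch-wraparound sketch (illustrative): with 4 examples and batch_size=3, the
# second call crosses the epoch boundary and concatenates the one leftover
# example with the start of a freshly reshuffled epoch.
# imgs = numpy.zeros((4, 28, 28, 1), dtype=numpy.uint8)
# ds = _DataSet(imgs, numpy.arange(4), reshape=True, seed=42)
# xb, yb = ds.next_batch(3); xb2, yb2 = ds.next_batch(3)  # second batch spans epochs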
@deprecated(_lowerCAmelCase, """Please write your own downloading logic.""" )
def UpperCamelCase ( _lowerCAmelCase : int, _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
if not gfile.Exists(_lowerCAmelCase ):
gfile.MakeDirs(_lowerCAmelCase )
_UpperCAmelCase : Optional[int] = os.path.join(_lowerCAmelCase, _lowerCAmelCase )
if not gfile.Exists(_lowerCAmelCase ):
urllib.request.urlretrieve(_lowerCAmelCase, _lowerCAmelCase ) # noqa: S310
with gfile.GFile(_lowerCAmelCase ) as f:
_UpperCAmelCase : Optional[int] = f.size()
print("""Successfully downloaded""", _lowerCAmelCase, _lowerCAmelCase, """bytes.""" )
return filepath
@deprecated(
    None, """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = """train-images-idx3-ubyte.gz"""
    train_labels_file = """train-labels-idx1-ubyte.gz"""
    test_images_file = """t10k-images-idx3-ubyte.gz"""
    test_labels_file = """t10k-labels-idx1-ubyte.gz"""
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file )
    with gfile.Open(local_file, """rb""" ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file )
    with gfile.Open(local_file, """rb""" ) as f:
        train_labels = _extract_labels(f, one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file )
    with gfile.Open(local_file, """rb""" ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file )
    with gfile.Open(local_file, """rb""" ) as f:
        test_labels = _extract_labels(f, one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            """Validation size should be between 0 and """
            f'''{len(train_images )}. Received: {validation_size}.'''
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
    train = _DataSet(train_images, train_labels, **options )
    validation = _DataSet(validation_images, validation_labels, **options )
    test = _DataSet(test_images, test_labels, **options )
    return _Datasets(train=train, validation=validation, test=test )
| 246 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend('spark' ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {'a': 1, 'b': 2}
    s3 = {'a': [1, 2], 'b': [3, 4]}
    s4 = {'a': {'1': 1}, 'b': 2}
    s5 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'a': 2, 'b': 3}
    expected_map_nested_s3 = {'a': [2, 3], 'b': [4, 5]}
    expected_map_nested_s4 = {'a': {'1': 2}, 'b': 3}
    expected_map_nested_s5 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    with parallel_backend('spark' ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
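# map_nested applies the function to every leaf while preserving the container
# structure, e.g. (illustrative) map_nested(add_one, {'a': [1, 2]}) == {'a': [2, 3]}.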
| 363 |
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree level by level from user input."""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise ValueError("Unreachable: the queue only empties via a return above")
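
# Example session for build_tree() (illustrative input; entering "n" stops
# input immediately and returns the root built so far):
#
#     Enter the value of the root node: 1
#     Enter the left node of 1: 2
#     Enter the right node of 1: 3
#     Enter the left node of 2: n
#
# yields the tree 1 -> (left=2, right=3).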
def pre_order(node: TreeNode) -> None:
    """Print the tree in root -> left -> right order."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Print the tree in left -> root -> right order."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Print the tree in left -> right -> root order."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")

def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a FIFO queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal that prints each level on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # drain the current level before moving on
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)

def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")

def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A_ : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
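
# Non-interactive usage sketch (assumption: wire nodes by hand instead of the
# interactive build_tree() prompt; the values are arbitrary illustration):
#
#     root = TreeNode(1)
#     root.left, root.right = TreeNode(2), TreeNode(3)
#     root.left.left, root.left.right = TreeNode(4), TreeNode(5)
#     pre_order(root)    # prints 1,2,4,5,3,
#     in_order(root)     # prints 4,2,5,1,3,
#     post_order(root)   # prints 4,5,2,3,1,
#     level_order(root)  # prints 1,2,3,4,5,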
| 316 | 0 |