from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
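# Lazy import structure: torch-backed classes are registered by name here and
# only materialized on first attribute access (or eagerly under TYPE_CHECKING
# so static analyzers still see the real symbols).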
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
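
# Helpers for seq2seq / RAG fine-tuning: line-level tokenization, padding-column
# trimming, a file-backed dataset, and SQuAD-style EM/F1 metrics.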
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize one line of text, padded/truncated to max_length."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None,
        truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop columns that contain only padding across the whole batch."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
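
# Illustrative: trim_batch(torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]]), 0)
# returns [[5, 6], [7, 0]] - the last two columns were padding in every row.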
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
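
# Illustrative usage (paths and model name are placeholders):
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   ds = Seq2SeqDataset(tokenizer, "data_dir", max_source_length=1024, max_target_length=56)
#   loader = torch.utils.data.DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)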
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
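
# Example: f1_score("The Cat sat.", "cat sat") == 1.0 - case, punctuation, and
# articles are stripped by normalize_answer before token overlap is scored.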
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names,
            dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format,
            enable_onnx_checker=True, opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names,
            dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
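
# The exported decoder lands at <output_path>/vae_decoder/model.onnx; batch,
# channels, height, and width are declared as dynamic axes, so the 25x25 latent
# used for tracing does not fix the input size.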
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
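
# These tests pin the expected test <-> tester wiring for BERT and BLIP; if
# those model test files are refactored, the expected mappings below must be
# updated to match.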
bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
blip_test_file = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file)

        expected_bert_mapping = {"BertModelTest": "BertModelTester"}
        expected_blip_mapping = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file)

        expected_bert_mapping = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        expected_blip_mapping = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file)

        expected_bert_mapping = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        expected_blip_mapping = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
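
# Example rename: "blocks.3.att.key.weight" -> "rwkv.blocks.3.attention.key.weight".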
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor
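
# LeViT downsamples the image with four stride-2 convolutions before attention,
# so the expected sequence lengths below are derived by applying the conv formula
# floor((size + 2 * padding - kernel_size) / stride) + 1 four times.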
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [height * width, self.model_tester.hidden_sizes[0]],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self ) -> str:
pass
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def pancake_sort(arr):
    """Sort a list by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole prefix of length cur
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
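
# Example: pancake_sort([3, 1, 2]) flips to [2, 1, 3] on the first pass, then to
# [1, 2, 3]; each pass moves the current maximum to position cur - 1.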
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
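
# attribute_map lets generic code read config.hidden_size / config.num_attention_heads
# even though Pegasus stores them as d_model / encoder_attention_heads.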
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
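
# Search-index tests: the FAISS cases run against real in-memory indexes, while
# the Elasticsearch client is fully mocked so no live cluster is required.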
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)
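
# np.maximum broadcasts the scalar 0, so this accepts Python lists, 1-D arrays,
# or matrices alike.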
if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the mean absolute deviation of `nums` from their average.

    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
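
# ids 0 and 1 are <pad> and </s>; the remaining ids below `offset` (103) are
# reserved for the mask sentinels and the <unk_x> placeholders filled in below.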
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Any = PegasusTokenizer
_UpperCAmelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple , lowercase : str=None , lowercase : Any=None , lowercase : List[Any]="<pad>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<unk>" , lowercase : Any="<mask_2>" , lowercase : List[str]="<mask_1>" , lowercase : List[Any]=None , lowercase : Dict=103 , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowercase )}, but is'''
f''' {type(lowercase )}''' )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowercase , tokenizer_file=lowercase , pad_token=lowercase , eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , **lowercase , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
    def A ( self : List[str] , seq : Optional[int] ):
        '''simple docstring'''
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
        return [1 if x in all_special_ids else 0 for x in seq]
    def A ( self : List[Any] , token_ids_a : List , token_ids_b : Optional[List] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a )
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b ) + [1]
    def A ( self : Any , token_ids_a : Tuple , token_ids_b : Any=None ):
        '''simple docstring'''
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def A ( self : int , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,) | 282 | 0 |
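# Hedged sketch of the "<unk_i>" filling performed in __init__ above, shown in
# isolation so the id layout is easier to follow. The function name and the
# defaults are assumptions for this demo, not part of the tokenizer itself.
def _demo_fill_unk_tokens(mask_token_sent='<mask_1>' , additional_special_tokens=None , offset=103 ):
    tokens = [mask_token_sent] if mask_token_sent is not None else []
    tokens += list(additional_special_tokens or [] )
    # pad with "<unk_i>" placeholders so the reserved id range below `offset` is fully covered
    tokens += [f'''<unk_{i}>''' for i in range(len(tokens ) , offset - 1 )]
    return tokens
# _demo_fill_unk_tokens() yields ['<mask_1>', '<unk_1>', ..., '<unk_101>'] - 102 tokens in total.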
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp ( self ):
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        image_processor_map = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
'do_convert_rgb': True,
}
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer ( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer ( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor ( self , **kwargs ):
        '''simple docstring'''
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown ( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs ( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _A ( self ):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=_A )
__SCREAMING_SNAKE_CASE = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(_A , return_tensors='np' )
__SCREAMING_SNAKE_CASE = processor(images=_A , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE = 'Alexandra,T-shirt的价格是15便士。'
__SCREAMING_SNAKE_CASE = processor(text=_A )
__SCREAMING_SNAKE_CASE = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE = 'Alexandra,T-shirt的价格是15便士。'
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(_A )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE = 'Alexandra,T-shirt的价格是15便士。'
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
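# Hedged sketch (not part of the test file above): the processor pattern being
# tested routes `text` to the tokenizer and `images` to the image processor and
# merges the two outputs, which is why the assertions compare against each
# component individually. Function name and signature are assumptions.
def _demo_processor_call(tokenizer , image_processor , text=None , images=None ):
    if text is None and images is None:
        raise ValueError('You have to specify either text or images.' )
    outputs = {}
    if text is not None:
        outputs.update(tokenizer(text ) )
    if images is not None:
        outputs.update(image_processor(images , return_tensors='np' ) )
    return outputs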
| 118 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] =logging.get_logger(__name__)
def create_rename_keys ( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
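# Minimal sketch of the slicing above (assumed toy helper, not in the source):
# timm fuses query/key/value into one (3 * hidden_size, hidden_size) matrix,
# and the conversion cuts it into three equal blocks in q, k, v order.
def _demo_split_fused_qkv(in_proj_weight , hidden_size ):
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    return q, k, v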
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img ():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint ( deit_name , pytorch_dump_folder_path ):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 10_00
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith('tiny' ):
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small' ):
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base' ):
        pass
    elif deit_name[4:].startswith('large' ):
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (2_56 / 2_24) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase__ : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase__ : str =parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 118 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCamelCase__ = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode ():
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
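# Illustration (not in the source): the mapping covers all 256 byte values and
# is invertible; printable bytes map to themselves (e.g. byte 65 -> "A"), while
# the remaining control/whitespace bytes are remapped to unused code points
# starting at chr(256), so no byte ever encodes to an "invisible" character.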
def get_pairs (word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
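# Example (illustrative): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} - the candidate merges that
# the BPE loop below ranks against self.bpe_ranks.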
class lowerCAmelCase__ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase__ ( self : str ) ->Union[str, Any]:
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def lowerCAmelCase__ ( self : Tuple , token : List[Any] ) ->Dict:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def lowerCAmelCase__ ( self : Tuple , text : Optional[int] ) ->Dict:
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def lowerCAmelCase__ ( self : int , token : Dict ) ->str:
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def lowerCAmelCase__ ( self : Union[str, Any] , index : str ) ->List[str]:
        '''simple docstring'''
        return self.decoder.get(index )
    def lowerCAmelCase__ ( self : Any , tokens : int ) ->Union[str, Any]:
        '''simple docstring'''
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def lowerCAmelCase__ ( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ) ->Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def lowerCAmelCase__ ( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) ->List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def lowerCAmelCase__ ( self : List[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) ->List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def lowerCAmelCase__ ( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) ->List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def lowerCAmelCase__ ( self : str , text : Optional[Any] , is_split_into_words : List[Any]=False , **kwargs : Union[str, Any] ) ->List[str]:
        '''simple docstring'''
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def lowerCAmelCase__ ( self : List[str] , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ) ->dict:
        '''simple docstring'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
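# Hedged sketch of the padding rule implemented in _pad above (helper name and
# signature are assumptions): padded positions get -1 because 0 already means
# "local attention" for real tokens in LED's global_attention_mask.
def _demo_pad_global_attention_mask(mask , target_len , padding_side="right" ):
    difference = target_len - len(mask )
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask
# _demo_pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]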
| 234 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCamelCase__ = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
lowerCamelCase__ = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
lowerCamelCase__ = '▁'
class lowerCAmelCase__ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) ->None:
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def lowerCAmelCase__ ( self : Dict , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) ->List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
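    # Worked example (illustrative ids): a single sequence [5, 6] becomes
    # [cls, 5, 6, sep]; a pair ([5, 6], [7]) becomes [cls, 5, 6, sep, sep, 7, sep],
    # mirroring the RoBERTa-style double separator between segments.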
    def lowerCAmelCase__ ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) ->List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def lowerCAmelCase__ ( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) ->List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
return len(self.sp_model )
    def lowerCAmelCase__ ( self : int ) ->int:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def lowerCAmelCase__ ( self : int , text : str ) ->List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def lowerCAmelCase__ ( self : Dict , token : Union[str, Any] ) ->List[Any]:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def lowerCAmelCase__ ( self : str , index : Any ) ->Optional[Any]:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def lowerCAmelCase__ ( self : Any , tokens : List[Any] ) ->Optional[int]:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self : Dict ) ->List[Any]:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self : Any , d : Dict ) ->Tuple:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def lowerCAmelCase__ ( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None ) ->Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 234 | 1 |
from itertools import permutations
def is_substring_divisible ( num ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
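# Worked example from the Project Euler 43 statement: 1406357289 is 0-to-9
# pandigital and sub-string divisible, e.g. d2d3d4 = 406 is divisible by 2,
# d4d5d6 = 635 by 5, d5d6d7 = 357 by 7, and d8d9d10 = 289 by 17 (= 17 * 17).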
def solution ( n = 10 ) -> int:
    '''simple docstring'''
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 363 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
        scheduler = DDIMScheduler(
            beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=False ,set_alpha_to_one=False ,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5002 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
        tokenizer.model_max_length = 77
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs ( self ,device ,seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components["""text_encoder"""] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""prompt"""] = """A photo of an astronaut"""
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components["""text_encoder"""] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
        '''simple docstring'''
        alt_pipe = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] ,generator=generator ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
        '''simple docstring'''
        scheduler = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" ,subfolder="""scheduler""" )
        alt_pipe = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,scheduler=scheduler ,safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] ,generator=generator ,num_inference_steps=2 ,output_type="""numpy""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
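# Hedged helper sketch (assumed name, not in the source) for the determinism
# pattern used in every test above: seed the RNG, run the pipeline, then check
# a small corner slice of the output image against stored reference values.
def _demo_assert_image_slice_close(image , expected_slice , atol=1e-2 ):
    image_slice = image[0, -3:, -3:, -1]
    assert np.abs(image_slice.flatten() - expected_slice ).max() < atol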
| 193 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _snake_case ( DiffusionPipeline ):
    def __init__( self , transformer , vae , scheduler , idalabel = None , ):
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler )
        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split("," ):
                    self.labels[label.lstrip().rstrip()] = int(key )
            self.labels = dict(sorted(self.labels.items() ) )
    def get_label_ids ( self , label ):
        if not isinstance(label , list ):
            label = list(label )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__( self , class_labels , guidance_scale = 4.0 , generator = None , num_inference_steps = 50 , output_type = "pil" , return_dict = True , ):
        batch_size = len(class_labels )
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
        latent_model_input = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels , device=self.device ).reshape(-1 )
        class_null = torch.tensor([1_000] * batch_size , device=self.device )
        class_labels_input = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input ) // 2]
                latent_model_input = torch.cat([half, half] , dim=0 )
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            timesteps = t
            if not torch.is_tensor(timesteps ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps , float ):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                timesteps = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input , timestep=timesteps , class_labels=class_labels_input ).sample
            # perform guidance
            if guidance_scale > 1:
                eps , rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps , uncond_eps = torch.split(eps , len(eps ) // 2 , dim=0 )
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps] , dim=0 )
                noise_pred = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output , _ = torch.split(noise_pred , latent_channels , dim=1 )
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output , t , latent_model_input ).prev_sample
        if guidance_scale > 1:
            latents , _ = latent_model_input.chunk(2 , dim=0 )
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents ).sample
        samples = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples )
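# Hedged sketch of the classifier-free-guidance blend performed in the loop
# above (function name is an assumption): the batch holds conditional and
# null-class halves, and the two eps predictions are mixed by guidance_scale.
def _demo_cfg_combine(eps , guidance_scale ):
    cond_eps , uncond_eps = torch.split(eps , len(eps ) // 2 , dim=0 )
    half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    return torch.cat([half_eps, half_eps] , dim=0 )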
| 281 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args ()->Any:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m" , "--pretrained_model_name_or_path" , type=str , default=None , required=True , help="Path to pretrained model or model identifier from huggingface.co/models." , )
    parser.add_argument(
        "-c" , "--caption" , type=str , default="robotic cat with wings" , help="Text used to generate images." , )
    parser.add_argument(
        "-n" , "--images_num" , type=int , default=4 , help="How much images to generate." , )
    parser.add_argument(
        "-s" , "--seed" , type=int , default=42 , help="Seed for random process." , )
    parser.add_argument(
        "-ci" , "--cuda_id" , type=int , default=0 , help="cuda_id." , )
    args = parser.parse_args()
return args
def image_grid ( imgs , rows , cols ):
    '''simple docstring'''
    if not len(imgs ) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct." )
    w , h = imgs[0].size
    grid = Image.new("RGB" , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
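# Worked example (illustrative): four 64x64 images with rows=2, cols=2 produce a
# 128x128 grid; image i is pasted at box (i % 2 * 64, i // 2 * 64), filling the
# grid row by row.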
def generate_images ( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    '''simple docstring'''
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
SCREAMING_SNAKE_CASE :Dict = parse_args()
# Load models and create wrapper for stable diffusion
SCREAMING_SNAKE_CASE :Optional[int] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
SCREAMING_SNAKE_CASE :Tuple = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
SCREAMING_SNAKE_CASE :List[str] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
SCREAMING_SNAKE_CASE :Optional[int] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
SCREAMING_SNAKE_CASE :List[Any] = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
SCREAMING_SNAKE_CASE :Dict = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
SCREAMING_SNAKE_CASE :Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
SCREAMING_SNAKE_CASE :Union[str, Any] = unet.to(torch.device('''cuda''', args.cuda_id))
SCREAMING_SNAKE_CASE :Optional[int] = pipeline.to(unet.device)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Optional[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
SCREAMING_SNAKE_CASE :Optional[Any] = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 159 | 0 |
"""simple docstring"""
def manhattan_distance (point_a , point_b ) ->float:
    """simple docstring"""
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("Both points must be in the same n-dimensional space" )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point (point ) ->None:
    """simple docstring"""
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f'''{type(item ).__name__}'''
                    )
                    raise TypeError(msg )
        else:
            msg = f'''Expected a list of numbers as input, found {type(point ).__name__}'''
            raise TypeError(msg )
    else:
        raise ValueError("Missing an input" )
def manhattan_distance_one_liner (point_a , point_b ) ->float:
    """simple docstring"""
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("Both points must be in the same n-dimensional space" )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
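# Worked example (illustrative): manhattan_distance([1, 1], [4, 5]) returns
# |1 - 4| + |1 - 5| = 3 + 4 = 7.0, and manhattan_distance_one_liner agrees.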
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = """pt"""
elif is_tf_available():
    FRAMEWORK = """tf"""
else:
    FRAMEWORK = """jax"""
class __UpperCAmelCase( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp ( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def perceiver_tokenizer ( self ):
        '''simple docstring'''
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
    def get_tokenizer ( self , **kwargs ):
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence ( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        '''simple docstring'''
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(r"^[ a-zA-Z]+$" , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.perceiver_tokenizer
lowercase__ : Union[str, Any]= "Unicode €."
lowercase__ : Tuple= tokenizer(snake_case__ )
lowercase__ : Dict= [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , snake_case__ )
# decoding
lowercase__ : List[str]= tokenizer.decode(snake_case__ )
self.assertEqual(snake_case__ , "[CLS]Unicode €.[SEP]" )
lowercase__ : List[str]= tokenizer("e è é ê ë" )
lowercase__ : int= [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , snake_case__ )
# decoding
lowercase__ : Any= tokenizer.decode(snake_case__ )
self.assertEqual(snake_case__ , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.perceiver_tokenizer
lowercase__ : Tuple= ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ : List[Any]= [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
lowercase__ : Dict= tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
if FRAMEWORK != "jax":
lowercase__ : List[Any]= list(batch.input_ids.numpy()[0] )
else:
lowercase__ : str= list(batch.input_ids.tolist()[0] )
self.assertListEqual(snake_case__ , snake_case__ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= self.perceiver_tokenizer
lowercase__ : int= ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ : Union[str, Any]= tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , snake_case__ )
self.assertIn("attention_mask" , snake_case__ )
self.assertNotIn("decoder_input_ids" , snake_case__ )
self.assertNotIn("decoder_attention_mask" , snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= self.perceiver_tokenizer
lowercase__ : int= [
"Summary of the text.",
"Another summary.",
]
lowercase__ : List[Any]= tokenizer(
text_target=snake_case__ , max_length=32 , padding="max_length" , truncation=snake_case__ , return_tensors=snake_case__ )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# safety check on max_len default value so we are sure the test works
lowercase__ : Union[str, Any]= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowercase__ : List[str]= self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ : str= tempfile.mkdtemp()
lowercase__ : str= " He is very happy, UNwant\u00E9d,running"
lowercase__ : int= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
tokenizer.save_pretrained(snake_case__ )
lowercase__ : List[str]= tokenizer.__class__.from_pretrained(snake_case__ )
lowercase__ : Tuple= after_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
shutil.rmtree(snake_case__ )
lowercase__ : List[str]= self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ : Tuple= tempfile.mkdtemp()
lowercase__ : List[Any]= " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ : Tuple= tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ : List[str]= tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
tokenizer.save_pretrained(snake_case__ )
lowercase__ : Tuple= tokenizer.__class__.from_pretrained(snake_case__ )
lowercase__ : Tuple= after_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowercase__ : Tuple= tokenizer.__class__.from_pretrained(snake_case__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(snake_case__ )
with open(os.path.join(snake_case__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowercase__ : Optional[Any]= json.load(snake_case__ )
with open(os.path.join(snake_case__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowercase__ : Optional[Any]= json.load(snake_case__ )
lowercase__ : List[str]= [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ : Optional[int]= added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ : Union[str, Any]= added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(snake_case__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case__ , snake_case__ )
with open(os.path.join(snake_case__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case__ , snake_case__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ : Optional[int]= tokenizer_class.from_pretrained(
snake_case__ , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ : int= added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case__ )]
lowercase__ : int= tokenizer_class.from_pretrained(
snake_case__ , additional_special_tokens=snake_case__ , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowercase__ : Optional[int]= self.get_tokenizers(fast=snake_case__ , do_lower_case=snake_case__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ : Optional[int]= ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
lowercase__ : Optional[int]= tokenizer.convert_tokens_to_string(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
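# Sanity sketch of the byte-level scheme implied by the expected ids above
# (an inference from this file's fixtures, not the official implementation):
# raw UTF-8 bytes shifted by 6 reserved special-token ids, wrapped in
# [CLS]=4 ... [SEP]=5.
def _demo_perceiver_byte_ids(text):
    return [4] + [b + 6 for b in text.encode("utf-8")] + [5]

assert _demo_perceiver_byte_ids("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]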
| 150 | 0 |
import numpy as np
def lowerCamelCase_ ( vector : np.ndarray ):
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
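    # Quick numeric sanity check (illustrative, not part of the original file):
    # sigmoid(0) = 1 / (1 + e^0) = 0.5, and the function broadcasts over arrays.
    print(lowerCamelCase_(np.array([-1.0, 0.0, 1.0])))  # ~[0.2689 0.5 0.7311]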
| 345 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = '''blip_2_vision_model'''

    def __init__(self , hidden_size=1_4_0_8 , intermediate_size=6_1_4_4 , num_hidden_layers=3_9 , num_attention_heads=1_6 , image_size=2_2_4 , patch_size=1_4 , hidden_act="gelu" , layer_norm_eps=0.0_00_01 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained (cls , pretrained_model_name_or_path , **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""") == "blip-2":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs)


class BlipaQFormerConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = '''blip_2_qformer'''

    def __init__(self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_4_0_8 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained (cls , pretrained_model_name_or_path , **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""") == "blip-2":
            config_dict = config_dict["""qformer_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs)


class BlipaConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = '''blip-2'''
    is_composition = True

    def __init__(self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=3_2 , **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""")
        if qformer_config is None:
            qformer_config = {}
            logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""")
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""")
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs (cls , vision_config , qformer_config , text_config , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def to_dict (self):
        output = copy.deepcopy(self.__dict__)
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""qformer_config"""] = self.qformer_config.to_dict()
        output["""text_config"""] = self.text_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
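# Minimal usage sketch for the composite config above (illustrative; assumes a
# working `transformers` install so CONFIG_MAPPING can build the default OPT
# text config): the Q-Former's cross-attention width follows the vision tower.
_demo_config = BlipaConfig()
assert _demo_config.qformer_config.encoder_hidden_size == _demo_config.vision_config.hidden_size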
| 100 | 0 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.0_2 , use_labels=True , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config ( self ):
        """simple docstring"""
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder ( self ):
"""simple docstring"""
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model ( self , config , input_ids , input_mask , token_labels , **kwargs , ):
        """simple docstring"""
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder ( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
        """simple docstring"""
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_decoder_model_past_large_inputs ( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
        """simple docstring"""
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['''hidden_states'''][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['''hidden_states'''][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )
    def create_and_check_for_causal_lm ( self , config , input_ids , input_mask , token_labels , *args , ):
        """simple docstring"""
        model = BertGenerationDecoder(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A ( ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
lowercase_ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase_ = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase_ = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=37 )
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
_a = '''bert'''
self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def __lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase_ )
@slow
def __lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_a = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_a = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_a = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
_a = model(lowerCAmelCase_ )[0]
_a = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , lowerCAmelCase_ )
_a = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
_a = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_a = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
_a = model(lowerCAmelCase_ )[0]
_a = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , lowerCAmelCase_ )
_a = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 363 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester :
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , mask_ratio=0.9 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config ( self ):
        """simple docstring"""
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model ( self , config , pixel_values , labels ):
        """simple docstring"""
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining ( self , config , pixel_values , labels ):
        """simple docstring"""
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()
        result = model(pixel_values , bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
lowercase_ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowercase_ = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size , -1 ).bool()
            inputs_dict['''bool_masked_pos'''] = bool_masked_pos.to(torch_device )
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def __lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ )
@slow
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = VideoMAEModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
if not self.has_attentions:
pass
else:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
for model_class in self.all_model_classes:
_a = self.model_tester.seq_length - self.model_tester.num_masks
_a = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_a = True
_a = False
_a = True
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_a = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_a = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_a = len(lowerCAmelCase_ )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) )
_a = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def prepare_video ():
    '''simple docstring'''
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_a = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
lowerCAmelCase_ )
_a = self.default_image_processor
_a = prepare_video()
_a = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_a = model(**lowerCAmelCase_ )
# verify the logits
_a = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_a = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(lowerCAmelCase_ )
_a = self.default_image_processor
_a = prepare_video()
_a = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ )
# add boolean mask, indicating which patches to mask
_a = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
_a = torch.load(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_a = model(**lowerCAmelCase_ )
# verify the logits
_a = torch.Size([1, 14_08, 15_36] )
_a = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=lowerCAmelCase_ )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_a = torch.tensor([0.5_1_4_2] , device=lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_a = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=lowerCAmelCase_ ).to(
lowerCAmelCase_ )
with torch.no_grad():
_a = model(**lowerCAmelCase_ )
_a = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) )
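# Toy illustration of the bool_masked_pos construction used twice above
# (made-up sizes, requiring only torch): a per-video mask with `num_masks`
# ones is zero-padded to seq_length and repeated across the batch.
def _demo_bool_masked_pos(batch_size=2, seq_length=8, num_masks=3):
    mask = torch.ones((num_masks,))
    mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
    bool_masked_pos = mask.expand(batch_size, -1).bool()
    assert bool_masked_pos.shape == (batch_size, seq_length)
    assert int(bool_masked_pos[0].sum()) == num_masks
    return bool_masked_pos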
| 179 | 0 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
A_ : List[Any] = logging.get_logger(__name__)
class DeiTFeatureExtractor ( DeiTImageProcessor ):
    '''simple docstring'''

    def __init__(self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DeiTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
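# A generic sketch of the same deprecation-shim idea (hypothetical classes,
# independent of transformers): subclass the replacement and warn on init.
class _NewProcessor:
    def __init__(self, size=224):
        self.size = size

class _OldProcessor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldProcessor is deprecated; use _NewProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)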
| 333 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Any = logging.get_logger(__name__)
def create_rename_keys ( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase : Optional[int] = ''''''
else:
lowerCAmelCase : Union[str, Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : List[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCAmelCase : Tuple = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase : Tuple = in_proj_bias[: config.hidden_size]
lowerCAmelCase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img ( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint ( vit_name , pytorch_dump_folder_path ):
lowerCAmelCase : Any = ViTConfig()
lowerCAmelCase : Any = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCAmelCase : List[str] = True
lowerCAmelCase : int = int(vit_name[-12:-10] )
lowerCAmelCase : List[Any] = int(vit_name[-9:-6] )
else:
lowerCAmelCase : str = 1000
lowerCAmelCase : Optional[int] = '''huggingface/label-files'''
lowerCAmelCase : Any = '''imagenet-1k-id2label.json'''
lowerCAmelCase : Optional[Any] = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase : Optional[Any] = {int(_snake_case ): v for k, v in idalabel.items()}
lowerCAmelCase : Dict = idalabel
lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
lowerCAmelCase : List[str] = int(vit_name[-6:-4] )
lowerCAmelCase : int = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
lowerCAmelCase : str = 192
lowerCAmelCase : int = 768
lowerCAmelCase : List[str] = 12
lowerCAmelCase : str = 3
elif vit_name[9:].startswith('''small''' ):
lowerCAmelCase : List[str] = 384
lowerCAmelCase : Optional[int] = 1536
lowerCAmelCase : int = 12
lowerCAmelCase : str = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
lowerCAmelCase : List[str] = 768
lowerCAmelCase : Dict = 2304
lowerCAmelCase : Dict = 8
lowerCAmelCase : Tuple = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
lowerCAmelCase : Union[str, Any] = 1024
lowerCAmelCase : List[Any] = 4096
lowerCAmelCase : Union[str, Any] = 24
lowerCAmelCase : Any = 16
elif vit_name[4:].startswith('''huge''' ):
lowerCAmelCase : Any = 1280
lowerCAmelCase : str = 5120
lowerCAmelCase : Tuple = 32
lowerCAmelCase : Tuple = 16
# load original model from timm
lowerCAmelCase : Any = timm.create_model(_snake_case , pretrained=_snake_case )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase : int = timm_model.state_dict()
if base_model:
remove_classification_head_(_snake_case )
lowerCAmelCase : Optional[Any] = create_rename_keys(_snake_case , _snake_case )
for src, dest in rename_keys:
rename_key(_snake_case , _snake_case , _snake_case )
read_in_q_k_v(_snake_case , _snake_case , _snake_case )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCAmelCase : Any = ViTModel(_snake_case ).eval()
else:
lowerCAmelCase : Any = ViTForImageClassification(_snake_case ).eval()
model.load_state_dict(_snake_case )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCAmelCase : Dict = DeiTImageProcessor(size=config.image_size )
else:
lowerCAmelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size )
lowerCAmelCase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCAmelCase : Dict = encoding['''pixel_values''']
lowerCAmelCase : List[Any] = model(_snake_case )
if base_model:
lowerCAmelCase : Dict = timm_model.forward_features(_snake_case )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_snake_case , outputs.pooler_output , atol=1E-3 )
else:
lowerCAmelCase : Dict = timm_model(_snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_snake_case , outputs.logits , atol=1E-3 )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_snake_case )
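# Toy illustration (not part of the conversion) of the slicing done in
# read_in_q_k_v above: timm's fused (3*H, H) qkv projection is cut row-wise
# into query, key and value blocks of shape (H, H) each.
def _demo_qkv_split(hidden=4):
    fused = torch.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
    q = fused[:hidden, :]
    k = fused[hidden : 2 * hidden, :]
    v = fused[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v]), fused)
    return q, k, v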
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
snake_case__ : int = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 60 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '''▁'''
_SCREAMING_SNAKE_CASE = {'''vocab_file''': '''spiece.model'''}
_SCREAMING_SNAKE_CASE = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
_SCREAMING_SNAKE_CASE = {
'''google/reformer-crime-and-punishment''': 5_2_4_2_8_8,
}
class __lowercase ( PreTrainedTokenizer ):
'''simple docstring'''
a : List[str] = VOCAB_FILES_NAMES
a : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Any = ["input_ids", "attention_mask"]
    def __init__(self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs = None , **kwargs , ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _UpperCAmelCase (self ) -> Dict[str, int]:
'''simple docstring'''
__lowercase = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Any:
'''simple docstring'''
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__(self ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCamelCase ,out_type=_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
__lowercase = self.sp_model.IdToPiece(_lowerCamelCase )
return token
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = []
__lowercase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase ) + token
__lowercase = []
else:
current_sub_tokens.append(_lowerCamelCase )
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
    def save_vocabulary (self , save_directory , filename_prefix = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
_lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
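# Standalone sketch of the __getstate__/__setstate__ pattern above
# (hypothetical class, no sentencepiece needed): drop the unpicklable member
# when serializing and rebuild it from the saved state on restore.
import pickle
import threading

class _PicklableHolder:
    def __init__(self, name):
        self.name = name
        self.lock = threading.Lock()  # stand-in for the SentencePiece handle

    def __getstate__(self):
        state = self.__dict__.copy()
        state["lock"] = None  # the raw handle cannot be pickled
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.lock = threading.Lock()  # recreate it after unpickling

_restored = pickle.loads(pickle.dumps(_PicklableHolder("spiece.model")))
assert _restored.name == "spiece.model" and _restored.lock is not None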
| 368 |
'''simple docstring'''
def count_divisors ( n : int ) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution () -> int:
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_0_0:
            break
    return t_num
if __name__ == "__main__":
print(solution())
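    # Worked check (illustrative): 28 = 2^2 * 7 has (2+1)*(1+1) = 6 divisors,
    # and the loop above seeks the first triangle number with more than 500.
    assert count_divisors(28) == 6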
| 217 | 0 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def test_to_bettertransformer_and_back ( self):
        '''simple docstring'''
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer('This is me' , return_tensors='pt')
        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_reloaded = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output , output_reloaded))

    def test_error_save_pretrained_in_bettertransformer ( self):
        '''simple docstring'''
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # saving while still in BetterTransformer form is expected to fail
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 99 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCAmelCase__ = logging.getLogger(__name__)
class MaskedBertConfig ( PretrainedConfig ):
    model_type = """masked_bert"""

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
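# Quick usage sketch (illustrative values): non-default arguments land on the
# config as attributes and survive serialization via to_dict().
_demo_cfg = MaskedBertConfig(pruning_method="magnitude", mask_scale=1.0)
assert _demo_cfg.to_dict()["pruning_method"] == "magnitude"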
| 288 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __lowerCAmelCase ( SequenceFeatureExtractor ):
snake_case : Dict = ["""input_values""", """padding_mask"""]
    def __init__(self , feature_size = 1 , sampling_rate = 2_4_0_0_0 , padding_value = 0.0 , chunk_length_s = None , overlap = None , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length(self ):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def chunk_stride(self ):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__(self , raw_audio , padding = None , truncation = False , max_length = None , return_tensors = None , sampling_rate = None , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio , np.ndarray ):
            raw_audio = np.asarray(raw_audio , dtype=np.float32 )
        elif isinstance(raw_audio , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
# verify inputs are valid
for idx, example in enumerate(lowerCAmelCase__ ):
if example.ndim > 2:
raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels" )
        padded_inputs = None
        input_values = BatchFeature({"""input_values""": raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = """max_length"""
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values , max_length=max_length , truncation=truncation , padding=padding , return_attention_mask=padding , )
            if padding:
                padded_inputs["""padding_mask"""] = padded_inputs.pop("""attention_mask""" )
        input_values = []
        for example in padded_inputs.pop("""input_values""" ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs["""input_values"""] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
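# A standalone sketch (illustrative numbers, not from the original file) of the chunking
# arithmetic implemented by the `chunk_length`/`chunk_stride` properties above: with
# chunk_length_s=1.0, overlap=0.25 and sampling_rate=24000, each chunk spans 24000 samples
# and consecutive chunks start 18000 samples apart.
_example_chunk_length = int(1.0 * 24000)
_example_chunk_stride = max(1, int((1.0 - 0.25) * _example_chunk_length))
assert (_example_chunk_length, _example_chunk_stride) == (24000, 18000)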
| 170 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__(self , initial_learning_rate , decay_schedule_fn , warmup_steps , power = 1.0 , name = None , ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self , step ):
        with tf.name_scope(self.name or """WarmUp""" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
def snake_case_ (self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer( init_lr , num_train_steps , num_warmup_steps , min_lr_ratio = 0.0 , adam_beta1 = 0.9 , adam_beta2 = 0.999 , adam_epsilon = 1e-8 , adam_clipnorm = None , adam_global_clipnorm = None , weight_decay_rate = 0.0 , power = 1.0 , include_in_weight_decay = None , ):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam ):
    def __init__(self , learning_rate = 0.001 , beta_1 = 0.9 , beta_2 = 0.999 , epsilon = 1e-7 , amsgrad = False , weight_decay_rate = 0.0 , include_in_weight_decay = None , exclude_from_weight_decay = None , name = "AdamWeightDecay" , **kwargs , ):
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls , config ):
        custom_objects = {"""WarmUp""": WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
    def _prepare_local(self , var_device , var_dtype , apply_state ):
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]["""weight_decay_rate"""] = tf.constant(
            self.weight_decay_rate , name="""adam_weight_decay_rate""" )
    def _decay_weights_op(self , var , learning_rate , apply_state ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
        return tf.no_op()
    def apply_gradients(self , grads_and_vars , name=None , **kwargs ):
        grads, tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr(self , var_device , var_dtype , apply_state ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self , grad , var , apply_state=None ):
        lr_t, kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse(self , grad , var , indices , apply_state=None ):
        lr_t, kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config(self ):
        config = super().get_config()
        config.update({"""weight_decay_rate""": self.weight_decay_rate} )
        return config
    def _do_use_weight_decay(self , param_name ):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator(object ):
    def __init__(self ):
        self._gradients = []
        self._accum_steps = None
    @property
    def step(self ):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
    @property
    def gradients(self ):
        if not self._gradients:
            raise ValueError("""The accumulator should be called first to initialize the gradients""" )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self , gradients ):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(F"Expected {len(self._gradients )} gradients, but got {len(gradients )}" )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
    def reset(self ):
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
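# Usage sketch for the factory above (assumes TensorFlow is installed; the step counts are
# illustrative). The returned schedule warms up linearly for the first 100 steps and then
# follows the polynomial decay, while the optimizer adds decoupled weight decay to Adam.
if __name__ == "__main__":
    example_optimizer, example_schedule = create_optimizer(
        init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100, weight_decay_rate=0.01
    )
    print(float(example_schedule(50)))    # mid-warmup: half of init_lr (2.5e-05)
    print(float(example_schedule(1000)))  # fully decayed (0.0, since min_lr_ratio defaults to 0)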
| 170 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class A ( unittest.TestCase ):
    def setUp(self ):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
        shutil.copy(
            os.path.join(git_repo_path , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )

    def tearDown(self ):
        self.diffusers_dir = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir )

    def check_copy_consistency(self , comment , class_name , class_code , overwrite_result=None ):
        code = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , 'new_code.py' )
        with open(fname , 'w' , newline='\n' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , 'r' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers(self ):
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
        self.assertEqual(code , REFERENCE_CODE )

    def test_is_copy_consistent(self ):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , f'{long_class_name}SchedulerOutput' , re.sub('Bert' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , REFERENCE_CODE , overwrite_result=re.sub('DDPM' , 'Test' , REFERENCE_CODE ) , )
| 295 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar('''T''')
class DisjointSetTreeNode(Generic[T] ):
    def __init__(self , data ):
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T] ):
    def __init__(self ):
        # map from node name to the node object
        self.map = {}

    def make_set(self , data ):
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data )

    def find_set(self , data ):
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent

    def link(self , node1 , node2 ):
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self , data1 , data2 ):
        # merge 2 disjoint sets
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )


class GraphUndirectedWeighted(Generic[T] ):
    def __init__(self ):
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections = {}

    def add_node(self , node ):
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self , node1 , node2 , weight ):
        # add an edge with the given weight
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self ):
        # Kruskal's algorithm: sort edges by weight, then greedily keep the edges that join
        # two different components (tracked with the disjoint set above).
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
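# Usage sketch (illustrative graph): the MST of this weighted triangle keeps the two
# cheapest edges and drops the weight-10 edge.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    assert mst.connections[1] == {2: 1} and mst.connections[3] == {2: 2}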
| 244 | 0 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze ):
    """Solve a square 0/1 maze, printing the path matrix when a route exists."""
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('\n'.join(str(row ) for row in solutions ) )
    else:
        print('No solution exists!' )
    return solved


def run_maze(maze , i , j , solutions ):
    """Depth-first backtracking helper: mark cell (i, j) and recurse in all four directions."""
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
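# Usage sketch (illustrative 3x3 maze; 0 = open cell, 1 = blocked): the solver prints the
# path matrix and returns True because a route from (0, 0) to (2, 2) exists along the left
# column and bottom row.
if __name__ == "__main__":
    example_maze = [
        [0, 1, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    assert solve_maze(example_maze) is True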
| 361 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance , capacitance ):
    """Compute the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative' )
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative' )
    else:
        return (
            'Resonant frequency',
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
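# Worked example (illustrative component values): for L = 10 H and C = 5 F,
# f = 1 / (2 * pi * sqrt(L * C)) = 1 / (2 * pi * sqrt(50)) ≈ 0.0225 Hz.
if __name__ == "__main__":
    label, freq = resonant_frequency(inductance=10, capacitance=5)
    assert label == 'Resonant frequency'
    assert abs(freq - 1 / (2 * pi * sqrt(50))) < 1e-12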
| 48 | 0 |
"""simple docstring"""
def jaccard_similarity(set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
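    # With alternative_union=True the union counts elements with multiplicity
    # (|A| + |B| = 11), so the same 3-element intersection scores 3/11 ≈ 0.273
    # instead of the 3/8 = 0.375 printed above.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))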
| 60 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )


@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
    data_dir: str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
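# Illustrative launch command for this script (the flag names come from the dataclasses
# above; the task name and paths are placeholders, not from the original file):
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir /path/to/swag \
#       --max_seq_length 128 \
#       --output_dir /tmp/swag_output \
#       --do_train --do_eval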
| 60 | 1 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num , den ):
    """Return True if num/den is a digit-cancelling fraction (e.g. 49/98 -> 4/8)."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len ):
    """Collect all digit-cancelling fractions with `digit_len`-digit numerators."""
    solutions = []
    den = 11
    last_digit = int('''1''' + '''0''' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f'''{num}/{den}''' )
            den += 1
        num += 1
        den = 10
    return solutions
def solution(max_base = 2 ):
    """Multiply the digit-cancelling fractions and return the denominator of the product in lowest terms."""
    result = 1.0
    for fraction in fraction_list(max_base ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
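# Worked example: the four non-trivial digit-cancelling fractions are 16/64, 19/95,
# 26/65 and 49/98; their product reduces to 1/100, so `solution()` returns 100
# (the denominator in lowest terms).
assert is_digit_cancelling(49, 98)  # 49/98 == 4/8 after cancelling the shared digit 9
assert solution(2) == 100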
if __name__ == "__main__":
    print(solution())
| 352 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
lowerCAmelCase = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
lowerCAmelCase = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
lowerCAmelCase = field(
default=1_0000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
lowerCAmelCase = field(default=2E-4 , metadata={'''help''': '''Learning rate fo training.'''} )
lowerCAmelCase = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} )
lowerCAmelCase = field(
default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
lowerCAmelCase = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
lowerCAmelCase = field(default=5_0000 , metadata={'''help''': '''Maximum number of training steps.'''} )
lowerCAmelCase = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
lowerCAmelCase = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
lowerCAmelCase = field(default=1 , metadata={'''help''': '''Training seed.'''} )
lowerCAmelCase = field(
default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
lowerCAmelCase = field(default=_a , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
lowerCAmelCase = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
lowerCAmelCase = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
lowerCAmelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
lowerCAmelCase = field(default=_a , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
lowerCAmelCase = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
lowerCAmelCase = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
lowerCAmelCase = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
lowerCAmelCase = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
lowerCAmelCase = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
lowerCAmelCase = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
lowerCAmelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
lowerCAmelCase = field(
default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} )
lowerCAmelCase = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
lowerCAmelCase = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default=_a , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
lowerCAmelCase = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
lowerCAmelCase = field(
default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} )
lowerCAmelCase = field(
default=10_0000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
lowerCAmelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
lowerCAmelCase = field(
default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
lowerCAmelCase = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
lowerCAmelCase = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
lowerCAmelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
lowerCAmelCase = field(default=20_0000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
lowerCAmelCase = field(
default=3_2768 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} )
lowerCAmelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
lowerCAmelCase = field(default=_a , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
lowerCAmelCase = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
lowerCAmelCase = field(default=_a , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
lowerCAmelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
    lowerCAmelCase = field(default=_a , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
| 235 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 332 |
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float] , nums2: list[float] ) -> float:
    all_numbers = sorted(nums1 + nums2 )
    div, mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_2 = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_1, array_2)}""")
| 332 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig ):
    model_type = '''poolformer'''

    def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 128, 320, 512] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act='''gelu''' , use_layer_scale=True , layer_scale_init_value=1e-5 , initializer_range=0.02 , **kwargs , ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        return 2e-3
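# Usage sketch (illustrative, assumes `transformers` is installed): the ONNX export config
# above declares a single 4D image input and a validation tolerance of 2e-3.
if __name__ == "__main__":
    onnx_config = PoolFormerOnnxConfig(PoolFormerConfig())
    print(dict(onnx_config.inputs))         # {'pixel_values': {0: 'batch', ...}}
    print(onnx_config.atol_for_validation)  # 0.002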
| 362 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig(PretrainedConfig ):
    model_type = '''git_vision_model'''

    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act='''quick_gelu''' , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''' ) == "git":
            config_dict = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig(PretrainedConfig ):
    model_type = '''git'''

    def __init__( self , vision_config=None , vocab_size=30522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3072 , hidden_act='''gelu''' , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type='''absolute''' , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
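# Usage sketch (illustrative): a default GitConfig builds its own vision tower config, and
# `to_dict` above serializes it as a nested dictionary.
if __name__ == "__main__":
    example_config = GitConfig()
    assert example_config.vision_config.patch_size == 16
    assert example_config.to_dict()["model_type"] == "git"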
| 93 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name ):
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = """huggingface/label-files"""
    if "o365" in model_name:
        num_labels = 366
        filename = """object365-id2label.json"""
    else:
        num_labels = 91
        filename = """coco-detection-id2label.json"""
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
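# Minimal usage sketch (illustrative only, with toy key names): rename_key
# mutates the state dict in place.
#   sd = {"transformer.encoder.layers.0.norm1.weight": 1.0}
#   rename_key(sd, "transformer.encoder.layers.0.norm1.weight",
#              "model.encoder.layers.0.self_attn_layer_norm.weight")
#   assert "model.encoder.layers.0.self_attn_layer_norm.weight" in sd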
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
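# Sanity-check sketch (illustrative, with a toy hidden size): the fused
# in-projection stacks q, k and v along dim 0, so a (3*h, h) weight splits
# into three (h, h) blocks exactly as sliced above.
#   import torch
#   h = 4
#   fused = torch.arange(3 * h * h, dtype=torch.float32).reshape(3 * h, h)
#   q, k, v = fused[:h], fused[h : 2 * h], fused[-h:]
#   assert torch.equal(torch.cat([q, k, v]), fused)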
def prepare_img():
    # standard COCO validation image used to verify conversions
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # print the original state dict for inspection
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
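# Example invocation (illustrative; the script filename is an assumption,
# adjust it to wherever this module lives in your checkout):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path /tmp/deta-swin-large --push_to_hub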
| 198 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        # conv1_get: [kernel_size, kernel_number, step] of the convolution layer
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        # logistic activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
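    # Worked values for the activation (for reference): sig(0) = 0.5, and
    # sig(x) tends to 1 as x grows and to 0 as x shrinks, so every neuron
    # output below lies in the open interval (0, 1).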
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and save as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slices to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
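    # Worked size example (illustrative): a 10x10 input with a 3x3 kernel and
    # stride 1 gives int((10 - 3) / 1 + 1) = 8, i.e. an 8x8 feature map per kernel.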
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
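    # Worked example (illustrative): 2x2 average pooling over an 8x8 feature
    # map keeps one np.average per block, yielding a 4x4 pooled map.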
    def _expand(self, data):
        # expanding three-dimensional data to a one-dimensional array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding a matrix to a one-dimensional row
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # upsample the pooled gradient back to feature-map size
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ', data_teach)
                # print('   ----BP_output  ', bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_out2 = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_out2)
            bp_out3 = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_out3)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the data of an image after the convolution process so we can inspect it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
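    # Tiny smoke test (an illustrative sketch, not part of the original
    # module): a pair of 3x3 kernels with stride 1 on a random 10x10 "image"
    # plus 2x2 pooling gives 2 * 4 * 4 = 32 flattened features for the BP layers.
    cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=32, bp_num2=10, bp_num3=4)
    train_imgs = [np.random.rand(10, 10)]
    teach_vecs = [np.random.rand(4)]
    cnn.train(1, train_imgs, teach_vecs, n_repeat=1, error_accuracy=0.1, draw_e=False)
    print(cnn.predict(train_imgs))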
pass | 206 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """
    Class to represent the Diffie-Hellman key exchange protocol
    """

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
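    # Round-trip demonstration (an illustrative sketch): two peers derive the
    # same shared secret from each other's public keys.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    assert alice.generate_shared_key(
        bob.generate_public_key()
    ) == bob.generate_shared_key(alice.generate_public_key())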
| 293 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)
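# Gate-count sketch (for reference): the circuit above uses n Hadamard gates
# and n * (n - 1) / 2 controlled-phase rotations, e.g. 3 H and 3 CP gates for n = 3.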
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
| 293 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 1 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Saves a randomly initialized model (and its tokenizer) built from `config_name`."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
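    # Example invocation via python-fire (illustrative; the script path is an
    # assumption):
    #   python save_randomly_initialized_model.py t5-small /tmp/tiny-t5 --num_layers=2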
| 170 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) | 352 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # legacy support for the deprecated --no_* arguments
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
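    # Illustrative use (a sketch, not part of the original class): benchmark
    # code typically builds the model under the selected strategy, e.g.
    #   with benchmark_args.strategy.scope():
    #       model = TFAutoModel.from_config(config)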
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0 | 284 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """
    Given the root of a binary tree with n nodes, where each node holds
    node.data coins and there are n coins in total, return the minimum number
    of moves required to make every node hold exactly one coin.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes should be the same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
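    # Worked example (illustrative): the three-node tree [3, 0, 0] needs two
    # moves - one coin from the root to each child.
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_root) == 2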
| 230 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got cast back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 230 | 1 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
def snake_case__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def snake_case__ ( self : int ) -> List[str]:
'''simple docstring'''
pass
| 287 |
'''simple docstring'''
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Invert each pixel's color in place and return the image."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
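    # the second argument (1) tells OpenCV's imread to load the image in color (BGR) mode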
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 287 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = ViTImageProcessor if is_vision_available() else None
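    # MGP-STR reuses the ViT image processor whenever the vision extras are installed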
@property
def a_ ( self) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # build a random PIL image in channels-last layout
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'test'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'test'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'labels'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(' ', '') for seq in decoded_tok]
        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 69 | """simple docstring"""
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(n: int, arr: list):
        # c acts as the stack state of the iterative version of Heap's algorithm
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 69 | 1 |
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 357 | '''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case__ : Optional[Any] = logging.get_logger(__name__)
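# maps each --lr_scheduler choice to the factory that builds that learning-rate schedule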
snake_case__ : List[Any] = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.")

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
        return scheduler
    def _get_train_sampler(self):
'''simple docstring'''
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        inputs = self._prepare_inputs(inputs)
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs)
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined, at least the EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}")
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 274 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
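    # These tests check that the Python, torch-builtin, and "new" GELU variants agree (or intentionally differ).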
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
'''simple docstring'''
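        # every registered activation should be retrievable by name; unknown names must raise (checked below)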
get_activation("gelu")
get_activation("gelu_10")
get_activation("gelu_fast")
get_activation("gelu_new")
get_activation("gelu_python")
get_activation("gelu_pytorch_tanh")
get_activation("linear")
get_activation("mish")
get_activation("quick_gelu")
get_activation("relu")
get_activation("sigmoid")
get_activation("silu")
get_activation("swish")
get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        # the second instance must not share attributes with the first
        with self.assertRaises(AttributeError):
            _ = act2.a
| 10 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
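# Fast tests below run a tiny, randomly initialized Shap-E image-to-image pipeline end to end on CPU.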
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
@property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        model = CLIPVisionModel(config)
        return model
@property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
@property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
        model = PriorTransformer(**model_kwargs)
        return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array([0.00039216] * 9)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image) | 163 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
pass # TODO add if relevant
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
pass # TODO add if relevant
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_1[-1])  # SEG token
        self.assertEqual(x_token_2[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
pass | 163 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase_ ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 122 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
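# ConstantLengthDataset packs tokenized "content" fields into fixed-length blocks for perplexity evaluation.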
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1_024, num_of_sequences=1_024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            # fill the buffer with enough raw characters for one packing round
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}") | 308 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states), )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states), )
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 355 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
    model_type = "git_vision_model"
    def __init__(self, hidden_size=768, intermediate_size=3_072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "git"
    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
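# Hedged usage sketch (not part of the original module; it mirrors the standard
# composite-config pattern implemented above):
#
#   config = GitConfig(vision_config={"hidden_size": 512}, num_hidden_layers=4)
#   assert config.vision_config.hidden_size == 512
#   assert config.to_dict()["model_type"] == "git"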
| 70 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        """simple docstring"""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_input_ids, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
    # not implemented currently
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        """simple docstring"""
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        """simple docstring"""
        pass
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        """simple docstring"""
        return
    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        """simple docstring"""
        pass
| 187 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        """simple docstring"""
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps) | 331 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
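# Hedged self-check (not in the original script): the comparison count depends
# on the random pivots, but the array itself must always come back sorted.
_sample = [3, 1, 4, 1, 5, 9, 2, 6]
_in_place_quick_sort(_sample, 0, len(_sample) - 1)
assert _sample == sorted([3, 1, 4, 1, 5, 9, 2, 6])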
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 360 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 136 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
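# Hedged usage sketch (not part of the original module): round-trip a dummy PIL
# image through the processor above; the default config resizes to 384x384 and
# returns channels-first pixel values.
#
#   import numpy as np
#   from PIL import Image
#
#   processor = BlipImageProcessor()
#   dummy = Image.fromarray(np.zeros((48, 64, 3), dtype=np.uint8))
#   batch = processor(images=dummy, return_tensors="np")
#   assert batch["pixel_values"].shape == (1, 3, 384, 384)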
| 16 |
import random
def rabin_miller(num: int) -> bool:
    """simple docstring"""
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """simple docstring"""
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
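# Hedged sanity check (illustrative; these are well-known primes and composites,
# not values from this script). is_prime_low_num answers deterministically here
# because each case is either in the low-prime table or divisible by one of them.
def _demo_primality() -> None:
    assert all(is_prime_low_num(p) for p in (2, 3, 97, 101, 997))
    assert not any(is_prime_low_num(c) for c in (1, 100, 561, 1001))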
if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 306 | 0 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    """simple docstring"""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """simple docstring"""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
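# Hedged usage sketch (not part of the original module): arrays of floats in
# [0, 1] with layout (batch, height, width, channels) convert directly.
#
#   import numpy as np
#   batch = np.random.rand(2, 64, 64, 3)
#   pils = numpy_to_pil(batch)
#   assert len(pils) == 2 and pils[0].size == (64, 64)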
| 351 |
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """simple docstring"""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        """simple docstring"""
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """simple docstring"""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """simple docstring"""
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """simple docstring"""
        return cls(**config)

    def get_config(self):
        """simple docstring"""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        """simple docstring"""
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 6 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """simple docstring"""
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
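# Worked example for rename_key (hedged: it follows the reconstruction above,
# not upstream documentation). A timm-style patch-embedding key is mapped onto
# the Hugging Face EfficientFormer naming scheme:
#
#   rename_key("patch_embed.0.weight", 1)
#   -> "efficientformer.patch_embed.convolution1.weight"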
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """simple docstring"""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    """simple docstring"""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 218 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """simple docstring"""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 348 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """simple docstring"""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
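# A second, hedged example (not in the original script): a triangle is the
# smallest odd cycle and therefore not bipartite, so the check must fail.
odd_cycle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(odd_cycle))  # expected output: False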
| 348 | 1 |
'''simple docstring'''
def solution(limit: int = 50000000) -> int:
    """simple docstring"""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
    print(f"{solution() = }")
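# Worked example (from the Project Euler 87 problem statement, not computed by
# this file): the smallest number expressible as a prime square plus a prime
# cube plus a prime fourth power is 28 = 2**2 + 2**3 + 2**4, so under the
# implementation above solution(29) returns 1.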
| 311 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 130 | 0 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """simple docstring"""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """simple docstring"""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """simple docstring"""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """simple docstring"""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 175 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def snake_case_ ( number : int ):
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    # Liouville lambda: 1 if the count of prime factors (with multiplicity) is even, -1 if odd
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
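# Illustrative values (added note, not in the original module): 10 = 2 * 5 has
# an even number of prime factors, so the function returns 1; 12 = 2 * 2 * 3
# has an odd number, giving -1.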
| 175 | 1 |
"""simple docstring"""
from collections import deque
class Process:
    '''simple docstring'''
    def __init__( self , process_name , arrival_time , burst_time ):
        """simple docstring"""
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    '''simple docstring'''
    def __init__( self , number_of_queues , time_slices , queue , current_time , ):
        """simple docstring"""
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue( self ):
        """simple docstring"""
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self , queue ):
        """simple docstring"""
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self , queue ):
        """simple docstring"""
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self , queue ):
        """simple docstring"""
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self , queue ):
        """simple docstring"""
        return [q.burst_time for q in queue]
    def update_waiting_time( self , process ):
        """simple docstring"""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self , ready_queue ):
        """simple docstring"""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self , ready_queue , time_slice ):
        """simple docstring"""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self ):
        """simple docstring"""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
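# Illustrative trace (added note, not in the original file): with
# time_slices = [17, 25], a process needing 53 time units runs 17 units in
# queue 0, 25 units in queue 1, and its remaining 11 units in the final
# first-come-first-served queue.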
if __name__ == "__main__":
import doctest
    P1 = Process("""P1""", 0, 53)
    P2 = Process("""P2""", 0, 17)
    P3 = Process("""P3""", 0, 68)
    P4 = Process("""P4""", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"""queue""": deque([P1, P2, P3, P4])})
    P1 = Process("""P1""", 0, 53)
    P2 = Process("""P2""", 0, 17)
    P3 = Process("""P3""", 0, 68)
    P4 = Process("""P4""", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 291 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ["input_features"]
    def __init__( self , feature_size=80 , sampling_rate=1_60_00 , hop_length=1_60 , chunk_length=30 , n_fft=4_00 , padding_value=0.0 , return_attention_mask=False , **kwargs , ) -> None:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , )
    def _np_extract_fbank_features( self , waveform: np.array ) -> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values: List[np.ndarray] , attention_mask: List[np.ndarray] , padding_value: float = 0.0 ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation: bool = True , pad_to_multiple_of: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = None , padding: Optional[str] = "max_length" , max_length: Optional[int] = None , sampling_rate: Optional[int] = None , do_normalize: Optional[bool] = None , **kwargs , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        batched_speech = BatchFeature({"input_features": raw_speech} )
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"] , axis=0 )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
        input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
        if isinstance(input_features[0] , list ):
            padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict( self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
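# Hypothetical usage sketch (added note, not part of the original file):
#     fe = WhisperFeatureExtractor()
#     feats = fe(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
#     feats["input_features"].shape  # (1, 80, 3000): 80 mel bins over 30 s of 160-sample hops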
| 157 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = """http://www.mocksite.com/file1.txt"""
CONTENT = """\"text\": [\"foo\", \"foo\"]"""
HASH = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    '''simple docstring'''
    status_code = 200
    headers = {'Content-Length': '100'}
    cookies = {}
    def iter_content(self , **kwargs ):
        '''simple docstring'''
        return [bytes(CONTENT , '''utf-8''' )]
def mock_request( *args , **kwargs ):
    """simple docstring"""
    return MockResponse()
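# Note (added for clarity): the download test below monkeypatches
# requests.request with the stub above, so no real network traffic happens;
# downloaded content and cache paths are checked against URL/CONTENT/HASH.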
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def test_download_manager_download( urls_type , tmp_path , monkeypatch ):
    """simple docstring"""
    import requests
    monkeypatch.setattr(requests , '''request''' , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {'''train''': url}
    dataset_name = '''dummy'''
    cache_subdir = '''downloads'''
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('''.json''' )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def test_download_manager_extract( paths_type , xz_file , text_file ):
    """simple docstring"""
    filename = str(xz_file )
    if issubclass(paths_type , str ):
        paths = filename
    elif issubclass(paths_type , list ):
        paths = [filename]
    elif issubclass(paths_type , dict ):
        paths = {'''train''': filename}
    dataset_name = '''dummy'''
    cache_dir = xz_file.parent
    extracted_subdir = '''extracted'''
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl( path , file ):
    """simple docstring"""
    assert path.endswith('''.jsonl''' )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode('''utf-8''' ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def test_iter_archive_path( archive_jsonl , request ):
    """simple docstring"""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
    assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def test_iter_archive_file( archive_nested_jsonl , request ):
    """simple docstring"""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files( data_dir_with_hidden_files ):
    """simple docstring"""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 24 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_token_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_distilbert_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , '''traced_model.pt''' ) )
                loaded = torch.jit.load(os.path.join(tmp , '''traced_model.pt''' ) , map_location=torch_device )
                loaded(inputs_dict['''input_ids'''].to(torch_device ) , inputs_dict['''attention_mask'''].to(torch_device ) )
@require_torch
class DistilBertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head_absolute_embedding(self ):
        '''simple docstring'''
        model = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 24 | 1 |
'''simple docstring'''
# Imports
import numpy as np
class IndexCalculation:
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
    def arvaa( self ):
        return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci( self ):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi( self ):
        return self.nir * (self.red / (self.green**2))
    def gli( self ):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi( self ):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi( self ):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi( self ):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi( self ):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi( self ):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi( self ):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi( self ):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi( self ):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi( self , x=0.0_8 , a=1.2_2 , b=0.0_3 ):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi( self ):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green( self ):
        return (self.nir / self.green) - 1
    def ci_rededge( self ):
        return (self.nir / self.redEdge) - 1
    def ci( self ):
        return (self.red - self.blue) / self.red
    def ctvi( self ):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi( self ):
        return self.nir - self.green
    def evi( self ):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi( self ):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
    def gosavi( self , y=0.1_6 ):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self , n=0.5 ):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue( self ):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
    def ivi( self , a=None , b=None ):
        return (self.nir - b) / (a * self.red)
    def ipvi( self ):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i( self ):
        return (self.red + self.green + self.blue) / 3_0.5
    def rvi( self ):
        return self.nir / self.red
    def mrvi( self ):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi( self ):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g( self ):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir( self ):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r( self ):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi( self ):
        return (self.green - self.red) / (self.green + self.red)
    def ri( self ):
        return (self.red - self.green) / (self.red + self.green)
    def s( self ):
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if( self ):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi( self ):
        return self.nir / self.red
    def tvi( self ):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre( self ):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
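# Hypothetical usage sketch (added note, not part of the original file):
#     idx = IndexCalculation(red=np.array([50.0]), nir=np.array([200.0]))
#     idx.calculation("NDVI")  # array([0.6]), since (200 - 50) / (200 + 50) = 0.6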
| 22 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(__lowercase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards( kwargs , expected ):
    '''simple docstring'''
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs( gen_kwargs , max_num_jobs , expected ):
    '''simple docstring'''
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs( gen_kwargs , expected ):
    '''simple docstring'''
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
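# Worked example (added note): _distribute_shards(num_shards=10, max_num_jobs=3)
# splits as evenly as possible into 4 + 3 + 3 shards, i.e.
# [range(0, 4), range(4, 7), range(7, 10)], matching the parametrized case above.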
| 22 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline( DiffusionPipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size = "auto" ) -> None:
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> None:
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents_reference = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , text_embeddings = None , **kwargs , ):
'''simple docstring'''
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps )}.''' )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
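        # Note added for clarity: with guidance weight w, the guided prediction
        # used in the denoising loop below is eps_uncond + w * (eps_text - eps_uncond),
        # so w = 1 reduces to the conditional prediction alone.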
# get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""""""]
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !='''
                    f''' {type(prompt )}.''' )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    """ the batch size of `prompt`.""" )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding="""max_length""" , max_length=max_length , truncation=True , return_tensors="""pt""" , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device="""cpu""" , dtype=latents_dtype ).to(self.device )
                latents = torch.randn(latents_shape , generator=generator , device="""cpu""" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device=self.device , dtype=latents_dtype )
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx , 0 )
        dy = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["""eta"""] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors="""pt""" ).to(
                self.device )
            image, has_nsfw_concept = self.safety_checker(
                images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=has_nsfw_concept )
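# Hypothetical usage sketch (added note; the loading route is an assumption):
# load standard Stable Diffusion components into this pipeline class, then call
# it twice with the same generator seed at two different heights/widths -- the
# shared 64x64 reference latents keep the two outputs visually similar.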
| 101 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        '''simple docstring'''
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(vocab_keys ) , 1000 )
    def test_vocab_size(self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers(self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_padding(self , max_length=15 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="""max_length""" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="""max_length""" , )
    def test_padding_different_model_input_name(self ):
        '''simple docstring'''
        pass
    def test_full_tokenizer(self ):
        '''simple docstring'''
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self ):
        '''simple docstring'''
        return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
    @slow
    def test_tokenization_base_easy_symbols(self ):
        '''simple docstring'''
        symbols = """Hello World!"""
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols(self ):
        '''simple docstring'''
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self ):
        '''simple docstring'''
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = """ """.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="""pt""" )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["""input_ids"""].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration(self ):
        '''simple docstring'''
# fmt: off
A_ = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=False , sequences=sequences , )
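    # --- illustrative sketch (added): a minimal encode/decode round trip on the
    # same checkpoint used above; the method name and sample text are assumptions
    # chosen for illustration, not part of the original test suite.
    @slow
    def test_encode_decode_sketch( self ) -> None:
        '''simple docstring'''
        ids = self.big_tokenizer.encode("""Hello World!""" )
        decoded = self.big_tokenizer.decode(ids )
        self.assertIsInstance(decoded , str )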
| 101 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=DummyObject ):
"""simple docstring"""
    _backends = ["""note_seq"""]
    def __init__( self , *args , **kwargs ):
'''simple docstring'''
requires_backends(self , ["""note_seq"""] )
@classmethod
    def from_config( cls , *args , **kwargs ):
'''simple docstring'''
requires_backends(cls , ["""note_seq"""] )
@classmethod
    def from_pretrained( cls , *args , **kwargs ):
'''simple docstring'''
requires_backends(cls , ["""note_seq"""] )
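# --- illustrative sketch (added): instantiating the dummy raises ImportError
# via `requires_backends` when the `note_seq` backend is missing. Guarded so
# it never runs on import; note this module is normally imported as part of
# the package rather than executed directly.
if __name__ == "__main__":
    try:
        lowerCamelCase__()
    except ImportError as exc:
        print(exc )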
| 115 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__( self , vertices , edges )-> None:
        '''simple docstring'''
        self.vertices = vertices
        self.edges = {
            (min(__UpperCAmelCase := edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight )-> None:
        '''simple docstring'''
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self )-> Graph:
        '''simple docstring'''
        subgraph = Graph({min(self.vertices )} , {} )
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution( filename : str = "p107_network.txt" ) -> int:
"""simple docstring"""
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    file_path = os.path.join(script_dir , filename )
    edges: dict[EdgeT, int] = {}
    with open(file_path ) as f:
        data = f.read().strip().split("\n" )
    adjacency_matrix = [line.split("," ) for line in data]
    for edge1 in range(1 , len(adjacency_matrix ) ):
        for edge2 in range(edge1 ):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2] )
    graph = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values() )
    optimal_total = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
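# --- illustrative sketch (added): a self-contained check of prims_algorithm on
# a hypothetical 3-vertex triangle; the MST keeps edges (0, 1) and (1, 2), so
# the summed weight is 3 and the solution-style savings would be 6 - 3 = 3.
def _demo_prims() -> int:
    demo_graph = Graph({0, 1, 2} , {(0, 1): 1, (1, 2): 2, (0, 2): 3} )
    demo_mst = demo_graph.prims_algorithm()
    return sum(demo_mst.edges.values() )  # 3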
| 340 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class lowerCAmelCase__ :
    def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table , ):
        '''simple docstring'''
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self ):
        '''simple docstring'''
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]
    def __available_resources( self ):
        '''simple docstring'''
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )
    def __need( self ):
        '''simple docstring'''
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self ):
        '''simple docstring'''
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self , **kwargs ):
        '''simple docstring'''
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 5_0 + "\n" )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.' )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print("The process is in a safe state.\n" )
            else:
                print("System in unsafe state. Aborting...\n" )
                break
    def __pretty_data( self ):
        '''simple docstring'''
        print(" " * 9 + "Allocated Resource Table" )
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item ) + 1}'
                + " ".join(f'{it:>8}' for it in item )
                + "\n" )
        print(" " * 9 + "System Resource Table" )
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item ) + 1}'
                + " ".join(f'{it:>8}' for it in item )
                + "\n" )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x ) for x in self.__claim_vector ) )
        print(
            "Initial Available Resources: "
            + " ".join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
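# --- illustrative usage sketch (added): runs the Banker's safety check on the
# module-level test data defined above; `describe=True` triggers the pretty
# printer before the simulation. Not part of the original module.
def _demo_bankers() -> None:
    lowerCAmelCase__(
        test_claim_vector , test_allocated_res_table , test_maximum_claim_table
    ).main(describe=True )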
| 298 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
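    # --- illustrative sketch (added): shows the generation API on the same
    # checkpoint; output text is printed rather than asserted because it depends
    # on the exact weights, and `max_new_tokens=8` is an assumed setting.
    @slow
    def test_generate_sketch( self ):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        output_ids = model.generate(input_ids , max_new_tokens=8 )
        print(tokenizer.decode(output_ids[0] , skip_special_tokens=True ) )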
| 298 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir : str = field(
        metadata={"help": "The output directory where the model will be written."} , )
    encoder_model_name_or_path : str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        } , )
    decoder_model_name_or_path : str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        } , )
    encoder_config_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
    decoder_config_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def main() -> None:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments,) )
    (model_args ,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
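# --- illustrative usage sketch (added): a hypothetical CLI invocation; the
# checkpoint names are placeholders chosen for illustration, not values the
# script mandates.
#
#   python create_model.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2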
if __name__ == "__main__":
main() | 135 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
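# --- illustrative note (added): with the lazy module installed in sys.modules,
# heavy submodules load on first attribute access; a hypothetical consumer
# simply writes, e.g.:
#
#   from transformers.models.resnet import ResNetConfig
#   config = ResNetConfig()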
| 107 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class UpperCamelCase_ ( Trainer ):
    def __init__( self , config=None , data_args=None , *args , **kwargs ) -> None:
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f''' {self.model.__class__}'''
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                """ padding.""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps ):
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ):
        labels = inputs.pop("""labels""" )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 358 |
'''simple docstring'''
import numpy as np
class Cell :
    def __init__( self ) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__( self , cell ) -> bool:
        return self.position == cell.position
    def showcell( self ) -> None:
        print(self.position )
class Gridworld :
    def __init__( self , world_size=(5, 5) ) -> None:
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show( self ) -> None:
        print(self.w )
    def get_neighbours( self , cell ):
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def astar( world , start , goal ):
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neighbours(current ):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            xa , ya = n.position
            xb , yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
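# --- illustrative note (added): the heuristic computed above is the *squared*
# Euclidean distance, e.g. from (0, 0) to (4, 4) it is (4 - 0) ** 2 + (4 - 0) ** 2 == 32.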
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F'''path from {start.position} to {goal.position}''')
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 338 | 0 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class lowerCamelCase__( PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , merges_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            merges = merges_handle.read().split("""\n""" )[:-1]
        merges = [tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        split_tokens = []
        words = re.findall(r"""\S+\n?""" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''' '''.join(tokens ).replace("""@@ """ , """""" ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file( self , f ):
        if isinstance(f , str ):
            try:
                with open(f , """r""" , encoding="""utf-8""" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(F'Incorrect encoding detected in {f}, please rebuild the dataset' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(""" """ )
            if idx == -1:
                raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
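# --- illustrative sketch (added): a standalone trace of one BPE merge step as
# performed in `bpe` above; with the pair ("h", "e") ranked first, the word
# ("h", "e", "l", "l", "o</w>") collapses to ("he", "l", "l", "o</w>").
def _bpe_merge_demo() -> tuple:
    word = ("h", "e", "l", "l", "o</w>")
    first , second = "h", "e"
    new_word = []
    i = 0
    while i < len(word ):
        try:
            j = word.index(first , i )
        except ValueError:
            new_word.extend(word[i:] )
            break
        new_word.extend(word[i:j] )
        i = j
        if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
            new_word.append(first + second )
            i += 2
        else:
            new_word.append(word[i] )
            i += 1
    return tuple(new_word )  # ("he", "l", "l", "o</w>")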
| 12 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro" , tgt_lang="en" , dataset="wmt16" , save_dir=None ) -> None:
    '''simple docstring'''
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('''run pip install datasets''' )
    pair = f'''{src_lang}-{tgt_lang}'''
    print(f'''Converting {dataset}-{pair}''' )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f'''{dataset}-{pair}'''
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f'''Splitting {split} with {ds[split].num_rows} records''' )
        # to save to val.source, val.target like summary datasets
        fn = '''val''' if split == '''validation''' else split
        src_path = save_dir.joinpath(f'''{fn}.source''' )
        tgt_path = save_dir.joinpath(f'''{fn}.target''' )
        src_fp = src_path.open('''w+''' )
        tgt_fp = tgt_path.open('''w+''' )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x['''translation''']
            src_fp.write(ex[src_lang] + '''\n''' )
            tgt_fp.write(ex[tgt_lang] + '''\n''' )
    print(f'''Saved {dataset} dataset to {save_dir}''' )
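# --- illustrative usage (added): `fire.Fire` below exposes the keyword
# arguments as CLI flags; the script file name here is a placeholder.
#
#   python download_wmt_dataset.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en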
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 0 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'FlavaImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = False , max_length = None , stride = 0 , pad_to_multiple_of = None , return_image_mask = None , return_codebook_pixels = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
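# --- illustrative usage sketch (added): a hypothetical call combining text and
# image inputs; the checkpoint name and call pattern are assumptions for
# illustration, not mandated by this module.
#
#   processor = a__.from_pretrained("facebook/flava-full")
#   inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")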
| 354 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 197 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1_0_2_4}
class UpperCAmelCase (PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        return len(self.fairseq_ids_to_tokens )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self , index ):
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F"""{str(token )} \n""" )
        return out_vocab_file, out_monolingual_vocab_file
| 177 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang( self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts , src_lang = "en_XX" , tgt_texts = None , tgt_lang = "ro_RO" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs)
    def _switch_to_input_mode( self) -> None:
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode( self) -> None:
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens( self , src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
    def set_tgt_lang_special_tokens( self , lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
    def save_vocabulary( self , save_directory , filename_prefix = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
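# --- illustrative usage sketch (added): translation-style encoding with the
# fast tokenizer above; the checkpoint comes from the pretrained map, while the
# call pattern is an assumption for illustration.
#
#   tok = lowerCAmelCase__.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")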
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 359 |
"""simple docstring"""
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string ``s``."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Slide a 13-digit window over ``n`` and return the largest digit product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
    print(F"""{solution() = }""")
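
# A brute-force cross-check (not part of the original Project Euler solution;
# the name `brute_force` is ours, added only for illustration). Scanning every
# 13-digit window should agree with the sliding-window heuristic in solution().
def brute_force(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))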
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the last index of ``char`` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch when the pattern is
        aligned at ``current_pos``, or -1 if the pattern matches fully."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
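
# A second, illustrative run (ours, not in the original file): the classic
# overlapping-occurrence example, where "AABA" occurs at indices 0, 9 and 12.
# BoyerMooreSearch("AABAACAADAABAABA", "AABA").bad_character_heuristic() == [0, 9, 12]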
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
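
# Minimal usage sketch (illustrative, not part of the original module). It
# assumes local `vocab.txt` / `bpe.codes` files in the PhoBERT format; the
# pretrained checkpoints listed above can also be loaded with
# `PhobertTokenizer.from_pretrained("vinai/phobert-base")`.
#
#   tokenizer = PhobertTokenizer("vocab.txt", "bpe.codes")
#   tokens = tokenizer.tokenize("Xin chào thế giới")
#   ids = tokenizer.convert_tokens_to_ids(tokens)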
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level for a new node: each extra level is kept with probability ``p``."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None


def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
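
# Illustrative note (ours, not from the original file): with p = 0.5 the
# expected cost of find/insert/delete is O(log n). A quick sanity session:
#
#   sl = SkipList()
#   for i in range(5):
#       sl.insert(i, i * i)
#   sl.find(3)   # -> 9
#   sl.delete(3)
#   sl.find(3)   # -> None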
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )

            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )

            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )

            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split("\"")[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split("\"")[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
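
# Illustrative invocation (ours, based on the arguments registered above):
#
#   transformers-cli add-new-model                      # interactive cookiecutter prompts
#   transformers-cli add-new-model --testing --testing_file config.json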
import os

import jsonlines
import numpy as np
from tqdm import tqdm


DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    """Pick one annotated answer (yes/no, short, or long) for a Natural Questions example."""

    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]

    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    """Strip HTML tokens from the document and shift the answer span accordingly."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize question + context and split long inputs into strided windows with per-window labels."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )

    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
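
# For reference (our note, not from the original script): each line written by
# save_to_disk is a JSON record of the form
#   {"input_ids": [...], "start_token": int, "end_token": int, "category": int}
# with "category" mapped through CATEGORY_MAPPING defined at the top.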
import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)


logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
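
# Illustrative usage sketch (ours, not from the original file): enable
# patience-based early exit at inference, then inspect the speed-up stats.
# The checkpoint name is a placeholder.
#
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", num_labels=2)
#   model.bert.set_patience(3)
#   model.eval()
#   logits = model(input_ids=batch_ids, attention_mask=batch_mask)[0]
#   model.bert.log_stats()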
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download an artifact zip: the API URL redirects, so fetch the redirect target first."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors and failed tests from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test path like `tests/models/<model>/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error, grouped by model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
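
# Illustrative invocation (ours; the script filename is an assumption):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_errors \
#       --token $GITHUB_TOKEN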
def partition(m: int) -> int:
    """Count the integer partitions of ``m`` via the bounded-part-size recurrence."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
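
# Quick worked check (ours): the 7 partitions of 5 are
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1, so partition(5) == 7.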
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the image processor is expected to return,
        assuming ``do_resize`` with the ``shortest_edge`` setting above."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
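# A standalone restatement of the shortest-edge resize rule that
# `get_expected_values` above encodes: scale the shorter side to
# `shortest_edge` while keeping the aspect ratio (the `longest_edge` cap the
# real processor also applies is omitted here for brevity).
def _expected_resized_size(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


assert _expected_resized_size(400, 200) == (36, 18)
assert _expected_resized_size(200, 400) == (18, 36)
assert _expected_resized_size(300, 300) == (18, 18)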
| 335 | 0 |
def __lowerCAmelCase (number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
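# Quick sanity check of the recurrence above: starting from C(1) = 1 it
# produces the Catalan numbers 1, 1, 2, 5, 14, 42, ... (1-indexed).
assert [__lowerCAmelCase(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]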
if __name__ == "__main__":
import doctest
doctest.testmod() | 267 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = "wavlm"
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=7_68 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=30_72 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=1_28 , _UpperCAmelCase=16 , _UpperCAmelCase=3_20 , _UpperCAmelCase=8_00 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=3_20 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1_00 , _UpperCAmelCase=2_56 , _UpperCAmelCase=2_56 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=2_56 , _UpperCAmelCase=(5_12, 5_12, 5_12, 5_12, 15_00) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=5_12 , _UpperCAmelCase=80 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
snake_case_ = hidden_size
snake_case_ = feat_extract_norm
snake_case_ = feat_extract_activation
snake_case_ = list(_UpperCAmelCase )
snake_case_ = list(_UpperCAmelCase )
snake_case_ = list(_UpperCAmelCase )
snake_case_ = conv_bias
snake_case_ = num_buckets
snake_case_ = max_bucket_distance
snake_case_ = num_conv_pos_embeddings
snake_case_ = num_conv_pos_embedding_groups
snake_case_ = len(self.conv_dim )
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = feat_proj_dropout
snake_case_ = final_dropout
snake_case_ = layerdrop
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = num_ctc_classes
snake_case_ = vocab_size
snake_case_ = do_stable_layer_norm
snake_case_ = use_weighted_layer_sum
snake_case_ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ = apply_spec_augment
snake_case_ = mask_time_prob
snake_case_ = mask_time_length
snake_case_ = mask_time_min_masks
snake_case_ = mask_feature_prob
snake_case_ = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case_ = num_codevectors_per_group
snake_case_ = num_codevector_groups
snake_case_ = contrastive_logits_temperature
snake_case_ = num_negatives
snake_case_ = codevector_dim
snake_case_ = proj_codevector_dim
snake_case_ = diversity_loss_weight
# ctc loss
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# adapter
snake_case_ = add_adapter
snake_case_ = adapter_kernel_size
snake_case_ = adapter_stride
snake_case_ = num_adapter_layers
snake_case_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ = list(_UpperCAmelCase )
snake_case_ = list(_UpperCAmelCase )
snake_case_ = list(_UpperCAmelCase )
snake_case_ = xvector_output_dim
@property
def UpperCamelCase__ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 267 | 1 |
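# The final property above multiplies the conv strides to get the overall
# waveform-to-frame downsampling factor. For the default strides this works
# out as follows:
import functools
import operator

_default_strides = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, _default_strides, 1) == 320
# i.e. at 16 kHz, one encoder frame per 320 samples = one frame every 20 ms.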
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case : Optional[int] = TypeVar('''T''')
class _snake_case ( Generic[T] ):
SCREAMING_SNAKE_CASE__ = 42 # Cache store of keys
SCREAMING_SNAKE_CASE__ = 42 # References of the keys in cache
SCREAMING_SNAKE_CASE__ = 10 # Maximum capacity of cache
def __init__( self , _lowerCamelCase ):
a :Optional[int] = deque()
a :int = set()
if not n:
a :Dict = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
a :int = n
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
a :str = self.dq_store.pop()
self.key_reference.remove(_lowerCamelCase )
else:
self.dq_store.remove(_lowerCamelCase )
self.dq_store.appendleft(_lowerCamelCase )
self.key_reference.add(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
for k in self.dq_store:
print(_lowerCamelCase )
def __repr__( self ):
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
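# The deque-based cache above pays O(n) for each `remove`; the standard
# library's OrderedDict gives the same LRU policy in O(1) per operation.
# A minimal equivalent sketch:
from collections import OrderedDict


class _FastLRU:
    def __init__(self, capacity: int = 4) -> None:
        self._store: OrderedDict = OrderedDict()
        self._capacity = capacity

    def refer(self, key) -> None:
        if key in self._store:
            self._store.move_to_end(key)  # O(1) bump to most-recently-used
            return
        if len(self._store) >= self._capacity:
            self._store.popitem(last=False)  # evict the least-recently-used key
        self._store[key] = None


# Same reference sequence as the demo above, same eviction result:
_cache = _FastLRU(4)
for _key in ("A", 2, 3, "A", 4, 5):
    _cache.refer(_key)
assert list(reversed(_cache._store)) == [5, 4, "A", 3]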
| 94 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
a__ : Tuple = {'UserAgent': UserAgent().random}
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
UpperCamelCase__ = script.contents[0]
UpperCamelCase__ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase_ :
def __init__( self , a ):
UpperCamelCase__ = f'''https://www.instagram.com/{username}/'''
UpperCamelCase__ = self.get_json()
def __a ( self ):
UpperCamelCase__ = requests.get(self.url , headers=a ).text
UpperCamelCase__ = BeautifulSoup(a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __a ( self ):
return self.user_data["username"]
@property
def __a ( self ):
return self.user_data["full_name"]
@property
def __a ( self ):
return self.user_data["biography"]
@property
def __a ( self ):
return self.user_data["business_email"]
@property
def __a ( self ):
return self.user_data["external_url"]
@property
def __a ( self ):
return self.user_data["edge_followed_by"]["count"]
@property
def __a ( self ):
return self.user_data["edge_follow"]["count"]
@property
def __a ( self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __a ( self ):
return self.user_data["profile_pic_url_hd"]
@property
def __a ( self ):
return self.user_data["is_verified"]
@property
def __a ( self ):
return self.user_data["is_private"]
def _UpperCamelCase ( __A = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCamelCase__ = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Any = InstagramUser('github')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 80 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__a = get_tests_dir('fixtures')
__a = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
__a = get_tests_dir('fixtures/dummy-config.json')
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 0
def a__ ( self ) -> int:
UpperCAmelCase_ : int = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Any = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase_ : List[Any] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ).to_dict()
config_dict.pop('''feature_extractor_type''' )
UpperCAmelCase_ : Dict = WavaVecaFeatureExtractor(**_SCREAMING_SNAKE_CASE )
# save in new folder
model_config.save_pretrained(_SCREAMING_SNAKE_CASE )
config.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
# make sure private variable is not incorrectly saved
UpperCAmelCase_ : List[str] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[int] = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : int = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def a__ ( self ) -> Tuple:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : Dict = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> List[str]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' ,):
UpperCAmelCase_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def a__ ( self ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase_ : Optional[int] = CustomFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> List[Any]:
class __a( __snake_case ):
"""simple docstring"""
lowerCAmelCase = True
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : Tuple = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : int = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
self.assertTrue(not hasattr(_SCREAMING_SNAKE_CASE ,'''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] | 362 |
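# The try/finally blocks above follow one idiom worth calling out: custom
# classes are registered in process-global mappings, so tests must always
# undo the registration. Condensed from the tests themselves:
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#     try:
#         ...  # use AutoFeatureExtractor.from_pretrained(...)
#     finally:
#         CONFIG_MAPPING._extra_content.pop("custom", None)
#         FEATURE_EXTRACTOR_MAPPING._extra_content.pop(CustomConfig, None)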
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (IPNDMScheduler,)
lowerCAmelCase = (('''num_inference_steps''', 50),)
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Tuple = {'''num_train_timesteps''': 1_000}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def a__ ( self ,_SCREAMING_SNAKE_CASE=0 ,**_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : int = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : Optional[Any] = 0.1 * sample
UpperCAmelCase_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Tuple = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase_ : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCAmelCase_ : Optional[int] = dummy_past_residuals[:]
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Optional[int] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ,_SCREAMING_SNAKE_CASE=0 ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : List[str] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Tuple = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = self.dummy_sample
UpperCAmelCase_ : Tuple = 0.1 * sample
UpperCAmelCase_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : str = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : Tuple = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase_ : Dict = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
            new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
            # copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : str = dummy_past_residuals[:]
UpperCAmelCase_ : Tuple = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Optional[int] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : str = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : List[str] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = 10
UpperCAmelCase_ : Tuple = self.dummy_model()
UpperCAmelCase_ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : int = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).prev_sample
return sample
def a__ ( self ) -> str:
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Any = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : int = self.get_scheduler_config()
UpperCAmelCase_ : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(_SCREAMING_SNAKE_CASE ,'''set_timesteps''' ):
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(_SCREAMING_SNAKE_CASE ,'''set_timesteps''' ):
UpperCAmelCase_ : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[Any] = scheduler.timesteps[5]
UpperCAmelCase_ : Dict = scheduler.timesteps[6]
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def a__ ( self ) -> Any:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ,time_step=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE ,time_step=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.full_loop()
UpperCAmelCase_ : str = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 2_540_529 ) < 10 | 235 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = """sew"""
def __init__( self , lowerCAmelCase=32 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3_072 , lowerCAmelCase=2 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-5 , lowerCAmelCase="group" , lowerCAmelCase="gelu" , lowerCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCAmelCase=False , lowerCAmelCase=128 , lowerCAmelCase=16 , lowerCAmelCase=True , lowerCAmelCase=0.05 , lowerCAmelCase=10 , lowerCAmelCase=2 , lowerCAmelCase=0.0 , lowerCAmelCase=10 , lowerCAmelCase=0 , lowerCAmelCase="mean" , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=256 , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , **lowerCAmelCase , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCAmelCase , pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase )
_lowercase =hidden_size
_lowercase =feat_extract_norm
_lowercase =feat_extract_activation
_lowercase =list(lowerCAmelCase )
_lowercase =list(lowerCAmelCase )
_lowercase =list(lowerCAmelCase )
_lowercase =conv_bias
_lowercase =num_conv_pos_embeddings
_lowercase =num_conv_pos_embedding_groups
_lowercase =len(self.conv_dim )
_lowercase =num_hidden_layers
_lowercase =intermediate_size
_lowercase =squeeze_factor
_lowercase =hidden_act
_lowercase =num_attention_heads
_lowercase =hidden_dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =feat_proj_dropout
_lowercase =final_dropout
_lowercase =layerdrop
_lowercase =layer_norm_eps
_lowercase =initializer_range
_lowercase =vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase =apply_spec_augment
_lowercase =mask_time_prob
_lowercase =mask_time_length
_lowercase =mask_time_min_masks
_lowercase =mask_feature_prob
_lowercase =mask_feature_length
_lowercase =mask_feature_min_masks
# ctc loss
_lowercase =ctc_loss_reduction
_lowercase =ctc_zero_infinity
# sequence classification
_lowercase =use_weighted_layer_sum
_lowercase =classifier_proj_size
@property
def A__ ( self ) -> int:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 205 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
_a = ["""input_features"""]
def __init__( self , lowerCAmelCase=80 , lowerCAmelCase=16_000 , lowerCAmelCase=160 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=0.0 , lowerCAmelCase=False , **lowerCAmelCase , ) -> Any:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , )
_lowercase =n_fft
_lowercase =hop_length
_lowercase =chunk_length
_lowercase =chunk_length * sampling_rate
_lowercase =self.n_samples // hop_length
_lowercase =sampling_rate
_lowercase =mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , )
def A__ ( self , lowerCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowercase =spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
_lowercase =log_spec[:, :-1]
_lowercase =np.maximum(lowerCAmelCase , log_spec.max() - 8.0 )
_lowercase =(log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def A__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_lowercase =np.array(lowerCAmelCase , np.intaa )
_lowercase =[]
for vector, length in zip(lowerCAmelCase , attention_mask.sum(-1 ) ):
_lowercase =(vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
_lowercase =padding_value
normed_input_values.append(lowerCAmelCase )
else:
_lowercase =[(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , lowerCAmelCase , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "max_length" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase =isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase =is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase =[np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
_lowercase =np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase =[np.asarray([raw_speech] ).T]
_lowercase =BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
_lowercase =self.pad(
lowerCAmelCase , padding=lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_lowercase =self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
_lowercase =np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
_lowercase =padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
_lowercase =[self._np_extract_fbank_features(lowerCAmelCase ) for waveform in input_features[0]]
if isinstance(input_features[0] , lowerCAmelCase ):
_lowercase =[np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in input_features]
else:
_lowercase =input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_lowercase =padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
_lowercase =padded_inputs.convert_to_tensors(lowerCAmelCase )
return padded_inputs
def A__ ( self ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase =copy.deepcopy(self.__dict__ )
_lowercase =self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
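# Shape sanity check for the defaults above: 30 s of 16 kHz audio is padded
# to 480_000 samples; with hop_length 160 that is 3_000 frames of 80 log-mel
# bins each, i.e. one (80, 3000) feature matrix per example.
_n_samples = 30 * 16_000
assert _n_samples == 480_000
assert _n_samples // 160 == 3_000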
| 205 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[int] =logging.get_logger(__name__)
_lowercase : Dict ={"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class snake_case__ (A__ ):
"""simple docstring"""
__lowerCAmelCase :List[str] = "ctrl"
__lowerCAmelCase :Union[str, Any] = ["past_key_values"]
__lowerCAmelCase :Union[str, Any] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , __lowercase=2_4_6_5_3_4 , __lowercase=2_5_6 , __lowercase=1_2_8_0 , __lowercase=8_1_9_2 , __lowercase=4_8 , __lowercase=1_6 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1E-6 , __lowercase=0.0_2 , __lowercase=True , **__lowercase , ) -> Dict:
"""simple docstring"""
a__ : List[str] = vocab_size
a__ : str = n_positions
a__ : Tuple = n_embd
a__ : Optional[int] = n_layer
a__ : int = n_head
a__ : Union[str, Any] = dff
a__ : List[str] = resid_pdrop
a__ : List[Any] = embd_pdrop
a__ : Optional[int] = layer_norm_epsilon
a__ : List[str] = initializer_range
a__ : str = use_cache
super().__init__(**__lowercase )
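# The attribute_map above lets generic code read `config.hidden_size` while
# the config actually stores `n_embd`. A minimal sketch of that aliasing
# trick (illustrative only; PretrainedConfig implements it for real):
class _AliasedConfig:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd: int = 1280) -> None:
        self.n_embd = n_embd

    def __getattr__(self, name):
        # Only reached when normal lookup fails, so aliases resolve lazily.
        alias = type(self).attribute_map.get(name)
        if alias is None:
            raise AttributeError(name)
        return getattr(self, alias)


assert _AliasedConfig().hidden_size == 1280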
| 356 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """simple docstring"""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """simple docstring"""
    n = int(input("""enter the numbers of values: """))
    y: list[list[float]] = [[0.0] * n for _ in range(n)]
    print("""enter the values of parameters in a list: """)
    x = list(map(float, input().split()))
    print("""enter the values of corresponding parameters: """)
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("""enter the value to interpolate: """))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(F'''the value at {value} is {summ}''')


if __name__ == "__main__":
    main()
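# Worked check of the forward-difference table above on f(x) = x**2 sampled
# at x = 0, 1, 2, 3: the Newton forward interpolant is exact for a
# quadratic, so the value at x = 1.5 must be 2.25.
def _interp_demo() -> float:
    n = 4
    x = [0.0, 1.0, 2.0, 3.0]
    y = [[xi * xi] + [0.0] * (n - 1) for xi in x]
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    u = (1.5 - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += ucal(u, i) * y[0][i] / math.factorial(i)
    return summ


assert abs(_interp_demo() - 2.25) < 1e-9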
| 266 | 0 |
'''simple docstring'''
import operator as op
__UpperCAmelCase = """scaler.pt"""
__UpperCAmelCase = """pytorch_model"""
__UpperCAmelCase = """random_states"""
__UpperCAmelCase = """optimizer"""
__UpperCAmelCase = """scheduler"""
__UpperCAmelCase = """pytorch_model.bin"""
__UpperCAmelCase = """pytorch_model.bin.index.json"""
__UpperCAmelCase = """model.safetensors"""
__UpperCAmelCase = """model.safetensors.index.json"""
__UpperCAmelCase = """1.10.2"""
__UpperCAmelCase = """py38"""
__UpperCAmelCase = """4.17.0"""
__UpperCAmelCase = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
__UpperCAmelCase = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
__UpperCAmelCase = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
__UpperCAmelCase = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
__UpperCAmelCase = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
__UpperCAmelCase = """2.0.1"""
__UpperCAmelCase = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
__UpperCAmelCase = ["""default""", """reduce-overhead""", """max-autotune"""]
__UpperCAmelCase = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__UpperCAmelCase = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
__UpperCAmelCase = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
__UpperCAmelCase = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 323 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCamelCase_ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
| 323 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] ):
A_ = dataset
A_ = process
A_ = params
def __len__( self : List[str] ):
return len(self.dataset )
def __getitem__( self : int , UpperCAmelCase : Tuple ):
A_ = self.dataset[i]
A_ = self.process(UpperCAmelCase , **self.params )
return processed
class PipelineIterator(IterableDataset):
    """Iterate over a DataLoader, apply `infer` to each item, and unroll batched outputs one element at a time."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        # Return the item located at `_loader_batch_index` within the current `_loader_batch_data`.
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be a BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within that batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be the last batch, so we can't unroll as many elements
                self.loader_batch_size = observed_batch_size
            # Set the internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
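# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): with a batched
# DataLoader and `loader_batch_size` set, the iterator above runs `infer` on
# whole batches but still yields one element at a time.
#
#   loader = torch.utils.data.DataLoader(list(range(8)), batch_size=4)
#   it = PipelineIterator(loader, infer=lambda batch: batch * 10, params={}, loader_batch_size=4)
#   [int(x) for x in it]  # -> [0, 10, 20, 30, 40, 50, 60, 70]
# ---------------------------------------------------------------------------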
class PipelineChunkIterator(PipelineIterator):
    """Flatten an iterator of iterators: each item of `loader` yields a sub-iterator whose elements are returned one by one."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # `subiterator` being None means we haven't started a `preprocess` iterator yet, so start one
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return the next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it is that we're basically flattening lists of lists
            # into a single list, but with generators.
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    """Regroup flattened items into lists, using the `is_last` flag to recover the original `process` boundaries."""

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism,
        # BUT we have an extra required item, which is the presence of `is_last`.
        # Because everything is flattened by `PipelineChunkIterator`, we
        # need to keep track of how to regroup here, in the original `process`
        # boundaries, so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes the accumulator on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be the last batch, so we can't unroll as many elements
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    """Wrap a dataset so that iterating it yields `dataset[i][key]`, which is what pipelines expect as input."""

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    """Wrap a dataset so that iterating it yields `{"text": dataset[i][key1], "text_pair": dataset[i][key2]}`."""

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a square matrix filled with 1..row_size**2; a zero or negative size falls back to 4."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 270 degrees counterclockwise (90 degrees clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
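# Added sanity check (illustrative): composing the rotations should behave like
# the rotation group, e.g. four 90-degree turns give back the original matrix.
if __name__ == "__main__":
    sample = make_matrix()
    assert rotate_90(rotate_90(rotate_90(rotate_90(sample)))) == sample
    assert rotate_180(sample) == rotate_90(rotate_90(sample))
    print("\nrotation sanity checks passed\n")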
"""
Processor class for ViLT.
"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """
    Constructs a ViLT processor which wraps a BERT tokenizer and a ViLT image processor into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
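# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the processor
# pairs an image with a question for a ViLT VQA checkpoint; the checkpoint
# name and image URL are the standard public examples.
# ---------------------------------------------------------------------------
# import requests
# from PIL import Image
# from transformers import ViltProcessor, ViltForQuestionAnswering
#
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# encoding = processor(image, "How many cats are there?", return_tensors="pt")
# logits = model(**encoding).logits
# print(model.config.id2label[logits.argmax(-1).item()])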
""" Testing suite for the PyTorch DPT model. """
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
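# ---------------------------------------------------------------------------
# Illustrative post-processing sketch (not part of the original test file):
# turning the raw `predicted_depth` map into a viewable 8-bit image; the
# variable names refer to the integration test above.
# ---------------------------------------------------------------------------
# prediction = torch.nn.functional.interpolate(
#     predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
# ).squeeze()
# depth = (prediction - prediction.min()) / (prediction.max() - prediction.min())
# depth_image = Image.fromarray((depth.cpu().numpy() * 255).astype("uint8"))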
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
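# Example (illustrative): for an init with a plain section and a torch-guarded
# section, a healthy result is a pair of dicts with identical keys and values,
# e.g. ({"none": [...], "torch": [...]}, {"none": [...], "torch": [...]}).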
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
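# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the test suite): the checkpoint name
# is the one exercised by the slow tests below; everything else is an example.
# ---------------------------------------------------------------------------
# init_image = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
# )
# pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
#     "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
# ).to("cuda")
# out = pipe(init_image, "anime turtle", generator=torch.Generator("cpu").manual_seed(0))
# out.images[0].save("unclip_img2img.png")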
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files


FILE_CONTENT = """\
    Text data.
    Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}

DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]

DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session')
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope='session')
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.join('main_dir', os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join('main_dir', os.path.basename(text2_path)))
    return path


@pytest.fixture(scope='session')
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.ext.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename('unsupported.ext'))
        f.write(text2_path, arcname=os.path.basename('unsupported_2.ext'))
    return path


@pytest.fixture(scope='session')
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
    path = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(text)
    return path
@pytest.fixture(scope='session')
def image_file():
    return os.path.join('tests', 'features', 'data', 'test_image_rgb.jpg')


@pytest.fixture(scope='session')
def audio_file():
    return os.path.join('tests', 'features', 'data', 'test_audio_44100.wav')


@pytest.fixture(scope='session')
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace('.jpg', '2.jpg'))
    return path
@pytest.fixture(scope='session')
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp('data_dir')

    (data_dir / 'subdir').mkdir()
    with open(data_dir / 'subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / 'subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden directory
    (data_dir / '.subdir').mkdir()
    with open(data_dir / '.subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / '.subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)

    return data_dir
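# A sketch of how these session fixtures are consumed in a test (illustrative only; it relies on
# the reconstructed fixture name `zip_text_path` above):
#
#     def test_zip_contains_both_text_files(zip_text_path):
#         with zipfile.ZipFile(zip_text_path) as zf:
#             assert sorted(zf.namelist()) == ['dataset.txt', 'dataset2.txt']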
| 29 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
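# For illustration, a configuration that satisfies the checks above (values assumed; paths are
# placeholders):
#
#     args.mlm = True; args.alpha_mlm = 0.5; args.alpha_clm = 0.0   # MLM branch: alpha_clm must be 0
#     args.student_type = "distilbert"; args.teacher_type = "bert"  # an allowed student/teacher pair
#     args.alpha_ce = 0.5                                           # total loss weight is 1.0 > 0.0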
def freeze_pos_embeddings(student, args):
    # attribute paths follow the RoBERTa / GPT-2 model layouts; the obfuscated original only
    # retained the `= False` assignments
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
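# Freezing only disables gradient tracking on the embedding weights; the rest of the student
# still trains. A quick verification sketch (assumes the RoBERTa layout reconstructed above):
#
#     freeze_pos_embeddings(student, args)
#     assert not student.roberta.embeddings.position_embeddings.weight.requires_grad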
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence."
    )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa, GPT-2)."
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa, GPT-2)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature applied to the distillation softmax.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with the `mlm` flag."
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction."
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)."
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only on the [MLM] prediction distribution."
    )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only."
    )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only."
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true."
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches."
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for the Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit."
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - local rank.")
    parser.add_argument("--seed", type=int, default=56, help="Random seed.")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
# ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
logger.info("""Student loaded.""" )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
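# Example invocation (a sketch; every file path below is a placeholder):
#
#     python train.py \
#         --student_type distilbert --student_config student_config.json \
#         --teacher_type bert --teacher_name bert-base-uncased \
#         --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#         --data_file data/binarized.pickle --token_counts data/token_counts.pickle \
#         --dump_path serialization_dir/my_run --force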
| 171 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
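        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 227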
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    # flag names assumed from the shared ModelTesterMixin conventions
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            'facebook/deit-base-distilled-patch16-224', torch_dtype=torch.float16, device_map='auto'
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 179 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):  # class name inferred from the ConvNeXt-style defaults (384 shortest edge, crop_pct 224/256)
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
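    # Worked example of the rule above (numbers assumed for illustration): with
    # size={"shortest_edge": 224} and the default crop_pct of 224/256, the short side is first
    # resized to int(224 / (224 / 256)) = 256 px, then center-cropped to 224x224 - the classic
    # "resize to 256, crop to 224" ImageNet evaluation recipe.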
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
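# A minimal usage sketch (not part of the original module; the checkpoint name is an assumption
# used for illustration):
#
#     from PIL import Image
#     processor = ConvNextImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#     batch = processor(images=Image.open("cat.jpg"), return_tensors="pt")
#     # batch["pixel_values"] has shape (1, 3, H, W) after resize/rescale/normalize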
| 22 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
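    # For reference, a sketch of how this builder is consumed (the file path is a placeholder):
    #
    #     from datasets import load_dataset
    #     ds = load_dataset("pandas", data_files={"train": "data/train.pkl"})
    #
    # Each pickled DataFrame is read in _generate_tables below and converted to an Arrow table.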
    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 190 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
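# A minimal sketch of the deprecation behaviour (illustrative only):
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         BeitFeatureExtractor()
#     assert any("deprecated" in str(w.message) for w in caught)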
| 368 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFMobileBertModel,
            'fill-mask': TFMobileBertForMaskedLM,
            'question-answering': TFMobileBertForQuestionAnswering,
            'text-classification': TFMobileBertForSequenceClassification,
            'token-classification': TFMobileBertForTokenClassification,
            'zero-shot': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    # flag names assumed from the shared TFModelTesterMixin conventions
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 300 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
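# Why the factory: dataclasses reject mutable defaults such as `[]`, so `list_field` hides the
# list behind `default_factory`. A minimal sketch of the failure it avoids:
#
#     @dataclass
#     class Bad:
#         xs: list = []                             # raises ValueError: mutable default
#
#     @dataclass
#     class Good:
#         xs: list = field(default_factory=list)    # fresh list per instance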
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: "int"
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePt310:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePt310:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePt310)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]), )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]), )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
def UpperCAmelCase_ ( self : int ) -> List[str]:
"""simple docstring"""
snake_case_ : Union[str, Any] = HfArgumentParser(_UpperCAmelCase )
snake_case_ : str = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_UpperCAmelCase )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_UpperCAmelCase )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Tuple = parser.parse_args([] )
self.assertEqual(
_UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ : List[Any] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(_UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def UpperCAmelCase_ ( self : str ) -> int:
"""simple docstring"""
snake_case_ : int = argparse.ArgumentParser()
expected.add_argument('--foo' , default=_UpperCAmelCase , type=_UpperCAmelCase )
expected.add_argument('--bar' , default=_UpperCAmelCase , type=_UpperCAmelCase , help='help message' )
expected.add_argument('--baz' , default=_UpperCAmelCase , type=_UpperCAmelCase )
expected.add_argument('--ces' , nargs='+' , default=[] , type=_UpperCAmelCase )
expected.add_argument('--des' , nargs='+' , default=[] , type=_UpperCAmelCase )
        dataclass_types = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase )
for dataclass_type in dataclass_types:
snake_case_ : Tuple = HfArgumentParser(_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , bar=_UpperCAmelCase , baz=_UpperCAmelCase , ces=[] , des=[] ) )
snake_case_ : Dict = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
        self.assertEqual(_UpperCAmelCase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def UpperCAmelCase_ ( self : str ) -> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = HfArgumentParser(_UpperCAmelCase )
snake_case_ : Tuple = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument('--required_str' , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
"""simple docstring"""
snake_case_ : List[Any] = HfArgumentParser(_UpperCAmelCase )
snake_case_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase )
expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase )
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]
            args = BasicExample(**args_dict_for_json)
            self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
            args = BasicExample(**args_dict_for_yaml)
            self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 327 |
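# A minimal, hedged sketch of the HfArgumentParser pattern the tests above
# exercise: a dataclass declares the CLI surface and parse_args_into_dataclasses
# returns populated instances. `DemoArgs` is a made-up dataclass, not one of the
# test fixtures.
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class DemoArgs:
    foo: int = 42
    bar: float = 3.14
    baz: Optional[str] = field(default=None, metadata={"help": "help message"})


(demo,) = HfArgumentParser(DemoArgs).parse_args_into_dataclasses(["--foo", "1"])
print(demo.foo, demo.bar)  # 1 3.14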
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """
    Multiplication only for 2x2 matrices.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """
    Split an even-length square matrix into its four quadrants.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """
    Recursive Strassen multiplication for square matrices whose size is a power of 2.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    """
    Strassen multiplication for arbitrary compatible matrices: pads with zeros up
    to the next power-of-two square size, multiplies, then strips the padding.
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 305 | 0 |
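# Quick cross-check for the Strassen module above: for any valid input its output
# should match a naive O(n^3) multiplication. This helper is standalone; compare
# naive_matmul(a, b) with strassen(a, b) when both live in the same module.
def naive_matmul(a: list, b: list) -> list:
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]


print(naive_matmul([[1, 2], [3, 4]], [[5, 6], [7, 8]]))  # [[19, 22], [43, 50]]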
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},  # Enable reading proxy env variables.
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 371 |
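# Standalone sketch of the directory-index idea behind _get_dirs above: every
# file path contributes itself plus all of its parent directories to one flat
# cache keyed by path. The file names are made up.
from pathlib import PurePosixPath


def build_dir_cache(filenames):
    cache = {}
    for name in filenames:
        cache[name] = {"name": name, "size": None, "type": "file"}
        for parent in list(PurePosixPath(name).parents)[:-1]:
            cache[str(parent)] = {"name": str(parent), "size": None, "type": "directory"}
    return cache


print(build_dir_cache(["data/train.csv", "data/test.csv", "README.md"]))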
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 230 | 0 |
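# Minimal sketch of the subcommand pattern used by the CLI above: each command
# registers a subparser and sets a `func` default that the dispatcher calls.
# `GreetCommand` is an illustrative stand-in, not part of the diffusers CLI.
from argparse import ArgumentParser


class GreetCommand:
    @staticmethod
    def register_subcommand(subparsers):
        greet = subparsers.add_parser("greet")
        greet.add_argument("--name", default="world")
        greet.set_defaults(func=lambda args: print(f"hello {args.name}"))


parser = ArgumentParser("demo-cli")
GreetCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["greet", "--name", "diffusers"])
args.func(args)  # prints: hello diffusers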
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 51 |
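# Generic version of the deprecation shim above: a subclass whose only job is to
# warn before delegating to its parent. `OldTrainer`/`NewTrainer` are placeholder
# names for illustration.
import warnings


class NewTrainer:
    def __init__(self, args=None, **kwargs):
        self.args = args


class OldTrainer(NewTrainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn("`OldTrainer` is deprecated; use `NewTrainer` instead.", FutureWarning)
        super().__init__(args=args, **kwargs)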
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 | 1 |
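# The _import_structure/_LazyModule pattern above defers heavy submodule imports
# until an attribute is first accessed. A minimal standalone analogue uses PEP 562
# module-level __getattr__; this sketch assumes it lives in a package __init__.py
# with a real `math_utils` submodule, both of which are illustrative.
import importlib

_import_structure = {"math_utils": ["fast_sqrt"]}  # submodule -> exported names


def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")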
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Construct a Speech2Text tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , __a , __a , __a="<s>" , __a="</s>" , __a="<pad>" , __a="<unk>" , __a=False , __a=False , __a=None , __a=None , __a = None , **__a , ):
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__lowerCAmelCase = do_upper_case
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = load_json(__a )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase = spm_file
__lowerCAmelCase = load_spm(__a , self.sp_model_kwargs )
if lang_codes is not None:
__lowerCAmelCase = lang_codes
__lowerCAmelCase = LANGUAGES[lang_codes]
__lowerCAmelCase = [f"<lang:{lang}>" for lang in self.langs]
__lowerCAmelCase = {lang: self.sp_model.PieceToId(f"<lang:{lang}>" ) for lang in self.langs}
__lowerCAmelCase = self.lang_tokens
__lowerCAmelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__lowerCAmelCase = {}
@property
def snake_case ( self ):
return len(self.encoder )
@property
def snake_case ( self ):
return self._tgt_lang
@tgt_lang.setter
def snake_case ( self , __a ):
__lowerCAmelCase = new_tgt_lang
self.set_tgt_lang_special_tokens(__a )
def snake_case ( self , __a ):
__lowerCAmelCase = self.lang_code_to_id[tgt_lang]
__lowerCAmelCase = [lang_code_id]
def snake_case ( self , __a ):
return self.sp_model.encode(__a , out_type=__a )
def snake_case ( self , __a ):
return self.encoder.get(__a , self.encoder[self.unk_token] )
def snake_case ( self , __a ):
return self.decoder.get(__a , self.unk_token )
def snake_case ( self , __a ):
__lowerCAmelCase = []
__lowerCAmelCase = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__lowerCAmelCase = self.sp_model.decode(__a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__lowerCAmelCase = []
else:
current_sub_tokens.append(__a )
__lowerCAmelCase = self.sp_model.decode(__a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def snake_case ( self , __a , __a=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case ( self , __a , __a = None , __a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
__lowerCAmelCase = [1] * len(self.prefix_tokens )
__lowerCAmelCase = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def snake_case ( self ):
__lowerCAmelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self , __a ):
__lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__lowerCAmelCase = {}
__lowerCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = Path(__a )
assert save_dir.is_dir(), f"{save_directory} should be a directory"
__lowerCAmelCase = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
__lowerCAmelCase = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , "wb" ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 259 |
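# Hedged illustration of build_inputs_with_special_tokens / get_special_tokens_mask
# above: the tokenizer wraps raw ids with a language-token prefix and a trailing
# EOS, and the mask marks exactly those added positions. Token ids are made up.
prefix_tokens = [9]  # e.g. the id of <lang:fr>
eos_token_id = 2
token_ids = [17, 42, 8]

input_ids = prefix_tokens + token_ids + [eos_token_id]
special_tokens_mask = [1] * len(prefix_tokens) + [0] * len(token_ids) + [1]
print(input_ids)            # [9, 17, 42, 8, 2]
print(special_tokens_mask)  # [1, 0, 0, 0, 1]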
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """
    Generate a random graph with `vertices_number` vertices, where every possible
    edge is created independently with the given probability.
    """
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than the probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """
    Generate a complete graph with the given number of vertices.
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259 | 1 |
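# Inline sketch of what random_graph(4, 0.5) above does for an undirected graph:
# each unordered pair gets an edge with probability 0.5, mirrored in both
# adjacency lists. Seeded so the example is reproducible.
import random

random.seed(0)
graph = {i: [] for i in range(4)}
for i in range(4):
    for j in range(i + 1, 4):
        if random.random() < 0.5:
            graph[i].append(j)
            graph[j].append(i)  # undirected: mirror the edge
print(graph)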
from collections import deque
from math import floor
from random import random
from time import time
class a__ :
"""simple docstring"""
def __init__( self ) -> Dict:
'''simple docstring'''
A__ = {}
def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Tuple:
'''simple docstring'''
if self.graph.get(lowercase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A__ = [[w, v]]
if not self.graph.get(lowercase ):
A__ = []
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return list(self.graph )
def UpperCamelCase ( self , lowercase , lowercase ) -> int:
'''simple docstring'''
if self.graph.get(lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any:
'''simple docstring'''
if s == d:
return []
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return visited
def UpperCamelCase ( self , lowercase=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
A__ = floor(random() * 10000 ) + 10
for i in range(lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1 )
def UpperCamelCase ( self , lowercase=-2 ) -> Any:
'''simple docstring'''
A__ = deque()
A__ = []
if s == -2:
A__ = list(self.graph )[0]
d.append(lowercase )
visited.append(lowercase )
while d:
A__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
return len(self.graph[u] )
def UpperCamelCase ( self , lowercase=-2 ) -> str:
'''simple docstring'''
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
A__ = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return sorted_nodes
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return list(lowercase )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return False
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any:
'''simple docstring'''
A__ = time()
self.dfs(lowercase , lowercase )
A__ = time()
return end - begin
def UpperCamelCase ( self , lowercase=-2 ) -> int:
'''simple docstring'''
A__ = time()
self.bfs(lowercase )
A__ = time()
return end - begin
class a__ :
"""simple docstring"""
def __init__( self ) -> int:
'''simple docstring'''
A__ = {}
def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Union[str, Any]:
'''simple docstring'''
if self.graph.get(lowercase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A__ = [[w, v]]
# add the other way
if self.graph.get(lowercase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
A__ = [[w, u]]
def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if self.graph.get(lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase )
# the other way round
if self.graph.get(lowercase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowercase )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> List[str]:
'''simple docstring'''
if s == d:
return []
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return visited
def UpperCamelCase ( self , lowercase=-1 ) -> str:
'''simple docstring'''
if c == -1:
A__ = floor(random() * 10000 ) + 10
for i in range(lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1 )
def UpperCamelCase ( self , lowercase=-2 ) -> Dict:
'''simple docstring'''
A__ = deque()
A__ = []
if s == -2:
A__ = list(self.graph )[0]
d.append(lowercase )
visited.append(lowercase )
while d:
A__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
return len(self.graph[u] )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
            # check if we have reached the starting point
if len(lowercase ) == 0:
return list(lowercase )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return False
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
return list(self.graph )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Optional[Any]:
'''simple docstring'''
A__ = time()
self.dfs(lowercase , lowercase )
A__ = time()
return end - begin
def UpperCamelCase ( self , lowercase=-2 ) -> List[Any]:
'''simple docstring'''
A__ = time()
self.bfs(lowercase )
A__ = time()
return end - begin
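# Standalone sketch of the adjacency convention used by the graph classes above:
# each vertex maps to a list of [weight, neighbor] pairs, and BFS walks them with
# a deque. The graph itself is made up.
from collections import deque

graph = {0: [[1, 1], [1, 2]], 1: [[1, 3]], 2: [], 3: []}


def bfs(start):
    visited, queue = [start], deque([start])
    while queue:
        s = queue.popleft()
        for _weight, neighbor in graph[s]:
            if neighbor not in visited:
                visited.append(neighbor)
                queue.append(neighbor)
    return visited


print(bfs(0))  # [0, 1, 2, 3]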
| 68 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """
    Sort input_list[low:mid] and input_list[mid:high + 1] into the slice
    input_list[low:high + 1].
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """
    Return a sorted copy of the input list, using bottom-up (iterative) merge sort.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 343 | 0 |
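# Standalone illustration of the in-place merge step used above: the two sorted
# halves are popped in order into `result`, then written back into the slice.
def merge_slice(xs: list, low: int, mid: int, high: int) -> list:
    left, right = xs[low:mid], xs[mid : high + 1]
    result = []
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    xs[low : high + 1] = result + left + right
    return xs


print(merge_slice([3, 7, 2, 9], 0, 2, 3))  # [2, 3, 7, 9]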
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """
    Gives the euclidean distance between two points.
    """
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """
    Classifies the point using the KNN algorithm over the training data.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 357 |
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """
    Gives the euclidean distance between two points.
    """
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """
    Classifies the point using the KNN algorithm over the training data.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 153 | 0 |
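# Worked check of the euclidean_distance helper above: for the points (0, 0) and
# (3, 4) the distance is sqrt(3**2 + 4**2) = 5.0.
import numpy as np

a, b = np.array([0.0, 0.0]), np.array([3.0, 4.0])
print(np.linalg.norm(a - b))  # 5.0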
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase : Optional[int] = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = 42
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Dict , UpperCamelCase : PriorTransformer , UpperCamelCase : CLIPVisionModel , UpperCamelCase : CLIPImageProcessor , UpperCamelCase : HeunDiscreteScheduler , UpperCamelCase : ShapERenderer , ):
'''simple docstring'''
super().__init__()
self.register_modules(
prior=UpperCamelCase , image_encoder=UpperCamelCase , image_processor=UpperCamelCase , scheduler=UpperCamelCase , renderer=UpperCamelCase , )
def lowerCamelCase__ ( self : str , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] ):
'''simple docstring'''
if latents is None:
__UpperCAmelCase : str = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
__UpperCAmelCase : Any = latents.to(UpperCamelCase )
__UpperCAmelCase : Optional[int] = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : int=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__UpperCAmelCase : Optional[int] = torch.device(f'''cuda:{gpu_id}''' )
__UpperCAmelCase : Any = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase , UpperCamelCase )
@property
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCamelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase__ ( self : Any , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ):
'''simple docstring'''
if isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
__UpperCAmelCase : Dict = torch.cat(UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase , axis=0 )
if not isinstance(UpperCamelCase , torch.Tensor ):
__UpperCAmelCase : Dict = self.image_processor(UpperCamelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
__UpperCAmelCase : str = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase )
__UpperCAmelCase : List[Any] = self.image_encoder(UpperCamelCase )["""last_hidden_state"""]
__UpperCAmelCase : int = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
__UpperCAmelCase : Any = image_embeds.repeat_interleave(UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = torch.zeros_like(UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__UpperCAmelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCamelCase )
def __call__( self : Union[str, Any] , UpperCamelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase : int = 1 , UpperCamelCase : int = 25 , UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : float = 4.0 , UpperCamelCase : int = 64 , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , ):
'''simple docstring'''
if isinstance(UpperCamelCase , PIL.Image.Image ):
__UpperCAmelCase : Any = 1
elif isinstance(UpperCamelCase , torch.Tensor ):
__UpperCAmelCase : Optional[Any] = image.shape[0]
elif isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
__UpperCAmelCase : List[Any] = len(UpperCamelCase )
else:
raise ValueError(
f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase )}''' )
__UpperCAmelCase : List[Any] = self._execution_device
__UpperCAmelCase : List[str] = batch_size * num_images_per_prompt
__UpperCAmelCase : int = guidance_scale > 1.0
__UpperCAmelCase : Optional[int] = self._encode_image(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# prior
self.scheduler.set_timesteps(UpperCamelCase , device=UpperCamelCase )
__UpperCAmelCase : Any = self.scheduler.timesteps
__UpperCAmelCase : List[str] = self.prior.config.num_embeddings
__UpperCAmelCase : Optional[int] = self.prior.config.embedding_dim
__UpperCAmelCase : Dict = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase , UpperCamelCase , UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
__UpperCAmelCase : Tuple = latents.reshape(latents.shape[0] , UpperCamelCase , UpperCamelCase )
for i, t in enumerate(self.progress_bar(UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__UpperCAmelCase : Dict = self.scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Tuple = self.prior(
UpperCamelCase , timestep=UpperCamelCase , proj_embedding=UpperCamelCase , ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
__UpperCAmelCase : List[str] = self.scheduler.step(
UpperCamelCase , timestep=UpperCamelCase , sample=UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCamelCase )
__UpperCAmelCase : List[str] = []
for i, latent in enumerate(UpperCamelCase ):
print()
__UpperCAmelCase : Tuple = self.renderer.decode(
latent[None, :] , UpperCamelCase , size=UpperCamelCase , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCamelCase )
__UpperCAmelCase : Optional[int] = torch.stack(UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
__UpperCAmelCase : List[Any] = images.cpu().numpy()
if output_type == "pil":
__UpperCAmelCase : Union[str, Any] = [self.numpy_to_pil(UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCamelCase )
| 115 |
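# Minimal numeric sketch of the classifier-free guidance step in the denoising
# loop above: the batched prediction splits into unconditional and conditional
# halves, then gets extrapolated by the guidance scale. The tensor values are
# made up.
import torch

guidance_scale = 3.0
noise_pred = torch.tensor([[0.1], [0.4]])  # [uncond, cond] stacked along dim 0
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
print(guided)  # tensor([[1.0000]])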
"""simple docstring"""
import math
def sieve(n: int) -> list[int]:
    """
    Segmented sieve: returns all primes up to and including n.
    """
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
print(sieve(10**6))
| 115 | 1 |
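# Cross-check for the segmented sieve above: trial division yields the same
# primes for small bounds, so primes_upto(n) should equal sieve(n).
def primes_upto(n: int) -> list:
    return [p for p in range(2, n + 1) if all(p % d for d in range(2, int(p**0.5) + 1))]


print(primes_upto(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]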
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig(PretrainedConfig):
    """Configuration class for the ALIGN text model."""

    model_type = "align_text_model"
def __init__( self , lowerCamelCase_=3_05_22 , lowerCamelCase_=7_68 , lowerCamelCase_=12 , lowerCamelCase_=12 , lowerCamelCase_=30_72 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_=0 , lowerCamelCase_="absolute" , lowerCamelCase_=True , **lowerCamelCase_ , ) -> Optional[Any]:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = position_embedding_type
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = pad_token_id
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ , **lowerCamelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCamelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
lowerCAmelCase__ = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class AlignVisionConfig(PretrainedConfig):
    """Configuration class for the ALIGN vision model."""

    model_type = "align_vision_model"
def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 6_00 , lowerCamelCase_ = 2.0 , lowerCamelCase_ = 3.1 , lowerCamelCase_ = 8 , lowerCamelCase_ = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ = [] , lowerCamelCase_ = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ = 0.25 , lowerCamelCase_ = "swish" , lowerCamelCase_ = 25_60 , lowerCamelCase_ = "mean" , lowerCamelCase_ = 0.02 , lowerCamelCase_ = 0.001 , lowerCamelCase_ = 0.99 , lowerCamelCase_ = 0.2 , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = image_size
lowerCAmelCase__ = width_coefficient
lowerCAmelCase__ = depth_coefficient
lowerCAmelCase__ = depth_divisor
lowerCAmelCase__ = kernel_sizes
lowerCAmelCase__ = in_channels
lowerCAmelCase__ = out_channels
lowerCAmelCase__ = depthwise_padding
lowerCAmelCase__ = strides
lowerCAmelCase__ = num_block_repeats
lowerCAmelCase__ = expand_ratios
lowerCAmelCase__ = squeeze_expansion_ratio
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dim
lowerCAmelCase__ = pooling_type
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = batch_norm_eps
lowerCAmelCase__ = batch_norm_momentum
lowerCAmelCase__ = drop_connect_rate
lowerCAmelCase__ = sum(lowerCamelCase_ ) * 4
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ , **lowerCamelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowerCamelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
lowerCAmelCase__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class AlignConfig(PretrainedConfig):
    """Configuration class that composes the text and vision configs for ALIGN."""

    model_type = "align"
    is_composition = True
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=6_40 , lowerCamelCase_=1.0 , lowerCamelCase_=0.02 , **lowerCamelCase_ , ) -> Optional[Any]:
super().__init__(**lowerCamelCase_ )
if text_config is None:
lowerCAmelCase__ = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
lowerCAmelCase__ = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
lowerCAmelCase__ = AlignTextConfig(**lowerCamelCase_ )
lowerCAmelCase__ = AlignVisionConfig(**lowerCamelCase_ )
lowerCAmelCase__ = projection_dim
lowerCAmelCase__ = temperature_init_value
lowerCAmelCase__ = initializer_range
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ = self.text_config.to_dict()
lowerCAmelCase__ = self.vision_config.to_dict()
lowerCAmelCase__ = self.__class__.model_type
        return output
| 228 |
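# Hedged sketch of the composite-config pattern above: the parent config owns one
# sub-config per modality and to_dict() serializes them back into plain dicts.
# Class and field names here are illustrative, not the transformers API.
class SubConfig:
    def __init__(self, hidden_size=768, **kwargs):
        self.hidden_size = hidden_size

    def to_dict(self):
        return {"hidden_size": self.hidden_size}


class ComposedConfig:
    def __init__(self, text_config=None, vision_config=None):
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))

    def to_dict(self):
        return {
            "text_config": self.text_config.to_dict(),
            "vision_config": self.vision_config.to_dict(),
        }


print(ComposedConfig(text_config={"hidden_size": 512}).to_dict())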
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = "bz2"
lowercase__ : str = "bz2"
lowercase__ : Optional[int] = ".bz2"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Any = "gzip"
lowercase__ : int = "gzip"
lowercase__ : int = ".gz"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Tuple = "lz4"
lowercase__ : Optional[Any] = "lz4"
lowercase__ : int = ".lz4"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[int] = "xz"
lowercase__ : str = "xz"
lowercase__ : List[Any] = ".xz"
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Union[str, Any] = "zstd"
lowercase__ : Union[str, Any] = "zstd"
lowercase__ : Dict = ".zst"
def __init__( self , lowerCamelCase_ , lowerCamelCase_ = "rb" , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = DEFAULT_BLOCK_SIZE , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(
fo=lowerCamelCase_ , mode=lowerCamelCase_ , target_protocol=lowerCamelCase_ , target_options=lowerCamelCase_ , block_size=lowerCamelCase_ , **lowerCamelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCAmelCase__ = self.file.__enter__
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = file_
def __enter__( self ) -> Tuple:
self._file.__enter__()
return self
def __exit__( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> str:
self._file.__exit__(*lowerCamelCase_ , **lowerCamelCase_ )
def __iter__( self ) -> Any:
return iter(self._file )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return next(self._file )
def __getattr__( self , lowerCamelCase_ ) -> str:
return getattr(self._file , lowerCamelCase_ )
def fixed_enter(*lowerCamelCase_ , **lowerCamelCase_ ):
return WrappedFile(_enter(*lowerCamelCase_ , **lowerCamelCase_ ) )
lowerCAmelCase__ = fixed_enter | 228 | 1 |
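# Minimal sketch of the mechanism the compression filesystems above delegate
# to: fsspec decompresses transparently when `compression=` is passed. The
# path below is hypothetical; any local .gz file works.
import gzip
import fsspec
with gzip.open("/tmp/example.txt.gz", "wt") as f:
    f.write("hello")
with fsspec.open("/tmp/example.txt.gz", mode="rt", compression="gzip") as f:
    assert f.read() == "hello"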
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total "codepoints"
_A = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_A = 0
_A = 0xe_0_0_0
_A = 0xe_0_0_1
_A = 0xe_0_0_2
_A = 0xe_0_0_3
_A = 0xe_0_0_4
# Maps special codepoints to human-readable names.
_A = {
    # Special symbols are represented using codepoint values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A_=chr(A_ ) , A_=chr(A_ ) , A_=chr(A_ ) , A_=chr(A_ ) , A_=chr(A_ ) , A_=chr(A_ ) , A_=False , A_=2048 , **A_ , ) -> str:
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , model_max_length=A_ , **A_ , )
# Creates a mapping for looking up the IDs of special symbols.
__UpperCamelCase ={}
for codepoint, name in SPECIAL_CODEPOINTS.items():
__UpperCamelCase =codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
__UpperCamelCase ={
codepoint: name for name, codepoint in self._special_codepoints.items()
}
__UpperCamelCase =UNICODE_VOCAB_SIZE
__UpperCamelCase =len(self._special_codepoints )
@property
def _a ( self ) -> int:
return self._unicode_vocab_size
def _a ( self , A_ ) -> List[str]:
return list(A_ )
def _a ( self , A_ ) -> int:
try:
return ord(A_ )
except TypeError:
raise ValueError(f'invalid token: \'{token}\'' )
def _a ( self , A_ ) -> str:
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(A_ )
except TypeError:
raise ValueError(f'invalid id: {index}' )
def _a ( self , A_ ) -> List[str]:
return "".join(A_ )
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
__UpperCamelCase =cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] + ([0] * len(A_ )) + [1]
if token_ids_a is not None:
result += ([0] * len(A_ )) + [1]
return result
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
__UpperCamelCase =len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def _a ( self , A_ , A_ = None ) -> List[Any]:
return ()
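# Standalone sketch of the character-level scheme the CANINE tokenizer above
# implements: each character maps to its Unicode codepoint, and special tokens
# live in the Private Use Area. The values PAD=0, CLS=0xE000, SEP=0xE001 follow
# the upstream CANINE constants; the obfuscated `_A` names above hide which is which.
CLS_CP, SEP_CP = 0xE000, 0xE001
text = "hi"
ids = [CLS_CP] + [ord(c) for c in text] + [SEP_CP]
assert ids == [0xE000, 104, 105, 0xE001]
assert "".join(chr(i) for i in ids[1:-1]) == "hi"  # decoding drops the specials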
| 62 |
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class a__:
def __init__( self : List[Any] , __snake_case : Union[str, Any] ):
if isinstance(__snake_case , __snake_case ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : str = deepcopy(__snake_case )
elif os.path.exists(__snake_case ):
with io.open(__snake_case , 'r' , encoding='utf-8' ) as f:
a : Optional[Any] = json.load(__snake_case )
else:
try:
a : Any = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' )
a : Union[str, Any] = json.loads(__snake_case )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
a : List[str] = config
self.set_stage_and_offload()
def lowercase_ ( self : List[str] ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
a : Dict = self.get_value('zero_optimization.stage' , -1 )
# offload
a : str = False
if self.is_zeroa() or self.is_zeroa():
a : Union[str, Any] = set(['cpu', 'nvme'] )
a : Optional[Any] = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
a : List[str] = True
def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ):
a : str = self.config
# find the config node of interest if it exists
a : List[str] = ds_key_long.split('.' )
a : Dict = nodes.pop()
for node in nodes:
a : List[Any] = config.get(__snake_case )
if config is None:
return None, ds_key
return config, ds_key
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=None ):
a , a : List[Any] = self.find_config_node(__snake_case )
if config is None:
return default
return config.get(__snake_case , __snake_case )
def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : List[str]=False ):
a : Optional[Any] = self.config
# find the config node of interest if it exists
a : List[str] = ds_key_long.split('.' )
for node in nodes:
a : str = config
a : Dict = config.get(__snake_case )
if config is None:
if must_exist:
raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] ):
a : Union[str, Any] = self.get_value(__snake_case )
return False if value is None else bool(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : str ):
a : Optional[Any] = self.get_value(__snake_case )
return False if value is None else not bool(__snake_case )
def lowercase_ ( self : Optional[Any] ):
return self._stage == 2
def lowercase_ ( self : Union[str, Any] ):
return self._stage == 3
def lowercase_ ( self : str ):
return self._offload
class a__:
def __init__( self : Tuple , __snake_case : str ):
a : Optional[Any] = engine
def lowercase_ ( self : Union[str, Any] , __snake_case : str , **__snake_case : Tuple ):
# runs backpropagation and handles mixed precision
self.engine.backward(__snake_case , **__snake_case )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases, thus enabling a simple
# training loop that works transparently under many training regimes.
class a__( lowerCamelCase__ ):
def __init__( self : str , __snake_case : List[str] ):
super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case )
a : Optional[Any] = hasattr(self.optimizer , 'overflow' )
def lowercase_ ( self : Dict , __snake_case : Dict=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowercase_ ( self : Optional[Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowercase_ ( self : Tuple ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class a__( lowerCamelCase__ ):
def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
super().__init__(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class a__:
def __init__( self : List[Any] , __snake_case : str , __snake_case : Dict=0.001 , __snake_case : Union[str, Any]=0 , **__snake_case : List[Any] ):
a : Optional[Any] = params
a : str = lr
a : List[str] = weight_decay
a : str = kwargs
class a__:
def __init__( self : str , __snake_case : Optional[Any] , __snake_case : List[str]=None , __snake_case : Tuple=0 , **__snake_case : Any ):
a : Union[str, Any] = optimizer
a : Any = total_num_steps
a : List[str] = warmup_num_steps
a : int = kwargs | 297 | 0 |
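# Standalone sketch of the dotted-key lookup that `find_config_node` /
# `get_value` above perform on a nested DeepSpeed config dict; this is a
# minimal re-implementation, not the class's own method.
def get_value(config: dict, ds_key_long: str, default=None):
    *nodes, leaf = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(leaf, default)
cfg = {"zero_optimization": {"stage": 3}}
assert get_value(cfg, "zero_optimization.stage") == 3
assert get_value(cfg, "zero_optimization.offload_param.device", "none") == "none"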
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = None
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = True
UpperCamelCase__ = None
UpperCamelCase__ = 1
UpperCamelCase__ = None
UpperCamelCase__ = False
UpperCamelCase__ = None
UpperCamelCase__ = None
def lowercase__ ( self : Any )->"DownloadConfig":
return self.__class__(**{k: copy.deepcopy(__UpperCamelCase ) for k, v in self.__dict__.items()} )
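# Standalone sketch of the deep-copy pattern the dataclass method above uses,
# showing why mutable fields must be deep-copied; `Cfg` is illustrative only.
import copy
from dataclasses import dataclass, field
@dataclass
class Cfg:
    retries: int = 1
    headers: dict = field(default_factory=dict)
    def copy(self) -> "Cfg":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
a = Cfg(headers={"x": "1"})
b = a.copy()
b.headers["x"] = "2"
assert a.headers["x"] == "1"  # the copy shares no mutable state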
| 326 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class _a ( logging.LoggerAdapter):
"""simple docstring"""
@staticmethod
def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int:
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase )
if self.isEnabledFor(__UpperCamelCase ):
if self._should_log(__UpperCamelCase ):
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
elif in_order:
_UpperCAmelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
state.wait_for_everyone()
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ):
'''simple docstring'''
if log_level is None:
_UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
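# Minimal sketch of a main-process-only LoggerAdapter, independent of
# Accelerate; the RANK environment variable stands in for
# PartialState.is_main_process and is an assumption of this sketch.
import logging
import os
class MainProcessAdapter(logging.LoggerAdapter):
    def log(self, level, msg, *args, main_process_only=True, **kwargs):
        is_main = os.environ.get("RANK", "0") == "0"
        if not main_process_only or is_main:
            self.logger.log(level, msg, *args, **kwargs)
logging.basicConfig(level=logging.INFO)
demo_logger = MainProcessAdapter(logging.getLogger("demo"), {})
demo_logger.log(logging.INFO, "printed once per job, not once per process")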
| 326 | 1 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _a :
'''simple docstring'''
def __init__( self, A, A, A, A, A, A=0.2, A=0.2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = bp_numa
SCREAMING_SNAKE_CASE : List[str] = bp_numa
SCREAMING_SNAKE_CASE : Dict = bp_numa
SCREAMING_SNAKE_CASE : List[Any] = conva_get[:2]
SCREAMING_SNAKE_CASE : Any = conva_get[2]
SCREAMING_SNAKE_CASE : Optional[Any] = size_pa
SCREAMING_SNAKE_CASE : Tuple = rate_w
SCREAMING_SNAKE_CASE : Optional[Any] = rate_t
SCREAMING_SNAKE_CASE : List[str] = [
np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE : str = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE : Optional[Any] = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE : List[str] = -2 * np.random.rand(self.num_bpa ) + 1
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(A, 'wb' ) as f:
pickle.dump(A, A )
print(F"Model saved: {save_path}" )
@classmethod
def UpperCamelCase_ ( cls, A ):
'''simple docstring'''
with open(A, 'rb' ) as f:
SCREAMING_SNAKE_CASE : int = pickle.load(A ) # noqa: S301
SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
SCREAMING_SNAKE_CASE : List[Any] = model_dic.get('size_pooling1' )
SCREAMING_SNAKE_CASE : Any = model_dic.get('num_bp1' )
SCREAMING_SNAKE_CASE : List[Any] = model_dic.get('num_bp2' )
SCREAMING_SNAKE_CASE : Dict = model_dic.get('num_bp3' )
SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get('rate_weight' )
SCREAMING_SNAKE_CASE : Optional[int] = model_dic.get('rate_thre' )
# create model instance
SCREAMING_SNAKE_CASE : str = CNN(A, A, A, A, A, A, A )
# modify model parameter
SCREAMING_SNAKE_CASE : List[str] = model_dic.get('w_conv1' )
SCREAMING_SNAKE_CASE : Optional[int] = model_dic.get('wkj' )
SCREAMING_SNAKE_CASE : int = model_dic.get('vji' )
SCREAMING_SNAKE_CASE : Optional[int] = model_dic.get('thre_conv1' )
SCREAMING_SNAKE_CASE : Any = model_dic.get('thre_bp2' )
SCREAMING_SNAKE_CASE : str = model_dic.get('thre_bp3' )
return conv_ins
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return round(A, 3 )
def UpperCamelCase_ ( self, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = convs[0]
SCREAMING_SNAKE_CASE : Optional[Any] = convs[1]
SCREAMING_SNAKE_CASE : str = np.shape(A )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE : Optional[int] = []
for i_focus in range(0, size_data - size_conv + 1, A ):
for j_focus in range(0, size_data - size_conv + 1, A ):
SCREAMING_SNAKE_CASE : str = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(A )
        # calculate the feature map of every single kernel, and save it as a list of matrices
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(A ):
SCREAMING_SNAKE_CASE : Any = []
for i_focus in range(len(A ) ):
SCREAMING_SNAKE_CASE : List[Any] = (
np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(A ) )
SCREAMING_SNAKE_CASE : int = np.asmatrix(A ).reshape(
A, A )
data_featuremap.append(A )
        # expanding the data slice to one dimension
SCREAMING_SNAKE_CASE : Tuple = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(A ) )
SCREAMING_SNAKE_CASE : List[str] = np.asarray(A )
return focus_list, data_featuremap
def UpperCamelCase_ ( self, A, A, A="average_pool" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = len(featuremaps[0] )
SCREAMING_SNAKE_CASE : Any = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i_map in range(len(A ) ):
SCREAMING_SNAKE_CASE : Tuple = featuremaps[i_map]
SCREAMING_SNAKE_CASE : int = []
for i_focus in range(0, A, A ):
for j_focus in range(0, A, A ):
SCREAMING_SNAKE_CASE : List[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(A ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(A ) )
SCREAMING_SNAKE_CASE : Optional[int] = np.asmatrix(A ).reshape(A, A )
featuremap_pooled.append(A )
return featuremap_pooled
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
for i in range(len(A ) ):
SCREAMING_SNAKE_CASE : Any = np.shape(data[i] )
SCREAMING_SNAKE_CASE : Optional[Any] = data[i].reshape(1, shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE : Tuple = data_listed.getA().tolist()[0]
data_expanded.extend(A )
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(A )
return data_expanded
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(A )
SCREAMING_SNAKE_CASE : Any = np.shape(A )
SCREAMING_SNAKE_CASE : Optional[int] = data_mat.reshape(1, shapes[0] * shapes[1] )
return data_expanded
def UpperCamelCase_ ( self, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Optional[Any] = 0
for i_map in range(A ):
SCREAMING_SNAKE_CASE : str = np.ones((size_map, size_map) )
for i in range(0, A, A ):
for j in range(0, A, A ):
SCREAMING_SNAKE_CASE : List[str] = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE : List[Any] = i_pool + 1
SCREAMING_SNAKE_CASE : Dict = np.multiply(
A, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
pd_all.append(A )
return pd_all
def UpperCamelCase_ ( self, A, A, A, A, A, A=bool ):
'''simple docstring'''
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(A )) )
print((' - - Shape: Teach_Data ', np.shape(A )) )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Optional[int] = 10_000
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE : Any = 0
print(F"-------------Learning Time {rp}--------------" )
for p in range(len(A ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE : Optional[Any] = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE : Dict = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.convolute(
A, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
SCREAMING_SNAKE_CASE : Tuple = self.pooling(A, self.size_poolinga )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.shape(A )
SCREAMING_SNAKE_CASE : str = self._expand(A )
SCREAMING_SNAKE_CASE : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE : List[Any] = np.dot(A, self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE : Optional[int] = self.sig(A )
SCREAMING_SNAKE_CASE : int = np.dot(A, self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE : Optional[Any] = self.sig(A )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE : Any = np.multiply(
(data_teach - bp_outa), np.multiply(A, (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE : Optional[int] = np.multiply(
np.dot(A, self.wkj ), np.multiply(A, (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE : List[Any] = np.dot(A, self.vji )
SCREAMING_SNAKE_CASE : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE : str = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE : str = self._calculate_gradient_from_pool(
A, A, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE : int = self.rate_weight * np.dot(A, A )
SCREAMING_SNAKE_CASE : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE : Union[str, Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error over all images
SCREAMING_SNAKE_CASE : Optional[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE : List[str] = rp + 1
SCREAMING_SNAKE_CASE : str = error_count / patterns
all_mse.append(A )
def draw_error():
SCREAMING_SNAKE_CASE : Optional[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(A, '+-' )
plt.plot(A, 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(A, alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, F" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(A )) )
for p in range(len(A ) ):
SCREAMING_SNAKE_CASE : List[Any] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.convolute(
A, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
SCREAMING_SNAKE_CASE : str = self.pooling(A, self.size_poolinga )
SCREAMING_SNAKE_CASE : List[Any] = self._expand(A )
SCREAMING_SNAKE_CASE : Dict = data_bp_input
SCREAMING_SNAKE_CASE : Tuple = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE : List[str] = self.sig(A )
SCREAMING_SNAKE_CASE : List[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE : List[str] = self.sig(A )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE : Dict = [list(map(self.do_round, A ) ) for each in produce_out]
return np.asarray(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = np.asmatrix(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.convolute(
A, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
SCREAMING_SNAKE_CASE : str = self.pooling(A, self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
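# Tiny numpy sketch of the valid convolution + sigmoid step the CNN class
# above performs in its convolute method; kernel, stride and data are
# illustrative values, not taken from the class defaults.
import numpy as np
def sig(x):
    return 1 / (1 + np.exp(-x))
data = np.arange(16.0).reshape(4, 4)
kernel = np.ones((2, 2))
step = 2
out = np.array(
    [
        [sig(np.sum(data[i : i + 2, j : j + 2] * kernel)) for j in range(0, 3, step)]
        for i in range(0, 3, step)
    ]
)
print(out.shape)  # (2, 2): (size_data - size_conv) / conv_step + 1 per side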
| 251 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _a ( unittest.TestCase ):
'''simple docstring'''
A : List[Any] = inspect.getfile(accelerate.test_utils )
A : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
A : List[str] = ['''accelerate''', '''launch''']
A : List[Any] = Path.home() / '''.cache/huggingface/accelerate'''
A : Any = '''default_config.yaml'''
A : Dict = config_folder / config_file
A : Union[str, Any] = config_folder / '''_default_config.yaml'''
A : int = Path('''tests/test_configs''' )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=A ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(A ), self.test_file_path], env=os.environ.copy() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
execute_subprocess_async(['accelerate', 'test'], env=os.environ.copy() )
class _a ( unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = '''test-tpu'''
A : List[str] = '''us-central1-a'''
A : List[str] = '''ls'''
A : List[str] = ['''accelerate''', '''tpu-config''']
A : List[str] = '''cd /usr/share'''
A : Optional[int] = '''tests/test_samples/test_command_file.sh'''
A : Optional[int] = '''Running gcloud compute tpus tpu-vm ssh'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'], return_stdout=A, )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", A, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
], return_stdout=A, )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", A, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'], return_stdout=A )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", A, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'], return_stdout=A, )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", A, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
], return_stdout=A, )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all", A, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'], return_stdout=A, )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", A, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
], return_stdout=A, )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", A, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'], return_stdout=A, )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all", A, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
], return_stdout=A, )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all", A, )
| 251 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Dict = '''lilt'''
def __init__( self , _UpperCamelCase=3_0_5_2_2 , _UpperCamelCase=7_6_8 , _UpperCamelCase=1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=0 , _UpperCamelCase="absolute" , _UpperCamelCase=None , _UpperCamelCase=4 , _UpperCamelCase=1_0_2_4 , **_UpperCamelCase , ) -> Optional[int]:
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Dict = type_vocab_size
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : str = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = position_embedding_type
UpperCAmelCase_ : Optional[int] = classifier_dropout
UpperCAmelCase_ : List[Any] = channel_shrink_ratio
UpperCAmelCase_ : Optional[Any] = max_ad_position_embeddings
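# Usage sketch for the configuration above; names assume the real
# `transformers` LiLT API (the obfuscated `max_ad_position_embeddings` here
# corresponds to `max_2d_position_embeddings` upstream).
from transformers import LiltConfig, LiltModel
config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
model = LiltModel(config)
print(config.model_type)  # "lilt"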
| 145 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCAmelCase_ : str = VideoClassificationPipeline(model=_UpperCamelCase , image_processor=_UpperCamelCase , top_k=2 )
UpperCAmelCase_ : List[str] = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
for example in examples:
UpperCAmelCase_ : str = video_classifier(_UpperCamelCase )
self.assertEqual(
_UpperCamelCase , [
{'score': ANY(_UpperCamelCase ), 'label': ANY(_UpperCamelCase )},
{'score': ANY(_UpperCamelCase ), 'label': ANY(_UpperCamelCase )},
] , )
@require_torch
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : str = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
UpperCAmelCase_ : Optional[Any] = VideoMAEFeatureExtractor(
size={'shortest_edge': 1_0} , crop_size={'height': 1_0, 'width': 1_0} )
UpperCAmelCase_ : str = pipeline(
'video-classification' , model=_UpperCamelCase , feature_extractor=_UpperCamelCase , frame_sampling_rate=4 )
UpperCAmelCase_ : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCAmelCase_ : List[str] = video_classifier(_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , )
UpperCAmelCase_ : Tuple = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
] , )
@require_tf
def __UpperCAmelCase ( self ) -> Dict:
pass
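# Inference sketch for the pipeline the tests above cover; it uses the same
# tiny checkpoint and demo video as the tests, and needs `decord` plus
# network access to run.
from huggingface_hub import hf_hub_download
from transformers import pipeline
video = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
classifier = pipeline(
    "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
)
print(classifier(video, top_k=2))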
| 145 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __lowerCAmelCase :
_lowercase : Optional[int] = LEDConfig
_lowercase : Optional[Any] = {}
_lowercase : Optional[int] = """gelu"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=2_0 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=4 , ) -> int:
'''simple docstring'''
a__ : int =parent
a__ : int =batch_size
a__ : Dict =seq_length
a__ : str =is_training
a__ : Optional[int] =use_labels
a__ : Optional[Any] =vocab_size
a__ : List[str] =hidden_size
a__ : Dict =num_hidden_layers
a__ : str =num_attention_heads
a__ : str =intermediate_size
a__ : Dict =hidden_dropout_prob
a__ : Tuple =attention_probs_dropout_prob
a__ : List[str] =max_position_embeddings
a__ : Dict =eos_token_id
a__ : Tuple =pad_token_id
a__ : Optional[Any] =bos_token_id
a__ : int =attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
a__ : Union[str, Any] =self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
a__ : List[str] =(
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : Any =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a__ : List[Any] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a__ : List[Any] =tf.concat([input_ids, eos_tensor] , axis=1 )
a__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ : Tuple =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
a__ : int =prepare_led_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
a__ : str =tf.concat(
[tf.zeros_like(lowerCAmelCase__ )[:, :-1], tf.ones_like(lowerCAmelCase__ )[:, -1:]] , axis=-1 , )
a__ : Dict =global_attention_mask
return config, inputs_dict
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
a__ : Any =TFLEDModel(config=lowerCAmelCase__ ).get_decoder()
a__ : Tuple =inputs_dict["input_ids"]
a__ : Any =input_ids[:1, :]
a__ : List[str] =inputs_dict["attention_mask"][:1, :]
a__ : List[Any] =1
# first forward pass
a__ : Any =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
a__ , a__ : Optional[int] =outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
a__ : Optional[Any] =ids_tensor((self.batch_size, 3) , config.vocab_size )
a__ : str =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention mask
a__ : Optional[int] =tf.concat([input_ids, next_tokens] , axis=-1 )
a__ : List[str] =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a__ : List[Any] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
a__ : Tuple =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a__ : Any =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a__ : Optional[Any] =output_from_no_past[:, -3:, random_slice_idx]
a__ : int =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-3 )
def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
"""simple docstring"""
if attention_mask is None:
a__ : Optional[int] =tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
a__ : Any =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
a__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a__ : Optional[int] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
_lowercase : int = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowercase : Union[str, Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowercase : Optional[int] = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowercase : Union[str, Any] = True
_lowercase : List[str] = False
_lowercase : List[str] = False
_lowercase : Dict = False
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : List[str] =TFLEDModelTester(self )
a__ : Any =ConfigTester(self , config_class=lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ , a__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
a__ : List[str] =tf.zeros_like(inputs_dict["attention_mask"] )
a__ : Tuple =2
a__ : int =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
a__ : Dict =True
a__ : List[Any] =self.model_tester.seq_length
a__ : Dict =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowerCAmelCase__ ):
a__ : List[Any] =outputs.decoder_attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowerCAmelCase__ ):
a__ : Optional[int] =[t.numpy() for t in outputs.encoder_attentions]
a__ : Any =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
a__ : Optional[int] =True
a__ : List[Any] =False
a__ : Any =False
a__ : List[str] =model_class(lowerCAmelCase__ )
a__ : Optional[Any] =model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
a__ : Optional[int] =len(lowerCAmelCase__ )
self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
check_encoder_attentions_output(lowerCAmelCase__ )
if self.is_encoder_decoder:
a__ : Union[str, Any] =model_class(lowerCAmelCase__ )
a__ : Dict =model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
check_decoder_attentions_output(lowerCAmelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a__ : str =True
a__ : Union[str, Any] =model_class(lowerCAmelCase__ )
a__ : Optional[Any] =model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
check_encoder_attentions_output(lowerCAmelCase__ )
# Check attention is always last and order is fine
a__ : int =True
a__ : Tuple =True
a__ : Tuple =model_class(lowerCAmelCase__ )
a__ : Optional[Any] =model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCAmelCase__ ) )
self.assertEqual(model.config.output_hidden_states , lowerCAmelCase__ )
check_encoder_attentions_output(lowerCAmelCase__ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _lowercase ( self ) -> str:
'''simple docstring'''
pass
def _A ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return tf.constant(SCREAMING_SNAKE_CASE , dtype=tf.intaa )
UpperCAmelCase : Dict = 1E-4
@slow
@require_tf
class __lowerCAmelCase ( unittest.TestCase):
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Tuple =TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
a__ : Any =_long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
a__ : int =_long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
a__ : List[str] =prepare_led_inputs_dict(model.config , lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple =model(**lowerCAmelCase__ )[0]
a__ : Optional[int] =(1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape , lowerCAmelCase__ )
# change to expected output here
a__ : Optional[int] =tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-3 )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Tuple =TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
a__ : Union[str, Any] =_long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
a__ : Dict =_long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
a__ : int =prepare_led_inputs_dict(model.config , lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Optional[int] =model(**lowerCAmelCase__ )[0]
a__ : Any =(1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape , lowerCAmelCase__ )
# change to expected output here
a__ : Dict =tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-3 , rtol=1E-3 )
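# Sketch of the global-attention mask construction used in the tests above:
# zeros everywhere except the last position, which attends globally in LED.
# The token ids are illustrative.
import tensorflow as tf
input_ids = tf.constant([[0, 5, 6, 2], [0, 8, 9, 2]])
global_attention_mask = tf.concat(
    [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1
)
print(global_attention_mask.numpy())  # [[0 0 0 1] [0 0 0 1]]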
| 95 |
class A :
'''simple docstring'''
def __init__(self : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = 0
lowercase__ = 0
lowercase__ = {}
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
if vertex not in self.adjacency:
lowercase__ = {}
self.num_vertices += 1
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
self.add_vertex(_UpperCAmelCase )
self.add_vertex(_UpperCAmelCase )
if head == tail:
return
lowercase__ = weight
lowercase__ = weight
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ = edge
edges.remove((tail, head, weight) )
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = list(edges[i] )
edges.sort(key=lambda _UpperCAmelCase : e[2] )
for i in range(len(_UpperCAmelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ = edge
lowercase__ = weight
lowercase__ = weight
def __str__(self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ = self.adjacency[head][tail]
string += f'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def lowerCamelCase__ (self : Any ) -> str:
"""simple docstring"""
lowercase__ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def lowerCamelCase__ (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def lowerCamelCase__ (_UpperCAmelCase : List[str]=None , _UpperCAmelCase : Any=None ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = Graph()
if vertices is None:
lowercase__ = []
if edges is None:
lowercase__ = []
for vertex in vertices:
g.add_vertex(_UpperCAmelCase )
for edge in edges:
g.add_edge(*_UpperCAmelCase )
return g
class A :
'''simple docstring'''
def __init__(self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ = {}
lowercase__ = {}
def __len__(self : Optional[Any] ) -> Dict:
"""simple docstring"""
return len(self.parent )
def lowerCamelCase__ (self : str , _UpperCAmelCase : Dict ) -> Any:
"""simple docstring"""
if item in self.parent:
return self.find(_UpperCAmelCase )
lowercase__ = item
lowercase__ = 0
return item
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Dict ) -> Any:
"""simple docstring"""
if item not in self.parent:
return self.make_set(_UpperCAmelCase )
if item != self.parent[item]:
lowercase__ = self.find(self.parent[item] )
return self.parent[item]
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.find(_UpperCAmelCase )
lowercase__ = self.find(_UpperCAmelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__ = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__ = roota
return roota
return None
@staticmethod
def lowerCamelCase__ (_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = graph.num_vertices
lowercase__ = Graph.UnionFind()
lowercase__ = []
while num_components > 1:
lowercase__ = {}
for vertex in graph.get_vertices():
lowercase__ = -1
lowercase__ = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ = edge
lowercase__ = union_find.find(_UpperCAmelCase )
lowercase__ = union_find.find(_UpperCAmelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ = cheap_edge[vertex]
if union_find.find(_UpperCAmelCase ) != union_find.find(_UpperCAmelCase ):
union_find.union(_UpperCAmelCase , _UpperCAmelCase )
mst_edges.append(cheap_edge[vertex] )
lowercase__ = num_components - 1
lowercase__ = Graph.build(edges=_UpperCAmelCase )
return mst
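# Usage sketch; the class and method names below assume the un-obfuscated
# originals (`Graph.build`, `boruvka_mst`) that the code above implements, so
# this will not run against the obfuscated identifiers verbatim.
g = Graph.build(
    vertices=[1, 2, 3, 4],
    edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)],
)
mst = Graph.boruvka_mst(g)
print(mst)  # an MST connecting all four vertices with total weight 4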
| 305 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
__magic_name__ = MgpstrTokenizer
__magic_name__ = False
__magic_name__ = {}
__magic_name__ = False
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
super().setUp()
# fmt: off
A : str = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
A : List[str] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Optional[int] = '''tester'''
A : List[str] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A : Dict = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
A : int = tokenizer.encode([special_token] , add_special_tokens=UpperCamelCase__ )
self.assertEqual(len(UpperCamelCase__ ) , 1 )
A : Any = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
self.assertTrue(special_token not in decoded )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A : List[str] = self.get_input_output_texts(UpperCamelCase__ )
A : Optional[int] = tokenizer.tokenize(UpperCamelCase__ )
A : List[str] = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
A : Optional[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
A : int = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertNotEqual(len(UpperCamelCase__ ) , 0 )
A : List[str] = tokenizer.decode(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , UpperCamelCase__ )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
pass
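# Minimal character-level round trip of the kind the MGP-STR tests above
# check, with the vocab built inline instead of loaded from a checkpoint.
chars = ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz")
vocab = {c: i for i, c in enumerate(chars)}
inv = {i: c for c, i in vocab.items()}
ids = [vocab[c] for c in "tester"]
assert "".join(inv[i] for i in ids) == "tester"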
| 367 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when the caller did not pass one,
        # so tests like `test_switch` can drive a specific instance
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
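
# Minimal stand-alone sketch of the set_timesteps/step contract exercised above,
# with a dummy callable in place of a real denoising model (illustrative only):
if __name__ == "__main__":
    sched = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
    sched.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in sched.timesteps:
        noise_pred = 0.1 * sample  # stand-in for model(sample, t)
        sample = sched.step(noise_pred, t, sample).prev_sample
    print(sample.shape)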
| 311 | 0 |
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized (untrained) seq2seq model built from a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
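
# Example invocation (illustrative; the script filename, config name and the tiny
# size overrides passed through **config_kwargs are assumptions):
#
#   python save_randomly_initialized_model.py t5-small /tmp/t5-rand --d_model=64 --d_ff=256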
| 125 |
def solution(n: int = 100) -> int:
    """Project Euler problem 6: difference between the square of the sum and the
    sum of the squares of the first n natural numbers, via the closed forms."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
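
# Brute-force cross-check of the two closed forms used above (illustrative):
def brute_force_solution(n: int = 100) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))


assert brute_force_solution(10) == solution(10) == 2640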
| 125 | 1 |
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing theta, i.e. our weights vector

    def predict_prob(features):
        # predicting the probability from the fitted logistic regression model
        return sigmoid_function(np.dot(features, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
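
# Illustrative helper (not in the original script): training accuracy of the
# learned weights, thresholding the predicted probability at 0.5.
def accuracy(x_data: np.ndarray, y_data: np.ndarray, weights: np.ndarray) -> float:
    predictions = sigmoid_function(np.dot(x_data, weights)) >= 0.5
    return float((predictions == y_data).mean())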
| 60 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
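
# Illustrative round trip (not part of the original module): compose a
# BridgeTowerConfig from small sub-configs and inspect the nested dict.
if __name__ == "__main__":
    text_cfg = BridgeTowerTextConfig(hidden_size=256, num_hidden_layers=2)
    vision_cfg = BridgeTowerVisionConfig(hidden_size=256, num_hidden_layers=2)
    cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
    print(cfg.to_dict()["text_config"]["hidden_size"])  # 256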
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype
        )

        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
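
# Illustrative shape check (not part of the original module): init_weights builds
# its own dummy inputs, so a PRNG key is all that is needed. The reduced channel
# and block settings below are arbitrary, chosen only to keep the example light.
if __name__ == "__main__":
    model = FlaxControlNetModel(
        sample_size=16,
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        block_out_channels=(32, 64),
        layers_per_block=1,
        cross_attention_dim=32,
    )
    params = model.init_weights(jax.random.PRNGKey(0))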
| 257 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
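
# Illustrative check of the derived attributes (not part of the original module):
# with embed_dim=96 and depths of length 4, hidden_size = 96 * 2 ** 3 = 768.
if __name__ == "__main__":
    config = SwinConfig()
    assert config.hidden_size == 768
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]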
| 357 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 293 | 0 |
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
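
# Illustrative self-check (not part of the original helpers): freezing really
# turns off gradients, and the other utilities run on any machine.
if __name__ == "__main__":
    layer = torch.nn.Linear(4, 2)
    freeze_module(layer)
    assert all(not p.requires_grad for p in layer.parameters())
    print(get_device(), get_timestamp())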
| 99 |
def excel_column_to_number(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "A", "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
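
# Worked examples (illustrative): "A" -> 1, "AB" -> 1 * 26 + 2 = 28,
# "ZY" -> 26 * 26 + 25 = 701.
assert excel_column_to_number("A") == 1
assert excel_column_to_number("AB") == 28
assert excel_column_to_number("ZY") == 701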
| 328 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 210 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        # shorten the remote-code prompt timeout so the tests below fail fast
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_new_config_register_on_hub(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 210 | 1 |
def is_arithmetic_series(series: list) -> bool:
    """Return True if `series` has a constant difference between consecutive terms."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the values in `series`."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
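
# Worked examples (illustrative):
assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([3, 6, 12, 24]) is False  # geometric, not arithmetic
assert arithmetic_mean([2, 4, 6]) == 4.0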
| 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
],)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
],)
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
1_0_8,
2_6_5,
2_4,
1_1_1,
4,
2_5_8,
1_5_6,
3_5,
2_8,
2_7_5,
3,
2_5_9,
2_9_7,
2_6_0,
8_4,
4,
3_5,
1_1_0,
4_4,
8,
2_5_9,
9_1,
2_6_8,
2_1,
1_1,
2_0_9,
2_7_4,
1_0_9,
2_6_6,
2_7_7,
1_1_7,
8_6,
9_3,
3_1_5,
2_5_8,
2_7_8,
2_5_8,
2_7_7,
2_5_8,
0,
2_5_8,
2_8_8,
2_5_8,
3_1_9,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
2_8_7,
2_5_8,
3_1_5,
2_5_8,
2_8_9,
2_5_8,
2_7_8,
9_9,
2_6_9,
2_6_6,
2_6_2,
8,
2_5_9,
2_4_1,
4,
2_1_7,
2_3_0,
2_6_8,
2_6_6,
5_5,
1_6_8,
1_0_6,
7_5,
1_9_3,
2_6_6,
2_2_3,
2_7,
4_9,
2_6,
2_8_2,
2_5,
2_6_4,
2_9_9,
1_9,
2_6,
0,
2_5_8,
2_7_7,
1_1_7,
8_6,
9_3,
1_7_6,
1_8_3,
2_7_0,
1_1,
2_6_2,
4_2,
6_1,
2_6_5,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 7 | 0 |
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 350 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 0 |
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of `number` ends in `number` itself
    (an automorphic number), e.g. 5 -> 25, 76 -> 5776."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
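
# Illustrative scan (not part of the original module): the automorphic numbers
# below 1000 are exactly those whose squares end in the number itself.
assert [n for n in range(1, 1000) if is_automorphic_number(n)] == [1, 5, 6, 25, 76, 376, 625]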
| 169 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Dict = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCamelCase : List[str] = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
UpperCamelCase_: Any = TextaTextGenerationPipeline(model=snake_case_ , tokenizer=snake_case_ )
return generator, ["Something to write", "Something else"]
def lowerCAmelCase__ ( self : str , snake_case_ : Dict , snake_case_ : Tuple ):
UpperCamelCase_: int = generator("""Something there""" )
self.assertEqual(snake_case_ , [{"""generated_text""": ANY(snake_case_ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
UpperCamelCase_: Dict = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
[{"""generated_text""": ANY(snake_case_ )}, {"""generated_text""": ANY(snake_case_ )}],
[{"""generated_text""": ANY(snake_case_ )}, {"""generated_text""": ANY(snake_case_ )}],
] , )
UpperCamelCase_: Optional[Any] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
[{"""generated_text""": ANY(snake_case_ )}, {"""generated_text""": ANY(snake_case_ )}],
[{"""generated_text""": ANY(snake_case_ )}, {"""generated_text""": ANY(snake_case_ )}],
] , )
with self.assertRaises(snake_case_ ):
generator(4 )
@require_torch
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Union[str, Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
UpperCamelCase_: Union[str, Any] = generator("""Something there""" , do_sample=snake_case_ )
self.assertEqual(snake_case_ , [{"""generated_text""": """"""}] )
UpperCamelCase_: Dict = 3
UpperCamelCase_: Union[str, Any] = generator(
"""Something there""" , num_return_sequences=snake_case_ , num_beams=snake_case_ , )
UpperCamelCase_: Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(snake_case_ , snake_case_ )
UpperCamelCase_: Any = generator("""This is a test""" , do_sample=snake_case_ , num_return_sequences=2 , return_tensors=snake_case_ )
self.assertEqual(
snake_case_ , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
UpperCamelCase_: int = generator.model.config.eos_token_id
UpperCamelCase_: Any = """<pad>"""
UpperCamelCase_: Any = generator(
["""This is a test""", """This is a second test"""] , do_sample=snake_case_ , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case_ , )
self.assertEqual(
snake_case_ , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: int = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
UpperCamelCase_: Tuple = generator("""Something there""" , do_sample=snake_case_ )
self.assertEqual(snake_case_ , [{"""generated_text""": """"""}] )
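
# Minimal standalone sketch (added for illustration): the same pipeline the tests
# above exercise; the exact output depends on the tiny random checkpoint.
#   from transformers import pipeline
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   generator("Something there", do_sample=False)  # [{"generated_text": ...}]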
| 223 |
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
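
# Example invocation (illustrative paths and script name; the flags are the ones
# defined above):
#   python convert_gpt2_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /tmp/gpt2-pytorch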
| 223 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedfileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the filesystem prefix (e.g. `s3://`) from `dataset_path`, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename `src` to `dst` on `fs`, moving locally when possible."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Clear the reference to fsspec's asyncio loop and thread, otherwise
    `HTTPFileSystem` can hang when forked inside a training loop.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
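
# Hedged usage sketch of the helpers above (added for illustration):
#   extract_path_from_uri("s3://my-bucket/datasets/train")  # -> "my-bucket/datasets/train"
#   is_remote_filesystem(None)                              # -> False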
| 274 |
import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
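
# Hedged sketch of direct tokenizer use (added for illustration; vocab file as
# written in setUp above, tokenization behavior assumed to be character-level):
#   tok = MgpstrTokenizer(vocab_file="vocab.json")
#   tok.tokenize("tester")  # -> one token per character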
| 310 | 0 |
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '<HTML>\n\n    <HEAD>\n        <TITLE>sample document</TITLE>\n    </HEAD>\n\n    <BODY BGCOLOR="FFFFFF">\n        <HR>\n        <a href="http://google.com">Goog</a>\n        <H1>This is one header</H1>\n        <H2>This is a another Header</H2>\n        <P>Travel from\n            <P>\n                <B>SFO to JFK</B>\n                <BR>\n                <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n                <HR>\n                <div style="color:#0000FF">\n                    <h3>Traveler <b> name </b> is\n                    <p> John Doe </p>\n                </div>'
    html_string_2 = '\n    <!DOCTYPE html>\n    <html>\n    <body>\n\n    <h1>My First Heading</h1>\n    <p>My first paragraph.</p>\n\n    </body>\n    </html>\n    '
    return [html_string_1, html_string_2]
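
# Hedged usage sketch (added for illustration; requires `bs4` to be installed):
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor(get_html_strings()[0])
#   encoding.nodes, encoding.xpaths  # extracted text nodes and their xpaths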
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 130 |
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
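

# Hedged usage sketch (not in the original script): the conversion can also be
# driven programmatically instead of through the CLI defined below.
#   model = convert_fairseq_mbart_checkpoint_from_disk("model.pt", finetuned=True)
#   model.save_pretrained("mbart-pytorch")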
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 130 | 1 |