"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = LEDTokenizer
SCREAMING_SNAKE_CASE__ : Dict = LEDTokenizerFast
SCREAMING_SNAKE_CASE__ : List[str] = True
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
UpperCAmelCase_ : List[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
UpperCAmelCase_ : Optional[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase_ : Union[str, Any] = {"unk_token": "<unk>"}
UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase_ : Dict = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : int = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase_ : str = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase_ , lowercase_ )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : str = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
self.assertIn("input_ids" , lowercase_ )
self.assertIn("attention_mask" , lowercase_ )
self.assertNotIn("labels" , lowercase_ )
self.assertNotIn("decoder_attention_mask" , lowercase_ )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : List[Any] = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : Tuple = tokenizer(
["I am a small frog" * 1024, "I am a small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ["A long paragraph for summarization."]
UpperCAmelCase_ : Optional[int] = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : Optional[Any] = tokenizer(lowercase_ , return_tensors="pt" )
UpperCAmelCase_ : List[str] = tokenizer(text_target=lowercase_ , return_tensors="pt" )
UpperCAmelCase_ : Optional[Any] = inputs["input_ids"]
UpperCAmelCase_ : int = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ : Dict = ["Summary of the text.", "Another summary."]
UpperCAmelCase_ : Optional[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCAmelCase_ : Optional[Any] = tokenizer(lowercase_ , padding=lowercase_ )
UpperCAmelCase_ : Tuple = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]]
UpperCAmelCase_ : Optional[Any] = tokenizer.pad(lowercase_ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase_ : str = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase_ : Union[str, Any] = "A, <mask> AllenNLP sentence."
UpperCAmelCase_ : List[Any] = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
UpperCAmelCase_ : Dict = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase_ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
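
# Illustrative sketch (standalone, not part of the conversion script): the weight
# tying done by make_linear_from_emb reuses the embedding matrix as the output
# projection, so the converted model adds no new parameters for the LM head.
import torch
from torch import nn

emb = nn.Embedding(10, 4)           # (vocab_size, d_model)
lin = nn.Linear(4, 10, bias=False)  # nn.Linear stores weight as (out, in) = (10, 4)
lin.weight.data = emb.weight.data   # share the same tensor
assert torch.equal(lin.weight, emb.weight)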
"""Shortest remaining time first (preemptive SJF) CPU scheduling."""
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn-around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turn-around times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
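
# Worked example (assumed process set, non-interactive): three processes arriving at
# t = 0, 2, 4 with burst times 8, 4, 1. SRTF preempts P1 when the shorter jobs arrive:
# P3 finishes at t=5, P2 at t=7, P1 at t=13, giving waiting times [5, 1, 0] and
# turn-around times [13, 5, 1].
example_arrival = [0, 2, 4]
example_burst = [8, 4, 1]
example_wt = calculate_waitingtime(example_arrival, example_burst, 3)
example_tat = calculate_turnaroundtime(example_burst, 3, example_wt)
print(example_wt, example_tat)  # [5, 1, 0] [13, 5, 1]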
"""Tests for the text-classification pipeline."""
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TextClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
SCREAMING_SNAKE_CASE__ = load_file(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
SCREAMING_SNAKE_CASE__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
SCREAMING_SNAKE_CASE__ = pipeline.text_encoder
else:
SCREAMING_SNAKE_CASE__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
SCREAMING_SNAKE_CASE__ = pipeline.unet
# find the target layer
SCREAMING_SNAKE_CASE__ = layer_infos.pop(0 )
while len(_SCREAMING_SNAKE_CASE ) > -1:
try:
SCREAMING_SNAKE_CASE__ = curr_layer.__getattr__(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
SCREAMING_SNAKE_CASE__ = layer_infos.pop(0 )
elif len(_SCREAMING_SNAKE_CASE ) == 0:
break
except Exception:
if len(_SCREAMING_SNAKE_CASE ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
SCREAMING_SNAKE_CASE__ = layer_infos.pop(0 )
SCREAMING_SNAKE_CASE__ = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(_SCREAMING_SNAKE_CASE )
else:
pair_keys.append(_SCREAMING_SNAKE_CASE )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
SCREAMING_SNAKE_CASE__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
SCREAMING_SNAKE_CASE__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).unsqueeze(2 ).unsqueeze(3 )
else:
SCREAMING_SNAKE_CASE__ = state_dict[pair_keys[0]].to(torch.floataa )
SCREAMING_SNAKE_CASE__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# update visited list
for item in pair_keys:
visited.append(_SCREAMING_SNAKE_CASE )
return pipeline
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
A_ : Dict = parser.parse_args()
A_ : List[str] = args.base_model_path
A_ : List[str] = args.checkpoint_path
A_ : Tuple = args.dump_path
A_ : Union[str, Any] = args.lora_prefix_unet
A_ : Optional[int] = args.lora_prefix_text_encoder
A_ : Any = args.alpha
A_ : int = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
A_ : int = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
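
# Toy illustration (standalone, assumed shapes) of the merge performed above:
# W' = W0 + alpha * (lora_up @ lora_down), a low-rank additive update that leaves
# the weight shape unchanged.
import torch

torch.manual_seed(0)
w0 = torch.randn(8, 8)         # frozen base weight
lora_up = torch.randn(8, 2)    # rank-2 up-projection
lora_down = torch.randn(2, 8)  # rank-2 down-projection
alpha = 0.75
w_merged = w0 + alpha * torch.mm(lora_up, lora_down)
print(w_merged.shape)  # torch.Size([8, 8]), same shape as the base weight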
"""Tests for the PyTorch benchmark utilities."""
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
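
# Minimal standalone use of the benchmark utilities exercised above (sketch; the tiny
# checkpoint is the same one the tests download):
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)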
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
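
# Follow-up sketch (commented out; assumes the MaskFormerImageProcessor post-processing
# API and requires downloading the checkpoint): turning the raw query logits above into
# a per-pixel semantic map for the COCO cats image.
#
#   processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#   semantic_map = processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )[0]  # (height, width) tensor of class ids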
def match_pattern(input_string: str, pattern: str) -> bool:
    """Bottom-up dynamic programming regex match supporting "." and "*"."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
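
# A few more illustrative checks of the "." / "*" semantics implemented above:
assert match_pattern("aab", "c*a*b")  # "c*" matches empty, "a*" matches "aa"
assert match_pattern("abc", "a.c")  # "." matches any single character
assert not match_pattern("aa", "a")  # pattern must cover the whole string
assert not match_pattern("mississippi", "mis*is*p*.")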
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowercase = 128022
__lowercase = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6], )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
# fmt: off
a : Optional[int] = {"input_ids": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e", )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
| 40 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]])  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 221 | 0 |
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechTaFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
def __init__( self , _UpperCAmelCase = 1 , _UpperCAmelCase = 1_6000 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = False , _UpperCAmelCase = 80 , _UpperCAmelCase = 16 , _UpperCAmelCase = 64 , _UpperCAmelCase = "hann_window" , _UpperCAmelCase = 1.0 , _UpperCAmelCase = 80 , _UpperCAmelCase = 7600 , _UpperCAmelCase = 1e-1_0 , _UpperCAmelCase = 2 , _UpperCAmelCase = True , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase)
__A : Union[str, Any] = do_normalize
__A : Dict = return_attention_mask
__A : Any = num_mel_bins
__A : Union[str, Any] = hop_length
__A : List[str] = win_length
__A : int = win_function
__A : Any = frame_signal_scale
__A : List[str] = fmin
__A : Optional[Any] = fmax
__A : Dict = mel_floor
__A : Optional[Any] = reduction_factor
__A : Optional[Any] = win_length * sampling_rate // 1000
__A : str = hop_length * sampling_rate // 1000
__A : Tuple = optimal_fft_length(self.sample_size)
__A : str = (self.n_fft // 2) + 1
__A : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase)
__A : List[str] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0.0):
'''simple docstring'''
if attention_mask is not None:
__A : List[Any] = np.array(_UpperCAmelCase , np.intaa)
__A : int = []
for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1)):
__A : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
if length < normed_slice.shape[0]:
__A : Dict = padding_value
normed_input_values.append(_UpperCAmelCase)
else:
__A : List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
return normed_input_values
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = spectrogram(
_UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.')
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
if audio is not None:
__A : Dict = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
else:
__A : Union[str, Any] = None
if audio_target is not None:
__A : Optional[int] = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
if inputs is None:
return inputs_target
else:
__A : Any = inputs_target['input_values']
__A : Union[str, Any] = inputs_target.get('attention_mask')
if decoder_attention_mask is not None:
__A : List[str] = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = isinstance(_UpperCAmelCase , np.ndarray) and len(speech.shape) > 1
if is_batched_numpy and len(speech.shape) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}')
__A : Optional[int] = is_batched_numpy or (
isinstance(_UpperCAmelCase , (list, tuple)) and (isinstance(speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__A : Dict = [np.asarray(_UpperCAmelCase , dtype=np.floataa) for speech in speech]
elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray):
__A : List[str] = np.asarray(_UpperCAmelCase , dtype=np.floataa)
elif isinstance(_UpperCAmelCase , np.ndarray) and speech.dtype is np.dtype(np.floataa):
__A : List[Any] = speech.astype(np.floataa)
# always return batch
if not is_batched:
__A : Tuple = [speech]
# needed to make pad() work on spectrogram inputs
__A : Tuple = self.feature_size
# convert into correct format for padding
if is_target:
__A : Dict = [self._extract_mel_features(_UpperCAmelCase) for waveform in speech]
__A : Dict = BatchFeature({'input_values': features})
__A : Any = self.num_mel_bins
else:
__A : int = BatchFeature({'input_values': speech})
__A : Union[str, Any] = self.pad(
_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
__A : Any = feature_size_hack
# convert input values to correct format
__A : Union[str, Any] = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray):
__A : List[str] = [np.asarray(_UpperCAmelCase , dtype=np.floataa) for array in input_values]
elif (
not isinstance(_UpperCAmelCase , np.ndarray)
and isinstance(input_values[0] , np.ndarray)
and input_values[0].dtype is np.dtype(np.floataa)
):
__A : Any = [array.astype(np.floataa) for array in input_values]
elif isinstance(_UpperCAmelCase , np.ndarray) and input_values.dtype is np.dtype(np.floataa):
__A : List[Any] = input_values.astype(np.floataa)
# convert attention_mask to correct format
__A : int = padded_inputs.get('attention_mask')
if attention_mask is not None:
__A : Optional[int] = [np.asarray(_UpperCAmelCase , dtype=np.intaa) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__A : Optional[int] = (
attention_mask
if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase) is not PaddingStrategy.DO_NOT_PAD
else None
)
__A : str = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value)
if return_tensors is not None:
__A : Dict = padded_inputs.convert_to_tensors(_UpperCAmelCase)
return padded_inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = super().to_dict()
# Don't serialize these as they are derived from the other properties.
__A : str = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output | 365 |
"""
Project Euler Problem 7: by listing the first six prime numbers (2, 3, 5, 7, 11
and 13), we can see that the 6th prime is 13. Find the 10001st prime number.
"""
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # all primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the `nth` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
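# Spot checks (a sketch): the generator yields 2, 3, 5, 7, 11, 13, ..., so the
# 6th prime is 13; solution() defaults to the 10001st prime.
assert is_prime(97) and not is_prime(1)
assert solution(6) == 13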
if __name__ == "__main__":
print(f"""{solution() = }""") | 190 | 0 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
a__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
a__ , a__ : Optional[Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
a__ : List[str] = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
a__ : Union[str, Any] = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
a__ : Any = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
| 54 |
"""simple docstring"""
import math
import random
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = False ):
'''simple docstring'''
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
a__ : Tuple = 0.02
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(lowerCAmelCase_ ):
# Forward propagation
__SCREAMING_SNAKE_CASE = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
__SCREAMING_SNAKE_CASE = (expected / 100) - layer_a
# Error delta
__SCREAMING_SNAKE_CASE = layer_1_error * sigmoid_function(lowerCAmelCase_ , lowerCAmelCase_ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
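# Illustrative run (a sketch; the result depends on the random initial weight):
# seeding the RNG makes it reproducible, and with enough propagations the output
# converges towards `expected`, e.g.:
#   random.seed(0)
#   forward_propagation(32, 450_000)  # -> approximately 32.0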
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : List[str] = int(input('''Expected value: '''))
a__ : str = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 54 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
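# Hypothetical usage sketch (PipelineTool instances are callable; the text and
# labels below are illustrative only, not part of the class):
#   classifier = TextClassificationTool()
#   label = classifier("This is a super nice API!", labels=["positive", "negative"])
#   # -> "positive"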
| 251 | 1 |
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a sorted-letter signature; anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every known anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
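# Quick check of the signature idea (a sketch): anagrams share a sorted-letter key.
assert signature("dog") == signature("god") == "dgo"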
if __name__ == "__main__":
__lowerCamelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
| 59 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Log the WER and CER results and optionally write predictions and targets to disk."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize a transcription: lowercase it and strip ignored characters and whitespace runs."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
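# Example invocation (a sketch; the model and dataset identifiers below are
# illustrative placeholders, not tested checkpoints):
#   python eval.py --model_id some-org/wav2vec2-finetuned \
#       --dataset mozilla-foundation/common_voice_8_0 --config de --split test --log_outputs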
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
__lowerCamelCase = parser.parse_args()
main(args)
| 59 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
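# Example (a sketch; the repo id and file path are illustrative):
#   hf_hub_url("user/dataset", "data/train.csv", revision="main")
#   # -> "https://huggingface.co/datasets/user/dataset/resolve/main/data/train.csv"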
| 351 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 245 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->str:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
a : Tuple = precision
a : str = ceil(precision / 14 )
a : List[Any] = 42_6880 * Decimal(1_0005 ).sqrt()
a : Union[str, Any] = 1
a : Dict = 1359_1409
a : Optional[int] = Decimal(_lowercase )
for k in range(1 , _lowercase ):
a : int = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowercase ) ** 3)
linear_term += 5_4514_0134
exponential_term *= -26_2537_4126_4076_8000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
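# Spot check (a sketch): each series term contributes roughly 14 digits, which
# is why the loop above runs ceil(precision / 14) times.
assert pi(10) == "3.14159265"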
if __name__ == "__main__":
a : Optional[Any] = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 105 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the artifact URL only gives a redirect, so follow it first."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model


def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowercase_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_job_links = get_job_links(args.workflow_run_id, token=args.token)
job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
    for k, v in _job_links.items():
        # This is how GitHub actions combine job names.
        if " / " in k:
            index = k.find(" / ")
            k = k[index + len(" / ") :]
        job_links[k] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
    json.dump(job_links, fp, ensure_ascii=False, indent=4)

artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
    json.dump(artifacts, fp, ensure_ascii=False, indent=4)

for idx, (name, url) in enumerate(artifacts.items()):
    download_artifact(name, url, args.output_dir, args.token)
    # Be gentle to GitHub
    time.sleep(1)

errors = get_all_errors(args.output_dir, job_links=job_links)

# `e[1]` is the error
counter = Counter()
counter.update([e[1] for e in errors])

# print the top 30 most common test errors
most_common = counter.most_common(30)
for item in most_common:
    print(item)

with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
    json.dump(errors, fp, ensure_ascii=False, indent=4)

reduced_by_error = reduce_by_error(errors)
reduced_by_model = reduce_by_model(errors)

s1 = make_github_table(reduced_by_error)
s2 = make_github_table_per_model(reduced_by_model)

with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
    fp.write(s1)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
    fp.write(s2)
| 361 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
# Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )
# Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
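# For context: the entity vocabulary consumed above is a plain tab-separated file,
# one entity per line; the second column (a count in the original LUKE release) is
# ignored. A hypothetical two-line file and the mapping it would produce:
#
#     entity_vocab.tsv (illustrative contents only):
#         [MASK]<TAB>100
#         Ana Ivanovic<TAB>42
#     load_entity_vocab("entity_vocab.tsv") -> {"[MASK]": 0, "Ana Ivanovic": 1}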
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 194 | 0 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping,
    # and only CLAP would be there for now.

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 181 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

| 81 | 0
import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
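# A quick illustration of the stochastic-depth behavior implemented above
# (shapes and the seed are arbitrary; kept as a comment so nothing runs at import):
#
#     torch.manual_seed(0)
#     x = torch.ones(4, 8, 2, 2)                       # batch of 4 residual branches
#     out = drop_path(x, drop_prob=0.5, training=True)
#     out.flatten(1).amax(dim=1)
#     # each sample is either all zeros (dropped) or scaled by 1 / keep_prob = 2.0,
#     # e.g. tensor([2., 0., 2., 2.]) depending on the seed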
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with a strided convolution, optionally followed by a norm layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with 1 group; input shape (batch_size, num_channels, height, width)."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtract the input so the module returns only the "mixing" residual
        return self.pool(hidden_states) - hidden_states
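# PoolFormer's token mixer is simply average pooling minus the identity, so the
# block's residual connection reconstructs plain average pooling. An illustrative
# check (comment-only sketch; shapes are arbitrary):
#
#     pooling = PoolFormerPooling(pool_size=3)
#     x = torch.randn(1, 64, 14, 14)
#     mixed = pooling(x)                               # avg_pool(x) - x
#     assert mixed.shape == x.shape
#     assert torch.allclose(mixed + x, pooling.pool(x))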
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
__lowerCamelCase : Optional[int] = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__lowerCamelCase : Dict = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
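# A minimal inference sketch for the classification head above. The checkpoint id
# comes from the docstring constants; the image path is hypothetical, and the code
# is kept in comments so nothing runs at import time:
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, PoolFormerForImageClassification
#
#     image = Image.open("cat.png")
#     processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])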
| 357 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DeformableDetrImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
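    # A worked example of the resize arithmetic above, with illustrative numbers:
    # for an image of width 30 and height 40 with size["shortest_edge"] = 18,
    # w < h, so expected_width = 18 and expected_height = int(18 * 40 / 30) = 24;
    # batched inputs then take the per-dimension maximum over the batch.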
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 286 | 0 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    A fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
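# A minimal sketch of how a scheduler with this interface is typically driven
# (`unet` stands in for any noise-prediction model; shapes and the step count are
# illustrative, and the code is kept as a comment so nothing runs at import time):
#
#     scheduler = IPNDMScheduler()
#     scheduler.set_timesteps(num_inference_steps=50)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = unet(sample, t)               # hypothetical model call
#         sample = scheduler.step(model_output, t, sample).prev_sample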
| 61 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """
        Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
        """
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """
    Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator.
    """

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
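# A typical consumption pattern for TextIteratorStreamer: run generation in a
# background thread and iterate in the foreground. Sketch only -- `model`,
# `tokenizer`, and the prompt are placeholders:
#
#     from threading import Thread
#
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")
#     thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
#     thread.start()
#     for new_text in streamer:
#         print(new_text, end="")
#     thread.join()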
| 197 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_pretokenized_inputs(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_maximum_encoding_length_pair_input(self):
        pass

| 188 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Comet(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}

| 188 | 1
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 48 |
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    """
    Return the Catalan numbers C(0) through C(upper_limit), computed with dynamic programming.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
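# Quick check of the recurrence against well-known values:
#     catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]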
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
a__ : List[str] = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 54 | 0 |
"""simple docstring"""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config

| 239 |
"""simple docstring"""
def capitalized_variants(txt: str) -> list:
    """
    Return every variant of `txt` with exactly one of its letters uppercased.

    >>> capitalized_variants("abc")
    ['Abc', 'aBc', 'abC']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()

| 239 | 1
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE = "scheduler_config.json"
class UpperCAmelCase_ ( A_ ):
lowercase__ = 1
lowercase__ = 2
lowercase__ = 3
lowercase__ = 4
lowercase__ = 5
lowercase__ = 6
lowercase__ = 7
lowercase__ = 8
lowercase__ = 9
lowercase__ = 10
lowercase__ = 11
lowercase__ = 12
lowercase__ = 13
lowercase__ = 14
@dataclass
class UpperCAmelCase_ ( A_ ):
lowercase__ = 42
class UpperCAmelCase_ :
lowercase__ = SCHEDULER_CONFIG_NAME
lowercase__ = []
lowercase__ = True
@classmethod
def __magic_name__ ( cls : int , snake_case_ : List[str] = None , snake_case_ : Dict = None , snake_case_ : str=False , **snake_case_ : Optional[int] , ) -> Optional[Any]:
'''simple docstring'''
A__ = cls.load_config(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , return_unused_kwargs=SCREAMING_SNAKE_CASE__ , return_commit_hash=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
return cls.from_config(SCREAMING_SNAKE_CASE__ , return_unused_kwargs=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ ( self : Optional[Any] , snake_case_ : List[str] , snake_case_ : Tuple = False , **snake_case_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.save_config(save_directory=SCREAMING_SNAKE_CASE__ , push_to_hub=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __magic_name__ ( self : int ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def __magic_name__ ( cls : Optional[int] ) -> int:
'''simple docstring'''
A__ = list(set([cls.__name__] + cls._compatibles ) )
A__ = importlib.import_module(__name__.split("." )[0] )
A__ = [
getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for c in compatible_classes_str if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
return compatible_classes
| 247 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """
    Apply an X (NOT) gate to two qubits and measure the resulting states.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")

| 191 | 0
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the regex/template pair from REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
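# To make the substitution concrete, here is what the "init" pattern does to a
# sample line (illustration only, not part of the release flow):
#
#     pattern, template = REPLACE_PATTERNS["init"]
#     code = '__version__ = "0.15.0.dev0"\n'
#     pattern.sub(template.replace("VERSION", "0.15.0"), code)
#     # the line becomes: __version__ = "0.15.0"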
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links from main-doc to stable-doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def lowerCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""") as f:
a__ : str = f.read()
a__ : Tuple = REPLACE_PATTERNS["""init"""][0].search(_lowercase).groups()[0]
return packaging.version.parse(_lowercase)
def lowerCAmelCase_ ( _lowercase : Optional[int]=False) -> Any:
"""simple docstring"""
a__ : Tuple = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
if default_version.is_devrelease:
a__ : List[Any] = default_version.base_version
elif patch:
a__ : Dict = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
a__ : Union[str, Any] = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
a__ : List[Any] = input(F'''Which version are you releasing? [{default_version}]''')
if len(_lowercase) == 0:
a__ : Tuple = default_version
print(F'''Updating version to {version}.''')
global_version_update(_lowercase , patch=_lowercase)
def lowerCAmelCase_ ( ) -> str:
"""simple docstring"""
a__ : Any = get_version()
a__ : Any = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
a__ : Tuple = current_version.base_version
# Check with the user we got that right.
a__ : Dict = input(F'''Which version are we developing now? [{dev_version}]''')
if len(_lowercase) == 0:
a__ : List[str] = dev_version
print(F'''Updating version to {version}.''')
global_version_update(_lowercase)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase : Any =argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
_lowercase : Dict =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
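# A small illustration of how a REPLACE_PATTERNS entry rewrites a version
# string (pure `re`, no files touched; the sample text below is made up):
#
#     re_pattern, replace = REPLACE_PATTERNS["init"]
#     re_pattern.sub(replace.replace("VERSION", "0.19.0"), '__version__ = "0.19.0.dev0"\n')
#     # -> roughly '__version__ = "0.19.0"\n'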
| 266 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Helper for Newton's forward interpolation: u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
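# A non-interactive sketch of the same forward-difference computation, on a
# made-up sample: f(x) = x**2 tabulated at x = 0..3 and interpolated at 1.5
# (the data is exactly quadratic, so the expected value is 2.25).
def _demo() -> None:
    x = [0.0, 1.0, 2.0, 3.0]
    f = [0.0, 1.0, 4.0, 9.0]
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i in range(n):
        table[i][0] = f[i]
    # build the forward difference table
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (1.5 - x[0]) / (x[1] - x[0])
    summ = table[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * table[0][i]) / math.factorial(i)
    print(summ)  # 2.25


# _demo()  # uncomment to run the example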
| 266 | 1 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
__a = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
__a = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
__a = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(snake_case , snake_case )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 66 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Check whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
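    # A couple of extra illustrative checks (made-up numbers):
    print(is_sri_lankan_phone_number("+94773283048"))   # True
    print(is_sri_lankan_phone_number("0099702343221"))  # False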
| 66 | 1 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 365 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
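    # A quick worked example (illustrative, not an original doctest): for
    # [1, 2, 3, 4] the best non-adjacent picks are 2 and 4, giving 6.
    assert maximum_non_adjacent_sum([1, 2, 3, 4]) == 6
    assert maximum_non_adjacent_sum([]) == 0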
| 242 | 0 |
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
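    # Worked examples (values chosen for the demo): exactly one argument is 0,
    # and the function solves for it.
    print(ohms_law(voltage=10, current=5, resistance=0))   # {'resistance': 2.0}
    print(ohms_law(voltage=0, current=1.5, resistance=2))  # {'voltage': 3.0}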
| 23 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 23 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
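# With the lazy module registered above, a top-level import such as the
# following resolves the heavy torch submodule only on first attribute access
# (illustrative usage, not part of this file):
#
#     from transformers import FocalNetConfig, FocalNetModel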
| 361 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
        outputs = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
        outputs = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
| 112 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
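# Illustrative usage of the processor (a sketch; the checkpoint name is a
# public Hub checkpoint but downloading it requires network access, and
# "cats.jpg" is a made-up local file):
#
#     from transformers import ViltProcessor
#     from PIL import Image
#
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     inputs = processor(Image.open("cats.jpg"), "How many cats are there?", return_tensors="pt")
#     # `inputs` holds input_ids/attention_mask from the tokenizer plus
#     # pixel_values/pixel_mask from the image processor.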
| 106 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Count the starting numbers below ``number`` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
print(F"""{solution() = }""")
| 80 | 0 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity between two sets (or lists/tuples)."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 356 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000):
    """Probabilistic (Miller-Rabin style) primality test built on bin_exp_mod."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1

    # n - 1=d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 230 | 0 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list ``a`` in place using pigeonhole sort (integers only)."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
| 248 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Check whether ``n`` contains each of the digits 1-9 exactly once."""
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 248 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False


@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 194 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Load configuration defined in the metadata file
with open(SCREAMING_SNAKE_CASE__ ) as metadata_file:
__lowerCamelCase : List[str] = json.load(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : int = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
__lowerCamelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
# Load the entity vocab file
__lowerCamelCase : List[Any] = load_entity_vocab(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__lowerCamelCase : str = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Dict = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : str = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Initialize the embeddings of the special tokens
__lowerCamelCase : Union[str, Any] = state_dict['embeddings.word_embeddings.weight']
__lowerCamelCase : Tuple = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
__lowerCamelCase : Any = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
__lowerCamelCase : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__lowerCamelCase : Optional[int] = f'encoder.layer.{layer_index}.attention.self.'
__lowerCamelCase : Dict = state_dict[prefix + matrix_name]
__lowerCamelCase : List[Any] = state_dict[prefix + matrix_name]
__lowerCamelCase : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__lowerCamelCase : Optional[int] = state_dict['entity_embeddings.entity_embeddings.weight']
__lowerCamelCase : Union[str, Any] = entity_emb[entity_vocab['[MASK]']]
__lowerCamelCase : Optional[Any] = LukeModel(config=SCREAMING_SNAKE_CASE__ ).eval()
__lowerCamelCase , __lowerCamelCase : List[Any] = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
if not (len(SCREAMING_SNAKE_CASE__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'Missing keys {", ".join(SCREAMING_SNAKE_CASE__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
raise ValueError(
'Unexpected keys'
f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
__lowerCamelCase : Optional[Any] = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , task='entity_classification' )
__lowerCamelCase : Dict = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
__lowerCamelCase : Union[str, Any] = (39, 42)
__lowerCamelCase : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , add_prefix_space=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
__lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE__ )
# Verify word hidden states
if model_size == "large":
__lowerCamelCase : Dict = torch.Size((1, 42, 1_024) )
__lowerCamelCase : int = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
__lowerCamelCase : Union[str, Any] = torch.Size((1, 42, 768) )
__lowerCamelCase : Tuple = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
__lowerCamelCase : Union[str, Any] = torch.Size((1, 1, 1_024) )
__lowerCamelCase : Dict = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
__lowerCamelCase : int = torch.Size((1, 1, 768) )
__lowerCamelCase : Dict = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(SCREAMING_SNAKE_CASE__ ) )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Tuple = {}
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase , __lowerCamelCase : List[Any] = line.rstrip().split('\t' )
__lowerCamelCase : Any = index
return entity_vocab
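# --- Editor's illustrative note (not part of the dataset row above) ---
# The loader expects one entity per line, tab-separated as "<title>\t<count>",
# and keeps only the title mapped to its line index. Hypothetical file contents:
#
#   [PAD]\t0
#   [UNK]\t0
#   [MASK]\t0
#   Ana Ivanovic\t123
#
# would yield {"[PAD]": 0, "[UNK]": 1, "[MASK]": 2, "Ana Ivanovic": 3}.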
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
lowercase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 194 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
__lowercase = parser.parse_args()
if args.model_type == "bert":
__lowercase = BertForMaskedLM.from_pretrained(args.model_name)
__lowercase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
__lowercase = model.state_dict()
__lowercase = {}
for w in ["word_embeddings", "position_embeddings"]:
__lowercase = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__lowercase = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
__lowercase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__lowercase = state_dict["""cls.predictions.decoder.weight"""]
__lowercase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowercase = state_dict[f'''cls.predictions.transform.dense.{w}''']
__lowercase = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
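# --- Editor's illustrative sketch (not part of the dataset row above) ---
# The loop above is a teacher-to-student layer selection: teacher layers
# [0, 2, 4, 7, 9, 11] become student layers [0..5]. A minimal, framework-free
# rendering of the same idea over a plain state-dict mapping:
def select_layers(teacher_sd, prefix="bert", teacher_layers=(0, 2, 4, 7, 9, 11)):
    student_sd = {}
    for std_idx, teacher_idx in enumerate(teacher_layers):
        src = f"{prefix}.encoder.layer.{teacher_idx}."
        dst = f"{prefix}.encoder.layer.{std_idx}."
        for key, value in teacher_sd.items():
            if key.startswith(src):
                student_sd[dst + key[len(src):]] = value  # renumber the layer
    return student_sd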
| 40 |
'''simple docstring'''
__SCREAMING_SNAKE_CASE :List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
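# --- Editor's illustrative sketch (not part of the dataset row above) ---
# Every block in this __init__ repeats one guard pattern: probe for an optional
# backend and fall back to dummy objects when it is missing. A self-contained
# reduction of the idea; `importlib.util.find_spec` stands in for the library's
# own is_*_available() helpers, and the module name is arbitrary.
import importlib
import importlib.util

class OptionalDependencyNotAvailable(Exception):
    pass

def guarded_import(module_name="scipy"):
    try:
        if importlib.util.find_spec(module_name) is None:
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        return None  # the real file imports dummy placeholders that error on use
    return importlib.import_module(module_name)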
| 22 | 0 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def _snake_case ( lowercase__ ):
def wrapper(*lowercase__ , **lowercase__ ):
_lowerCamelCase : Tuple = timeit.default_timer()
_lowerCamelCase : Tuple = func(*lowercase__ , **lowercase__ )
_lowerCamelCase : str = timeit.default_timer() - starttime
return delta
_lowerCamelCase : List[Any] = func.__name__
return wrapper
def _snake_case ( lowercase__ , lowercase__=100 , lowercase__=None ):
_lowerCamelCase : Dict = []
_lowerCamelCase : List[str] = seq_shapes or {}
for i in range(lowercase__ ):
_lowerCamelCase : Optional[int] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowercase__ , _ArrayXD ):
_lowerCamelCase : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowercase__ , datasets.Value ):
if v.dtype == "string":
_lowerCamelCase : Any = 'The small grey turtle was surprisingly fast when challenged.'
else:
_lowerCamelCase : List[str] = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowercase__ , datasets.Sequence ):
while isinstance(lowercase__ , datasets.Sequence ):
_lowerCamelCase : Tuple = v.feature
_lowerCamelCase : List[Any] = seq_shapes[k]
_lowerCamelCase : int = np.random.rand(*lowercase__ ).astype(v.dtype )
_lowerCamelCase : List[str] = data
dummy_data.append((i, example) )
return dummy_data
def _snake_case ( lowercase__ , lowercase__ , lowercase__=100 , lowercase__=None ):
_lowerCamelCase : List[str] = generate_examples(lowercase__ , num_examples=lowercase__ , seq_shapes=lowercase__ )
with ArrowWriter(features=lowercase__ , path=lowercase__ ) as writer:
for key, record in dummy_data:
_lowerCamelCase : str = features.encode_example(lowercase__ )
writer.write(lowercase__ )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )
_lowerCamelCase : Optional[int] = datasets.Dataset.from_file(filename=lowercase__ , info=datasets.DatasetInfo(features=lowercase__ ) )
    return dataset
| 12 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (UnCLIPScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase )
def A_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
_lowerCamelCase : str = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A_ ( self ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5
def A_ ( self ):
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : Union[str, Any] = scheduler.timesteps
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(25 )
_lowerCamelCase : Optional[Any] = scheduler.timesteps
_lowerCamelCase : Optional[int] = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : str = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : List[Any] = model(lowercase , lowercase )
if i + 1 == timesteps.shape[0]:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(
lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : List[Any] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
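    # --- Editor's illustrative note (not part of the dataset row above) ---
    # Both loop tests follow the standard diffusion sampling recipe: the model
    # predicts the noise residual for timestep t, scheduler.step() turns it into
    # the previous sample x_{t-1}, and the fixed torch.manual_seed(0) makes the
    # summed/mean magnitudes reproducible to the asserted tolerances.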
def A_ ( self ):
pass
def A_ ( self ):
        pass
| 12 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _A ( unittest.TestCase ):
snake_case__ : List[str] = ViTImageProcessor if is_vision_available() else None
@property
def A__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ):
"""simple docstring"""
lowercase = (3, 32, 128)
lowercase = tempfile.mkdtemp()
# fmt: off
lowercase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowercase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
lowercase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowercase = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
lowercase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) )
return image_input
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
lowercase = self.get_image_processor()
lowercase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
lowercase = self.get_image_processor()
lowercase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
lowercase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(__lowerCAmelCase , return_tensors="""np""" )
lowercase = processor(images=__lowerCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = """test"""
lowercase = processor(text=__lowerCAmelCase )
lowercase = tokenizer(__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = """test"""
lowercase = self.prepare_image_inputs()
lowercase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.char_decode(__lowerCAmelCase )
lowercase = tokenizer.batch_decode(__lowerCAmelCase )
lowercase = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = None
lowercase = self.prepare_image_inputs()
lowercase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
lowercase = torch.randn(1 , 27 , 38 )
lowercase = torch.randn(1 , 27 , 5_0257 )
lowercase = torch.randn(1 , 27 , 3_0522 )
lowercase = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
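# --- Editor's illustrative note (not part of the dataset row above) ---
# MGP-STR reads the same image with three recognition heads -- character-level
# (38-way), BPE (50257 = GPT-2 vocab) and WordPiece (30522 = BERT vocab) -- which
# is why batch_decode above takes three logit tensors and returns per-head
# predictions alongside the fused generated_text.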
| 197 |
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] ) -> int:
'''simple docstring'''
lowercase = [0 for i in range(r + 1 )]
# nc0 = 1
lowercase = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowercase = min(lowerCAmelCase__ , lowerCAmelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=1_0, r=5))
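# --- Editor's illustrative sketch (not part of the dataset row above) ---
# A readable equivalent of the snippet: nCr via one Pascal's-triangle row kept
# in place, O(r) space. Updating j from right to left lets the single array hold
# row i-1 while row i is being built.
def binomial_coefficient_clean(n: int, r: int) -> int:
    c = [0] * (r + 1)
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        j = min(i, r)       # entries beyond min(i, r) are still zero
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]

assert binomial_coefficient_clean(10, 5) == 252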
| 197 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class lowercase ( a ):
lowercase__ : Optional[torch.FloatTensor] = None
lowercase__ : torch.FloatTensor = None
lowercase__ : Optional[Tuple[torch.FloatTensor]] = None
lowercase__ : Optional[Tuple[torch.FloatTensor]] = None
class lowercase ( a ):
def __init__( self : Dict , _UpperCamelCase : Optional[int]=1 , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : List[str]=2 , _UpperCamelCase : List[Any]=512 , _UpperCamelCase : Optional[int]="cls" , _UpperCamelCase : List[str]=False , _UpperCamelCase : Any=True , **_UpperCamelCase : List[str] , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = project_dim
SCREAMING_SNAKE_CASE = pooler_fn
SCREAMING_SNAKE_CASE = learn_encoder
SCREAMING_SNAKE_CASE = use_attention_mask
class lowercase ( a ):
lowercase__ : Dict = [R"""pooler""", R"""logit_scale"""]
lowercase__ : Any = [R"""position_ids""", R"""predictions.decoder.bias"""]
lowercase__ : Optional[int] = """roberta"""
lowercase__ : Union[str, Any] = RobertaSeriesConfig
def __init__( self : List[str] , _UpperCamelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
super().__init__(_UpperCamelCase )
SCREAMING_SNAKE_CASE = XLMRobertaModel(_UpperCamelCase )
SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , "has_pre_transformation" , _UpperCamelCase )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __snake_case( self : Dict , _UpperCamelCase : Optional[torch.Tensor] = None , _UpperCamelCase : Optional[torch.Tensor] = None , _UpperCamelCase : Optional[torch.Tensor] = None , _UpperCamelCase : Optional[torch.Tensor] = None , _UpperCamelCase : Optional[torch.Tensor] = None , _UpperCamelCase : Optional[torch.Tensor] = None , _UpperCamelCase : Optional[torch.Tensor] = None , _UpperCamelCase : Optional[torch.Tensor] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[bool] = None , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.base_model(
input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , position_ids=_UpperCamelCase , head_mask=_UpperCamelCase , inputs_embeds=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , output_attentions=_UpperCamelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_UpperCamelCase , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE = outputs["hidden_states"][-2]
SCREAMING_SNAKE_CASE = self.pre_LN(_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.transformation_pre(_UpperCamelCase )
return TransformationModelOutput(
projection_state=_UpperCamelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=_UpperCamelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 362 |
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : int ):
while b:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = b, a % b
return a
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : int ):
return a if b == 0 else euclidean_gcd_recursive(UpperCAmelCase__ , a % b )
def __lowerCamelCase ():
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
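# --- Editor's illustrative sketch (not part of the dataset row above) ---
# The iterative loop above is the entire algorithm: replace (a, b) with
# (b, a mod b) until b reaches zero. A self-contained cross-check against the
# standard library:
import math

def gcd_iterative(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a

assert all(gcd_iterative(a, b) == math.gcd(a, b) for a, b in [(3, 5), (48, 18), (270, 192), (6, 0)])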
| 206 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __SCREAMING_SNAKE_CASE ( ):
_snake_case = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=_SCREAMING_SNAKE_CASE )
_snake_case = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=_SCREAMING_SNAKE_CASE )
env_command_parser(subparsers=_SCREAMING_SNAKE_CASE )
launch_command_parser(subparsers=_SCREAMING_SNAKE_CASE )
tpu_command_parser(subparsers=_SCREAMING_SNAKE_CASE )
test_command_parser(subparsers=_SCREAMING_SNAKE_CASE )
# Let's go
_snake_case = parser.parse_args()
if not hasattr(_SCREAMING_SNAKE_CASE , """func""" ):
parser.print_help()
exit(1 )
# Run
args.func(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    main()
| 341 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowerCAmelCase = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ):
if attention_mask is None:
_snake_case = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_snake_case = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_snake_case = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_snake_case = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=99 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=0.02 , ) -> Union[str, Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = initializer_range
def lowercase (self ) -> str:
_snake_case = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_snake_case = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_snake_case = shift_tokens_right(UpperCAmelCase , 1 , 2 )
_snake_case = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase , )
_snake_case = prepare_blenderbot_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, inputs_dict
def lowercase (self ) -> Dict:
_snake_case, _snake_case = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
_snake_case = 20
_snake_case = model_class_name(UpperCAmelCase )
_snake_case = model.encode(inputs_dict["""input_ids"""] )
_snake_case, _snake_case = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_snake_case = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase )
_snake_case = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_snake_case = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_snake_case = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , )
_snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_snake_case = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase , )
_snake_case = model.decode(UpperCAmelCase , UpperCAmelCase )
_snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
_snake_case = 20
_snake_case = model_class_name(UpperCAmelCase )
_snake_case = model.encode(inputs_dict["""input_ids"""] )
_snake_case, _snake_case = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_snake_case = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_snake_case = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase )
_snake_case = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_snake_case = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , )
_snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_snake_case = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , )
_snake_case = model.decode(UpperCAmelCase , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase )
_snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
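# --- Editor's illustrative note (not part of the dataset row above) ---
# The two checkers above exercise Flax's cached autoregressive decoding: decode
# the prefix once to fill the key/value cache, then feed one token at a time
# with explicit decoder position ids. Generic shape of the pattern (pseudocode
# over a hypothetical model exposing the same encode/decode/init_cache API):
#
#     encoder_outputs = model.encode(input_ids)
#     cache = model.init_cache(batch_size, max_length, encoder_outputs)
#     out = model.decode(prefix_ids, encoder_outputs,
#                        past_key_values=cache, decoder_position_ids=prefix_pos)
#     out = model.decode(next_token, encoder_outputs,
#                        past_key_values=out.past_key_values,
#                        decoder_position_ids=last_pos)
#
# Each test then asserts the cached, two-step result matches one full decode.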
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = 99
def lowercase (self ) -> Any:
_snake_case = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_snake_case = input_ids.shape[0]
_snake_case = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase (self ) -> Optional[Any]:
_snake_case, _snake_case, _snake_case = self._get_config_and_data()
_snake_case = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase )
_snake_case = lm_model(input_ids=UpperCAmelCase )
_snake_case = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , UpperCAmelCase )
def lowercase (self ) -> int:
_snake_case = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_snake_case = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase )
_snake_case = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_snake_case = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_snake_case = lm_model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase )
_snake_case = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , UpperCAmelCase )
def lowercase (self ) -> Tuple:
_snake_case = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
_snake_case = shift_tokens_right(UpperCAmelCase , 1 , 2 )
_snake_case = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum()
_snake_case = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _lowerCAmelCase ( __snake_case , unittest.TestCase , __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowercase (self ) -> Any:
_snake_case = FlaxBlenderbotModelTester(self )
def lowercase (self ) -> str:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def lowercase (self ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def lowercase (self ) -> Dict:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_snake_case = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
_snake_case = model_class(UpperCAmelCase )
@jax.jit
def encode_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ):
return model.encode(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase )
with self.subTest("""JIT Enabled""" ):
_snake_case = encode_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_snake_case = encode_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase (self ) -> str:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_snake_case = model_class(UpperCAmelCase )
_snake_case = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_snake_case = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
return model.decode(
decoder_input_ids=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , encoder_outputs=UpperCAmelCase , )
with self.subTest("""JIT Enabled""" ):
_snake_case = decode_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_snake_case = decode_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase (self ) -> Any:
for model_class_name in self.all_model_classes:
_snake_case = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_snake_case = np.ones((1, 1) ) * model.config.eos_token_id
_snake_case = model(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
def lowercase (self ) -> Dict:
_snake_case = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
_snake_case = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
_snake_case = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=UpperCAmelCase )
_snake_case = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" )
_snake_case = ["""Sam"""]
_snake_case = tokenizer(UpperCAmelCase , return_tensors="""jax""" )
_snake_case = model.generate(**UpperCAmelCase , **UpperCAmelCase )
_snake_case = """Sam is a great name. It means \"sun\" in Gaelic."""
_snake_case = tokenizer.batch_decode(UpperCAmelCase , **UpperCAmelCase )
        assert generated_txt[0].strip() == tgt_text
| 341 | 1 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Any ='%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
SCREAMING_SNAKE_CASE_: Optional[int] =f"https://www.google.com/search?q={query}&num=100"
SCREAMING_SNAKE_CASE_: int =requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE_: Union[str, Any] =(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
SCREAMING_SNAKE_CASE_: Tuple =parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 362 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __A ( unittest.TestCase , UpperCamelCase__ ):
def _lowercase (self : Tuple ):
UpperCAmelCase_ = load_tool("text-to-speech" )
self.tool.setup()
def _lowercase (self : Union[str, Any] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCAmelCase_ = self.tool("hey" )
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
def _lowercase (self : List[str] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCAmelCase_ = self.tool("hey" )
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 106 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __snake_case ( TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ):
def __init__( self : Any , _snake_case : int=None , **_snake_case : Dict):
"""simple docstring"""
super().__init__(features=_snake_case)
UpperCAmelCase_ = torch_tensor_kwargs
import torch # noqa import torch at initialization
def lowerCamelCase ( self : Optional[Any] , _snake_case : Optional[int]):
"""simple docstring"""
import torch
if isinstance(_snake_case , _snake_case) and column:
if all(
isinstance(_snake_case , torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column):
return torch.stack(_snake_case)
return column
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]):
"""simple docstring"""
import torch
        if isinstance(_snake_case , (str, bytes, type(None))):
return value
elif isinstance(_snake_case , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
UpperCAmelCase_ = {}
if isinstance(_snake_case , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
UpperCAmelCase_ = {'''dtype''': torch.intaa}
elif isinstance(_snake_case , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
UpperCAmelCase_ = {'''dtype''': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_snake_case , PIL.Image.Image):
UpperCAmelCase_ = np.asarray(_snake_case)
return torch.tensor(_snake_case , **{**default_dtype, **self.torch_tensor_kwargs})
def lowerCamelCase ( self : List[Any] , _snake_case : List[Any]):
"""simple docstring"""
import torch
# support for torch, tf, jax etc.
if hasattr(_snake_case , '''__array__''') and not isinstance(_snake_case , torch.Tensor):
UpperCAmelCase_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_snake_case , np.ndarray):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_snake_case) for substruct in data_struct])
elif isinstance(_snake_case , (list, tuple)):
return self._consolidate([self.recursive_tensorize(_snake_case) for substruct in data_struct])
return self._tensorize(_snake_case)
def lowerCamelCase ( self : List[Any] , _snake_case : dict):
"""simple docstring"""
return map_nested(self._recursive_tensorize , _snake_case , map_list=_snake_case)
def lowerCamelCase ( self : List[Any] , _snake_case : pa.Table):
"""simple docstring"""
UpperCAmelCase_ = self.numpy_arrow_extractor().extract_row(_snake_case)
UpperCAmelCase_ = self.python_features_decoder.decode_row(_snake_case)
return self.recursive_tensorize(_snake_case)
def lowerCamelCase ( self : List[Any] , _snake_case : pa.Table):
"""simple docstring"""
UpperCAmelCase_ = self.numpy_arrow_extractor().extract_column(_snake_case)
UpperCAmelCase_ = self.python_features_decoder.decode_column(_snake_case , pa_table.column_names[0])
UpperCAmelCase_ = self.recursive_tensorize(_snake_case)
UpperCAmelCase_ = self._consolidate(_snake_case)
return column
def lowerCamelCase ( self : List[str] , _snake_case : pa.Table):
"""simple docstring"""
UpperCAmelCase_ = self.numpy_arrow_extractor().extract_batch(_snake_case)
UpperCAmelCase_ = self.python_features_decoder.decode_batch(_snake_case)
UpperCAmelCase_ = self.recursive_tensorize(_snake_case)
for column_name in batch:
UpperCAmelCase_ = self._consolidate(batch[column_name])
return batch
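
The formatter above is essentially map_nested recursion plus a consolidation pass that stacks equal-shaped tensors. A self-contained sketch of the same idea, assuming nothing beyond numpy and torch (the helper names here are illustrative, not library API):

import numpy as np
import torch

def to_tensor(value):
    # Leave strings/None alone; promote numeric scalars and arrays to tensors.
    if isinstance(value, (str, bytes, type(None))):
        return value
    return torch.tensor(np.asarray(value))

def recursive_tensorize(data):
    # Walk dicts/lists, tensorize the leaves, then stack equal-shaped tensors.
    if isinstance(data, dict):
        return {k: recursive_tensorize(v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        items = [recursive_tensorize(x) for x in data]
        if items and all(
            isinstance(x, torch.Tensor) and x.shape == items[0].shape and x.dtype == items[0].dtype
            for x in items
        ):
            return torch.stack(items)  # consolidate into one batched tensor
        return items
    return to_tensor(data)

batch = {"input_ids": [[1, 2, 3], [4, 5, 6]], "label": [0, 1]}
out = recursive_tensorize(batch)
print(out["input_ids"].shape)  # torch.Size([2, 3])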
| 51 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def ta_base_tokenizer( self ):
        """simple docstring"""
        return ByTaTokenizer.from_pretrained('''google/byt5-small''')
    def get_tokenizer( self , **_snake_case ):
        """simple docstring"""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case)
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5):
        """simple docstring"""
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r'''^[ a-zA-Z]+$''' , t[1]) , toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False) , toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False)
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False)
        return output_txt, output_ids
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''])
UpperCAmelCase_ = tokenizer(['''hi''', '''I went to the gym''', ''''''])
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''])
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = '''Unicode €.'''
UpperCAmelCase_ = tokenizer(_snake_case)
UpperCAmelCase_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , _snake_case)
# decoding
UpperCAmelCase_ = tokenizer.decode(_snake_case)
self.assertEqual(_snake_case , '''Unicode €.</s>''')
UpperCAmelCase_ = tokenizer('''e è é ê ë''')
UpperCAmelCase_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , _snake_case)
# decoding
UpperCAmelCase_ = tokenizer.decode(_snake_case)
self.assertEqual(_snake_case , '''e è é ê ë</s>''')
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')) , '''e è é ê ë</s>''')
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
UpperCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
if FRAMEWORK != "jax":
UpperCAmelCase_ = list(batch.input_ids.numpy()[0])
else:
UpperCAmelCase_ = list(batch.input_ids.tolist()[0])
self.assertListEqual(_snake_case , _snake_case)
self.assertEqual((2, 37) , batch.input_ids.shape)
self.assertEqual((2, 37) , batch.attention_mask.shape)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case)
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _snake_case)
self.assertIn('''attention_mask''' , _snake_case)
self.assertNotIn('''decoder_input_ids''' , _snake_case)
self.assertNotIn('''decoder_attention_mask''' , _snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = [
'''Summary of the text.''',
'''Another summary.''',
]
UpperCAmelCase_ = tokenizer(
text_target=_snake_case , max_length=32 , padding='''max_length''' , truncation=_snake_case , return_tensors=_snake_case)
self.assertEqual(32 , targets['''input_ids'''].shape[1])
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = ['''A long paragraph for summarization. </s>''']
UpperCAmelCase_ = ['''Summary of the text. </s>''']
# fmt: off
UpperCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
UpperCAmelCase_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
UpperCAmelCase_ = tokenizer(_snake_case , text_target=_snake_case)
self.assertEqual(_snake_case , batch['''input_ids'''][0])
self.assertEqual(_snake_case , batch['''labels'''][0])
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
self.assertNotEqual(tokenizer.model_max_length , 42)
# Now let's start the test
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case)
UpperCAmelCase_ = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
self.assertListEqual(_snake_case , _snake_case)
shutil.rmtree(_snake_case)
UpperCAmelCase_ = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''])
UpperCAmelCase_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''')
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case)
UpperCAmelCase_ = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
self.assertListEqual(_snake_case , _snake_case)
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 42)
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case , model_max_length=43)
self.assertEqual(tokenizer.model_max_length , 43)
shutil.rmtree(_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case)
with open(os.path.join(_snake_case , '''special_tokens_map.json''') , encoding='''utf-8''') as json_file:
UpperCAmelCase_ = json.load(_snake_case)
with open(os.path.join(_snake_case , '''tokenizer_config.json''') , encoding='''utf-8''') as json_file:
UpperCAmelCase_ = json.load(_snake_case)
UpperCAmelCase_ = [F"""<extra_id_{i}>""" for i in range(125)]
UpperCAmelCase_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
UpperCAmelCase_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(_snake_case , '''special_tokens_map.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(_snake_case , _snake_case)
with open(os.path.join(_snake_case , '''tokenizer_config.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(_snake_case , _snake_case)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ = tokenizer_class.from_pretrained(
_snake_case , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_snake_case)]
UpperCAmelCase_ = tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens)
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])) , )
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case)
UpperCAmelCase_ = tokenizer_class.from_pretrained(_snake_case)
self.assertTrue(tokenizer.decode([255]) == '''''')
def lowerCamelCase ( self : int):
"""simple docstring"""
pass
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Dict):
"""simple docstring"""
pass
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizers(fast=_snake_case , do_lower_case=_snake_case)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
UpperCAmelCase_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
UpperCAmelCase_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
UpperCAmelCase_ = 0
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(
_snake_case , skip_special_tokens=_snake_case)
for attr in attributes_list:
setattr(_snake_case , attr + '''_id''' , _snake_case)
self.assertEqual(getattr(_snake_case , _snake_case) , _snake_case)
self.assertEqual(getattr(_snake_case , attr + '''_id''') , _snake_case)
setattr(_snake_case , attr + '''_id''' , _snake_case)
self.assertEqual(getattr(_snake_case , _snake_case) , _snake_case)
self.assertEqual(getattr(_snake_case , attr + '''_id''') , _snake_case)
setattr(_snake_case , '''additional_special_tokens_ids''' , [])
self.assertListEqual(getattr(_snake_case , '''additional_special_tokens''') , [])
self.assertListEqual(getattr(_snake_case , '''additional_special_tokens_ids''') , [])
setattr(_snake_case , '''additional_special_tokens_ids''' , [token_id_to_test_setters])
self.assertListEqual(getattr(_snake_case , '''additional_special_tokens''') , [token_to_test_setters])
self.assertListEqual(getattr(_snake_case , '''additional_special_tokens_ids''') , [token_id_to_test_setters])
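
The expected id sequences in these tests follow ByT5's byte-level scheme: each UTF-8 byte is shifted by 3 to make room for the special tokens (ids 0, 1, 2), and the eos id 1 is appended. A quick sketch of that mapping (an illustration of the scheme, not the library implementation):

OFFSET = 3  # ids 0..2 are reserved for the special tokens
EOS_ID = 1

def byte_encode(text: str) -> list[int]:
    return [b + OFFSET for b in text.encode("utf-8")] + [EOS_ID]

def byte_decode(ids: list[int]) -> str:
    data = bytes(i - OFFSET for i in ids if i >= OFFSET)
    return data.decode("utf-8", errors="ignore")

ids = byte_encode("Unicode €.")
print(ids)  # [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
print(byte_decode(ids))  # Unicode €.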
| 51 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Yield the primes in order via an incremental (lazy) Sieve of Eratosthenes."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: reschedule its smallest prime factor.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: its square is the first composite to mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    """Find the least odd n for which the remainder 2 * p_n * n first exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime, as the remainder would only be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
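
A quick usage check of the incremental sieve (islice is from the standard library; the output is the expected first ten primes):

from itertools import islice

print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]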
| 25 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config( PretrainedConfig ):
    """simple docstring"""
    model_type = "mobilenet_v2"
    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict([('pixel_values', {0: 'batch'})] )
    @property
    def outputs( self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4
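
`depth_multiplier`, `depth_divisible_by`, and `min_depth` interact through the standard MobileNet channel-rounding rule. A sketch of that rule (the helper name `make_divisible` follows common convention and is shown for illustration only):

def make_divisible(value: float, divisor: int = 8, min_value: int | None = None) -> int:
    # Round channel counts to the nearest multiple of `divisor`,
    # never dropping more than 10% below the requested value.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value

# e.g. scaling a 32-channel layer by depth_multiplier=1.4:
print(make_divisible(32 * 1.4, divisor=8))  # 48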
| 25 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 145 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( PretrainedConfig):
    model_type = '''wavlm'''
    def __init__(self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , num_buckets=320 , max_bucket_distance=800 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ) -> Optional[Any]:
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self ) -> int:
        return functools.reduce(operator.mul , self.conv_stride , 1 )
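
`inputs_to_logits_ratio` above is just the product of the conv strides; the exact frame count follows the usual 1-D convolution formula applied layer by layer. A small sketch of that computation (an illustrative helper, not library code):

def conv_output_length(input_length: int, kernels, strides) -> int:
    # Each conv layer maps L -> floor((L - kernel) / stride) + 1 (no padding).
    for kernel, stride in zip(kernels, strides):
        input_length = (input_length - kernel) // stride + 1
    return input_length

# 1 second of 16 kHz audio through the default feature extractor:
print(conv_output_length(16000, (10, 3, 3, 3, 3, 2, 2), (5, 2, 2, 2, 2, 2, 2)))  # 49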
| 5 | 0 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class OnnxRuntimeModel:
    '''simple docstring'''
    def __init__( self , model=None , **kwargs ):
        logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
        self.model = model
        self.model_save_dir = kwargs.get('''model_save_dir''' , None )
        self.latest_model_name = kwargs.get('''latest_model_name''' , ONNX_WEIGHTS_NAME )
    def __call__( self , **kwargs ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
    @staticmethod
    def load_model( path: Union[str, Path] , provider=None , sess_options=None ):
        if provider is None:
            logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
            provider = '''CPUExecutionProvider'''
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained( self , save_directory: Union[str, Path] , file_name: Optional[str] = None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self , save_directory: Union[str, os.PathLike] , **kwargs ):
        if os.path.isfile(save_directory ):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
    @classmethod
    def _from_pretrained( cls , model_id: Union[str, Path] , use_auth_token: Optional[Union[bool, str, None]] = None , revision: Optional[Union[str, None]] = None , force_download: bool = False , cache_dir: Optional[str] = None , file_name: Optional[str] = None , provider: Optional[str] = None , sess_options: Optional["ort.SessionOptions"] = None , **kwargs , ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs['''model_save_dir'''] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs['''model_save_dir'''] = Path(model_cache_path ).parent
            kwargs['''latest_model_name'''] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
    @classmethod
    def from_pretrained( cls , model_id: Union[str, Path] , force_download: bool = True , use_auth_token: Optional[str] = None , cache_dir: Optional[str] = None , **model_kwargs , ):
        revision = None
        if len(str(model_id ).split('''@''' ) ) == 2:
            model_id, revision = str(model_id ).split('''@''' )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
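
A minimal usage sketch for the wrapper above ("model.onnx" and the input name "input" are placeholders for a real exported model, not values from this file):

import numpy as np

# Load a local ONNX file and run it with keyword numpy inputs.
session = OnnxRuntimeModel.load_model("model.onnx")  # returns an ort.InferenceSession
wrapper = OnnxRuntimeModel(model=session, model_save_dir=".")
outputs = wrapper(input=np.zeros((1, 3, 224, 224), dtype=np.float32))
print(type(outputs))  # list of output arrays from session.run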
| 334 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_sw3'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
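
Both lazy-init files above follow the same pattern: heavy submodules are only imported on first attribute access. A toy version of that mechanism (a standalone sketch using importlib; the real `_LazyModule` carries more machinery):

import importlib
import types

class LazyModule(types.ModuleType):
    # Map attribute names to the submodule that defines them; import on demand.
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value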
| 334 | 1 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426_880 * Decimal(10_005 ).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
a__ : Union[str, Any] = 5_0
print(f'''The first {n} digits of pi is: {pi(n)}''')
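
A quick sanity check (each Chudnovsky term contributes roughly 14 correct digits, which is why the iteration count is ceil(precision / 14)):

value = pi(30)
print(value)  # 3.1415926535897932384626433832 (precision-limited)
assert value.startswith("3.14159265358979")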
| 349 |
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption under every key."""
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}" )
def main() -> None:
    """simple docstring"""
    message = input("""Encrypted message: """ )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
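
Example run: encrypting "HELLO" with key 3 gives "KHOOR", and the brute force recovers it at key #3:

decrypt("KHOOR")
# Decryption using Key #0: KHOOR
# ...
# Decryption using Key #3: HELLO
# ...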
| 349 | 1 |
'''simple docstring'''
from torch import nn
def snake_case_ ( act_fn: str ) -> nn.Module:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
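
Usage sketch for the selector above, applying the returned module to a tensor (`snake_case_` is the mangled name this file gives the factory):

import torch

act = snake_case_("silu")           # nn.SiLU()
x = torch.tensor([-1.0, 0.0, 1.0])
print(act(x))                       # tensor([-0.2689, 0.0000, 0.7311])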
| 349 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
A_ : Dict = logging.getLogger(__name__)
class GLUETransformer ( BaseTransformer ):
    """simple docstring"""
    mode = """sequence-classification"""
    def __init__( self ,hparams ) -> None:
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams ,num_labels ,self.mode )
def _snake_case ( self ,**a_ ) -> Optional[Any]:
return self.model(**a_ )
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Any = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : Any = self(**a_ )
_UpperCAmelCase : int = outputs[0]
_UpperCAmelCase : Any = self.trainer.lr_schedulers[0]["""scheduler"""]
_UpperCAmelCase : Any = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[int] = self.hparams
_UpperCAmelCase : int = processors[args.task]()
_UpperCAmelCase : str = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCAmelCase : Tuple = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,a_ )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
_UpperCAmelCase : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_UpperCAmelCase : Union[str, Any] = convert_examples_to_features(
a_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,a_ )
torch.save(a_ ,a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = False ) -> DataLoader:
_UpperCAmelCase : Union[str, Any] = """dev""" if mode == """test""" else mode
_UpperCAmelCase : Tuple = self._feature_file(a_ )
logger.info("""Loading features from cached file %s""" ,a_ )
_UpperCAmelCase : Union[str, Any] = torch.load(a_ )
_UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
_UpperCAmelCase : Tuple = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
_UpperCAmelCase : str = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : str = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(a_ ,a_ ,a_ ,a_ ) ,batch_size=a_ ,shuffle=a_ ,)
def _snake_case ( self ,a_ ,a_ ) -> Any:
_UpperCAmelCase : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : List[str] = self(**a_ )
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = outputs[:2]
_UpperCAmelCase : List[str] = logits.detach().cpu().numpy()
_UpperCAmelCase : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self ,a_ ) -> tuple:
_UpperCAmelCase : Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_UpperCAmelCase : Any = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : int = np.argmax(a_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Union[str, Any] = np.squeeze(a_ )
_UpperCAmelCase : str = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
_UpperCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,a_ ,a_ )}
_UpperCAmelCase : Dict = dict(results.items() )
_UpperCAmelCase : Any = results
return ret, preds_list, out_label_list
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser ,root_dir ):
        BaseTransformer.add_model_specific_args(parser ,root_dir )
parser.add_argument(
"""--max_seq_length""" ,default=128 ,type=a_ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--task""" ,default="""""" ,type=a_ ,required=a_ ,help="""The GLUE task to run""" ,)
parser.add_argument(
"""--gpus""" ,default=0 ,type=a_ ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,)
parser.add_argument(
"""--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" )
return parser
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""" , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
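
The `_eval_end` logic above reduces to one branch on the GLUE output mode: argmax for classification heads, squeeze for the single regression task (STS-B). A standalone sketch of that step (names here are illustrative):

import numpy as np

def logits_to_predictions(logits: np.ndarray, output_mode: str) -> np.ndarray:
    if output_mode == "classification":
        return np.argmax(logits, axis=1)      # class index per example
    if output_mode == "regression":
        return np.squeeze(logits)             # one float per example
    raise ValueError(f"unknown output mode: {output_mode}")

print(logits_to_predictions(np.array([[0.1, 2.3], [1.5, -0.2]]), "classification"))  # [1 0]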
| 349 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 30}
        crop_size = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowerCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """crop_pct""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
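
The `crop_pct` convention exercised above means: resize so the short edge becomes `size / crop_pct`, then center-crop to `size`. A sketch of that geometry (pure arithmetic, shown for illustration):

def resize_then_center_crop_dims(height: int, width: int, size: int = 30, crop_pct: float = 0.9):
    # Short edge is scaled to size / crop_pct before the center crop.
    scale_to = int(size / crop_pct)                     # e.g. 30 / 0.9 -> 33
    short, long = sorted((height, width))
    new_short, new_long = scale_to, int(long * scale_to / short)
    return (new_short, new_long), (size, size)          # resized dims, crop dims

print(resize_then_center_crop_dims(240, 320))  # ((33, 44), (30, 30))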
| 228 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'''add_prefix_space''': True}
    test_seq2seq = False
    def setUp( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
a = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
a = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
a = {"""unk_token""": """<unk>"""}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__magic_name__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def lowerCamelCase__ ( self :Dict , __magic_name__ :List[str] ):
'''simple docstring'''
a = """lower newer"""
a = """lower newer"""
return input_text, output_text
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = """lower newer"""
a = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
a = tokenizer.tokenize(__magic_name__ , add_prefix_space=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
a = tokens + [tokenizer.unk_token]
a = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a = self.get_tokenizer()
a = self.get_rust_tokenizer(add_prefix_space=__magic_name__ )
a = """lower newer"""
# Testing tokenization
a = tokenizer.tokenize(__magic_name__ , add_prefix_space=__magic_name__ )
a = rust_tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Testing conversion to ids without special tokens
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
a = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Testing conversion to ids with special tokens
a = self.get_rust_tokenizer(add_prefix_space=__magic_name__ )
a = tokenizer.encode(__magic_name__ , add_prefix_space=__magic_name__ )
a = rust_tokenizer.encode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Testing the unknown token
a = tokens + [rust_tokenizer.unk_token]
a = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def lowerCamelCase__ ( self :Optional[int] , *__magic_name__ :Tuple , **__magic_name__ :str ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Union[str, Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
# Simple input
a = """This is a simple input"""
a = ["""This is a simple input 1""", """This is a simple input 2"""]
a = ("""This is a simple input""", """This is a pair""")
a = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__magic_name__ , tokenizer_r.encode , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Simple input
self.assertRaises(__magic_name__ , tokenizer_r.encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Simple input
self.assertRaises(
__magic_name__ , tokenizer_r.batch_encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" , )
# Pair input
self.assertRaises(__magic_name__ , tokenizer_r.encode , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Pair input
self.assertRaises(__magic_name__ , tokenizer_r.encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Pair input
self.assertRaises(
__magic_name__ , tokenizer_r.batch_encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" , )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
a = """This is a simple input"""
a = ["""This is a simple input looooooooong""", """This is a simple input"""]
a = ("""This is a simple input""", """This is a pair""")
a = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
a = tokenizer.pad_token_id
a = tokenizer(__magic_name__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
a = tokenizer(__magic_name__ , padding=__magic_name__ , truncate=__magic_name__ , return_tensors="""np""" )
a = tokenizer(*__magic_name__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
a = tokenizer(__magic_name__ , padding=__magic_name__ , truncate=__magic_name__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = """$$$"""
a = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__magic_name__ , add_bos_token=__magic_name__ )
a = """This is a simple input"""
a = ["""This is a simple input 1""", """This is a simple input 2"""]
a = tokenizer.bos_token_id
a = tokenizer(__magic_name__ )
a = tokenizer(__magic_name__ )
self.assertEqual(out_s.input_ids[0] , __magic_name__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
a = tokenizer.decode(out_s.input_ids )
a = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __magic_name__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = [self.get_tokenizer(do_lower_case=__magic_name__ , add_bos_token=__magic_name__ )]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a = """Encode this."""
a = """This one too please."""
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
encoded_sequence += tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer.encode_plus(
__magic_name__ , __magic_name__ , add_special_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , )
a = encoded_sequence_dict["""input_ids"""]
a = encoded_sequence_dict["""special_tokens_mask"""]
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
a = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__magic_name__ )
]
a = [x for x in filtered_sequence if x is not None]
self.assertEqual(__magic_name__ , __magic_name__ )
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=__magic_name__ )
a = """A photo of a cat"""
a = tokenizer.encode(
__magic_name__ , )
self.assertEqual(__magic_name__ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""test_opt""" )
a = AutoTokenizer.from_pretrained("""./test_opt""" )
a = tokenizer.encode(
__magic_name__ , )
self.assertEqual(__magic_name__ , [2, 250, 1345, 9, 10, 4758] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=__magic_name__ )
a = """A photo of a cat"""
a = tokenizer.encode(
__magic_name__ , )
# Same as above
self.assertEqual(__magic_name__ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=__magic_name__ )
a = """bos"""
a = tokenizer.get_vocab()["""bos"""]
a = """A photo of a cat"""
a = tokenizer.encode(
__magic_name__ , )
# We changed the bos token
self.assertEqual(__magic_name__ , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""./tok""" )
a = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
a = tokenizer.encode(
__magic_name__ , )
self.assertEqual(__magic_name__ , [3_1957, 250, 1345, 9, 10, 4758] )
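
The toy vocab/merges in `setUp` exercise the greedy byte-pair-encoding loop: repeatedly apply the lowest-ranked adjacent merge until no known merge remains. A minimal sketch of that loop using the same merges table (an illustration, not the library implementation):

def bpe(word: str, ranks: dict) -> list:
    symbols = list(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no learnable merge left
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

ranks = {("\u0120", "l"): 0, ("\u0120l", "o"): 1, ("\u0120lo", "w"): 2, ("e", "r"): 3}
print(bpe("\u0120lower", ranks))  # ['\u0120low', 'er'], matching the expected tokens above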
| 228 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
'''simple docstring'''
pass
| 350 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase_: int = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCamelCase_: int = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase_: Union[str, Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCamelCase_: Tuple = {"""unk_token""": """<unk>"""}
UpperCamelCase_: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase_: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(snake_case_ ) )
def lowerCAmelCase__ ( self : Optional[int] , **snake_case_ : int ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[str] ):
UpperCamelCase_: Dict = """adapt react readapt apt"""
UpperCamelCase_: List[str] = """adapt react readapt apt"""
return input_text, output_text
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase_: List[Any] = """adapt react readapt apt"""
UpperCamelCase_: Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCamelCase_: int = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[Any] = tokens + [tokenizer.unk_token]
UpperCamelCase_: Union[str, Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
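# BPE walkthrough for "readapt" with the merges above (illustrative): starting from
# characters, the ranked merges apply as "a p" -> "ap", "ap t</w>" -> "apt</w>",
# "r e" -> "re", "a d" -> "ad", then "ad apt</w>" -> "adapt</w>", leaving two symbols
# surfaced as "re@@" and "adapt", exactly the middle of the expected bpe_tokens.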
| 223 | 0 |
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
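    # Expected behaviour (a sketch of the output given the test data above): head
    # insertion of a descending sort yields an ascending list, so this should print
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10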
| 207 |
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
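# Sketch of the replacement import path this warning points to (`starting_batch_size`
# is the decorator's real keyword argument; the training function below is only an
# illustration): the decorator retries the wrapped function with a halved batch size
# whenever it raises an out-of-memory error.
#
#   from accelerate import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # training step that may OOM on large batches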
| 207 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
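# Quick numeric check (arithmetic only): with the default scale_factor=8,
# downscale_height_and_width(768, 768) computes 768 // 64 = 12 per side and returns
# (96, 96); a non-multiple such as 767 rounds up and lands on the same (96, 96).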
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , ) -> int:
super().__init__()
self.register_modules(
unet=A , scheduler=A , movq=A , )
snake_case : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase ( self , A , A , A , A , A , A ) -> str:
if latents is None:
snake_case : List[str] = randn_tensor(A , generator=A , device=A , dtype=A )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
snake_case : Union[str, Any] = latents.to(A )
snake_case : Optional[Any] = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self , A=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
snake_case : Dict = torch.device(f"""cuda:{gpu_id}""" )
snake_case : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A , A )
def UpperCAmelCase ( self , A=0 ) -> Optional[Any]:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
snake_case : int = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
snake_case : Tuple = cpu_offload_with_hook(A , A , prev_module_hook=A )
# We'll offload the last model manually.
snake_case : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self ) -> str:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A )
def __call__( self , A , A , A , A = 5_1_2 , A = 5_1_2 , A = 1_0_0 , A = 4.0 , A = 1 , A = None , A = None , A = "pil" , A = True , ) -> Any:
snake_case : int = self._execution_device
snake_case : Tuple = guidance_scale > 1.0
if isinstance(A , A ):
snake_case : str = torch.cat(A , dim=0 )
if isinstance(A , A ):
snake_case : int = torch.cat(A , dim=0 )
if isinstance(A , A ):
snake_case : Tuple = torch.cat(A , dim=0 )
snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
snake_case : Optional[int] = image_embeds.repeat_interleave(A , dim=0 )
snake_case : Dict = negative_image_embeds.repeat_interleave(A , dim=0 )
snake_case : Optional[Any] = hint.repeat_interleave(A , dim=0 )
snake_case : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A )
snake_case : Optional[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A )
self.scheduler.set_timesteps(A , device=A )
snake_case : Tuple = self.scheduler.timesteps
snake_case : Tuple = self.movq.config.latent_channels
snake_case : Optional[int] = downscale_height_and_width(A , A , self.movq_scale_factor )
# create initial latent
snake_case : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A , A , A , self.scheduler , )
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : Any = {"""image_embeds""": image_embeds, """hint""": hint}
snake_case : List[Any] = self.unet(
sample=A , timestep=A , encoder_hidden_states=A , added_cond_kwargs=A , return_dict=A , )[0]
if do_classifier_free_guidance:
snake_case : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
snake_case : Tuple = noise_pred.chunk(2 )
snake_case : Optional[Any] = variance_pred.chunk(2 )
snake_case : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case : Dict = self.scheduler.step(
A , A , A , generator=A , )[0]
# post-processing
snake_case : Tuple = self.movq.decode(A , force_not_quantize=A )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
snake_case : Optional[int] = image * 0.5 + 0.5
snake_case : Union[str, Any] = image.clamp(0 , 1 )
snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case : Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
| 365 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , ) -> int:
super().__init__()
self.register_modules(
unet=A , scheduler=A , movq=A , )
snake_case : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase ( self , A , A , A , A , A , A ) -> str:
if latents is None:
snake_case : List[str] = randn_tensor(A , generator=A , device=A , dtype=A )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
snake_case : Union[str, Any] = latents.to(A )
snake_case : Optional[Any] = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self , A=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
snake_case : Dict = torch.device(f"""cuda:{gpu_id}""" )
snake_case : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A , A )
def UpperCAmelCase ( self , A=0 ) -> Optional[Any]:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
snake_case : int = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
snake_case , snake_case : Tuple = cpu_offload_with_hook(A , A , prev_module_hook=A )
# We'll offload the last model manually.
snake_case : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self ) -> str:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A )
def __call__( self , A , A , A , A = 5_1_2 , A = 5_1_2 , A = 1_0_0 , A = 4.0 , A = 1 , A = None , A = None , A = "pil" , A = True , ) -> Any:
snake_case : int = self._execution_device
snake_case : Tuple = guidance_scale > 1.0
if isinstance(A , A ):
snake_case : str = torch.cat(A , dim=0 )
if isinstance(A , A ):
snake_case : int = torch.cat(A , dim=0 )
if isinstance(A , A ):
snake_case : Tuple = torch.cat(A , dim=0 )
snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
snake_case : Optional[int] = image_embeds.repeat_interleave(A , dim=0 )
snake_case : Dict = negative_image_embeds.repeat_interleave(A , dim=0 )
snake_case : Optional[Any] = hint.repeat_interleave(A , dim=0 )
snake_case : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A )
snake_case : Optional[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A )
self.scheduler.set_timesteps(A , device=A )
snake_case : Tuple = self.scheduler.timesteps
snake_case : Tuple = self.movq.config.latent_channels
snake_case , snake_case : Optional[int] = downscale_height_and_width(A , A , self.movq_scale_factor )
# create initial latent
snake_case : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A , A , A , self.scheduler , )
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : Any = {"""image_embeds""": image_embeds, """hint""": hint}
snake_case : List[Any] = self.unet(
sample=A , timestep=A , encoder_hidden_states=A , added_cond_kwargs=A , return_dict=A , )[0]
if do_classifier_free_guidance:
snake_case , snake_case : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
snake_case , snake_case : Tuple = noise_pred.chunk(2 )
snake_case , snake_case : Optional[Any] = variance_pred.chunk(2 )
snake_case : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case , snake_case : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case : Dict = self.scheduler.step(
A , A , A , generator=A , )[0]
# post-processing
snake_case : Tuple = self.movq.decode(A , force_not_quantize=A )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
snake_case : Optional[int] = image * 0.5 + 0.5
snake_case : Union[str, Any] = image.clamp(0 , 1 )
snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case : Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
| 176 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
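# Note on the pattern below (a description of the mechanism, not new behaviour):
# names collected in `_import_structure` are materialized lazily by `_LazyModule`
# on first attribute access, and each optional backend (vision / torch / TF) only
# registers its symbols when the corresponding availability check passes.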
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 96 |
"""simple docstring"""
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
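# Sanity check (hand-computed, assuming the default inputs in main() below): the
# exact integral of f(x) = x**2 over [0, 1] is 1/3, and with 10 steps the extended
# trapezoidal rule should print y close to 0.335.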
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
    main()
 | 96 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"
    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1_536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6_144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
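# Rough export sketch (illustrative; `model` and `tokenizer` are assumed to be a
# loaded DeBERTa-v2 checkpoint and its tokenizer, and "model.onnx" is a placeholder
# path) showing how the ONNX config above would typically be used:
#
#   from pathlib import Path
#   from transformers.onnx import export
#
#   onnx_config = DebertaV2OnnxConfig(model.config)
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))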
| 369 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 121 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
            ], dtype=object)
return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 15 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
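# Worked example (pure arithmetic): add_three(1, 2, 1, 3, 1, 6) sums 1/2 + 1/3 + 1/6:
# top = 18 + 12 + 6 = 36, bottom = 36, gcd = 36, so the reduced result is (1, 1).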
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 177 |
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
UpperCAmelCase_ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
UpperCAmelCase_ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f'layers_{str(layer_index)}'
# Self-Attention
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
UpperCAmelCase_ = flax_model.params['''encoder''']['''block'''][str(__UpperCamelCase )]['''layer''']
UpperCAmelCase_ = tax_attention_key
UpperCAmelCase_ = tax_attention_out
UpperCAmelCase_ = tax_attention_query
UpperCAmelCase_ = tax_attention_value
UpperCAmelCase_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = tax_global_layer_norm
if split_mlp_wi:
UpperCAmelCase_ = tax_mlp_wi_a
UpperCAmelCase_ = tax_mlp_wi_a
else:
UpperCAmelCase_ = tax_mlp_wi
UpperCAmelCase_ = tax_mlp_wo
UpperCAmelCase_ = tax_mlp_layer_norm
UpperCAmelCase_ = flax_model_encoder_layer_block
# Only for layer 0:
UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCAmelCase_ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
UpperCAmelCase_ = tax_encoder_global_rel_embedding
# Assigning
UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
UpperCAmelCase_ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f'layers_{str(layer_index)}'
# Self-Attention
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''key''']['''kernel''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''out''']['''kernel''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''query''']['''kernel''']
UpperCAmelCase_ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
UpperCAmelCase_ = flax_model.params['''decoder''']['''block'''][str(__UpperCamelCase )]['''layer''']
UpperCAmelCase_ = tax_attention_key
UpperCAmelCase_ = tax_attention_out
UpperCAmelCase_ = tax_attention_query
UpperCAmelCase_ = tax_attention_value
UpperCAmelCase_ = tax_pre_attention_layer_norm
UpperCAmelCase_ = tax_enc_dec_attention_key
UpperCAmelCase_ = tax_enc_dec_attention_out
UpperCAmelCase_ = tax_enc_dec_attention_query
UpperCAmelCase_ = tax_enc_dec_attention_value
UpperCAmelCase_ = tax_cross_layer_norm
if split_mlp_wi:
UpperCAmelCase_ = tax_mlp_wi_a
UpperCAmelCase_ = tax_mlp_wi_a
else:
UpperCAmelCase_ = tax_mlp_wi
UpperCAmelCase_ = tax_mlp_wo
UpperCAmelCase_ = txa_mlp_layer_norm
UpperCAmelCase_ = flax_model_decoder_layer_block
# Decoder Normalization
UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
UpperCAmelCase_ = txa_decoder_norm
# Only for layer 0:
UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
UpperCAmelCase_ = tax_decoder_rel_embedding
# Token Embeddings
UpperCAmelCase_ = tax_model['''target''']['''token_embedder''']['''embedding''']
UpperCAmelCase_ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(flax_dump_folder_path)
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_lowerCamelCase = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 177 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 137 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
a_ : Optional[int] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _snake_case ( datasets.BuilderConfig ):
_lowercase : Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
import pyspark
def generate_fn():
SCREAMING_SNAKE_CASE = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id'))
for partition_id in partition_order:
SCREAMING_SNAKE_CASE = df_with_partition_id.select('*').where(F'''part_id = {partition_id}''').drop('part_id')
SCREAMING_SNAKE_CASE = partition_df.collect()
SCREAMING_SNAKE_CASE = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class _snake_case ( _BaseExamplesIterable ):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None) -> None:
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__( self) -> Dict:
yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return len(self.partition_order)
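# Note on the iterable above (a description of the mechanism, adds no behaviour):
# shuffling only permutes *partition* order via shuffle_data_sources; rows within a
# Spark partition keep their order, and shard_data_sources splits work across
# dataloader workers by slicing that same partition_order list.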
class _snake_case ( datasets.DatasetBuilder ):
_lowercase : int = SparkConfig
    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs) -> None:
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs
        )
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
# Returns the path of the created file.
def create_cache_and_write_probe(a):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a)
SCREAMING_SNAKE_CASE = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a , 'a')
return [probe_file]
if self._spark.conf.get('spark.master' , '').startswith('local'):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
SCREAMING_SNAKE_CASE = (
self._spark.sparkContext.parallelize(range(1) , 1).mapPartitions(a).collect()
)
if os.path.isfile(probe[0]):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir')
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
return datasets.DatasetInfo(features=self.config.features)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[int]:
import pyspark
def get_arrow_batch_size(a):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]})
SCREAMING_SNAKE_CASE = self.df.count()
SCREAMING_SNAKE_CASE = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
SCREAMING_SNAKE_CASE = (
self.df.limit(a)
.repartition(1)
.mapInArrow(a , 'batch_bytes: long')
.agg(pyspark.sql.functions.sum('batch_bytes').alias('sample_bytes'))
.collect()[0]
.sample_bytes
/ sample_num_rows
)
SCREAMING_SNAKE_CASE = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
SCREAMING_SNAKE_CASE = min(a , int(approx_total_size / max_shard_size))
SCREAMING_SNAKE_CASE = self.df.repartition(a)
    def _prepare_split_single(self, fpath, file_format, max_shard_size) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays([[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"])
            shard_id = 0
            writer = writer_class(features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files)
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays([[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"])
                    shard_id += 1
                    writer = writer_class(features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files)
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays([[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"])
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"))
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(fs, fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"))

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace(SUFFIX, ""))

    def _get_examples_iterable_for_split(self, split_generator) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
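A minimal usage sketch for the builder above (added for illustration, not part of the original module). It assumes a local SparkSession and that the public `datasets.Dataset.from_spark` entry point, which drives this builder, is available:

# from pyspark.sql import SparkSession
# from datasets import Dataset
#
# spark = SparkSession.builder.master("local[*]").getOrCreate()
# df = spark.createDataFrame([("a", 0), ("b", 1)], schema="text: string, label: int")
# ds = Dataset.from_spark(df)  # materializes the DataFrame into cached Arrow shards
# print(ds[0])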
| 137 | 1 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange('AB', 'XYZ'), end=' ') | 33 |
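A quick sanity check of the interleaving behaviour (added for illustration; assumes the function above is in scope): characters alternate until the shorter string is exhausted, then the rest of the longer string is appended.

assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
assert alternative_string_arrange("ABCD", "XY") == "AXBYCD"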
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all remaining characters of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining characters of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2), 1 + min_distance(index1, index2 + 1), diff + min_distance(index1 + 1, index2 + 1), )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 33 | 1 |
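Illustrative checks (added, not in the original file; assumes the function above is importable): the classic Levenshtein pair "intention" -> "execution" needs 5 edits, and the distance from an empty word is just the length of the other word.

assert min_distance_up_bottom("intention", "execution") == 5
assert min_distance_up_bottom("", "abc") == 3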
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    # Parse a boolean CLI flag; accepts the usual "true"/"false" spellings.
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
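# For example, str2bool("yes") and str2bool("1") return True, str2bool("no")
# returns False, and any other string raises argparse.ArgumentTypeError.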
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config['up_block_types']

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
    parser.add_argument(
        '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
    )
    parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(F"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
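An illustrative invocation of the script above (added; the script file name, checkpoint name, and output path are placeholders, not verified):

# python convert_consistency_to_diffusers.py \
#     --unet_path /path/to/cd_imagenet64_l2.pt \
#     --dump_path ./consistency-model-imagenet64 \
#     --class_cond True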
| 108 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url).read())['releases'].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file: str):
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'^\s*import\s+\.(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import', content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file: str):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"""{f}.py""" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename: str):
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'^\s*import\s+(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import', content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('.')[0] for imp in imports if not imp.startswith('.')]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            f"""{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`""" )
    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('.')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    f""" {loaded_module}.""" )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, ):
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(f"""Defaulting to latest_version: {revision}.""")
        elif revision in available_versions:
            revision = f"""v{revision}"""
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                f""" {', '.join(available_versions + ['main'] )}.""" )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"""{module_needed}.py"""
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"""{module_needed}.py""", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, ):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace('.py', ''))
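A hedged sketch of how the loader above is typically called (added; the repository and file name are hypothetical placeholders):

# pipeline_cls = get_class_from_dynamic_module(
#     "some-user/some-community-pipeline",  # hypothetical Hub repo id
#     "pipeline.py",                        # hypothetical module file in that repo
# )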
| 271 | 0 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"""A configuration of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs["attention_mask"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs["encoder_hidden_states"] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        batch, encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature) | 362 | """simple docstring"""
def solution(n: int = 4000000) -> int:
    '''Return the sum of the even-valued Fibonacci terms that do not exceed n.'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(F"""{solution() = }""") | 64 | 0 |
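Quick checks (added for illustration; assumes `solution` above is in scope): the even Fibonacci terms not exceeding 100 are 2 + 8 + 34 = 44, and the Project Euler #2 answer for the default limit of 4,000,000 is 4613732.

assert solution(100) == 44
assert solution() == 4613732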
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""", out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = """huggingface/label-files"""
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = """maskformer-ade20k-full-id2label.json"""
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = """ade20k-id2label.json"""
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = """maskformer-coco-stuff-id2label.json"""
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = """coco-panoptic-id2label.json"""
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = """cityscapes-id2label.json"""
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = """mapillary-vistas-id2label.json"""

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight") )
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, """rb""" ) as f:
        data = pickle.load(f)
    state_dict = data["""model"""]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config )
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape )

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels )

    inputs = image_processor(image, return_tensors="""pt""" )
    outputs = model(**inputs)

    print("""Logits:""", outputs.class_queries_logits[0, :3, :3] )

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1E-4 )
    print("""Looks ok!""" )

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(f"nielsr/{model_name}" )
        image_processor.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''maskformer-swin-tiny-ade''',
        type=str,
        help='''Name of the MaskFormer model you\'d like to convert.''',
    )
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
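An illustrative command line for the converter above (added; the script file name and checkpoint path are assumptions):

# python convert_maskformer_swin_to_pytorch.py \
#     --model_name maskformer-swin-tiny-ade \
#     --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#     --pytorch_dump_folder_path ./maskformer-swin-tiny-ade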
| 244 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0 )
        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            'num_train_timesteps': 1_000,
            'beta_schedule': 'linear',
            'beta_start': 0.00_085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed ) ).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1 ) ).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed ) ).to(device)
        image = image.cpu().permute(0, 2, 3, 1 )[0]
        init_image = Image.fromarray(np.uint8(image) ).convert('RGB' ).resize((256, 256) )

        if str(device).startswith('mps' ):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = 'cpu'

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device) )
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy' )

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        prompt = 'A red cartoon frog, 4k'

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()

        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
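# Note (added): the fast tests above run on CPU with tiny random components,
# while the integration test is gated by the `slow` decorator and only runs
# when RUN_SLOW=1 is set in the environment and a CUDA GPU is available.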
| 161 | 0 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split('''\n''' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['''\n'''.join(lines[:index] )]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
                current_block.append(lines[index] )
                blocks.append('''\n'''.join(current_block) )
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('''\n'''.join(current_block) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('''\n'''.join(current_block) )

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('''\n'''.join(lines[index:] ) )

    return blocks
def ignore_underscore(key):
    """Wraps a `key` function (that maps an object to a string) to lowercase and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` isort-style: constants first, classes second, functions last."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
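# Example of the resulting order -- constants first, then classes, then functions,
# each group sorted case-insensitively while ignoring underscores:
#   sort_objects(["load_tool", "BertModel", "BERT_PRETRAINED_MODEL_ARCHIVE_LIST"])
#   -> ["BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertModel", "load_tool"]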
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
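# For instance, a single-line entry has only its bracketed objects reordered:
#   sort_objects_in_import('_import_structure["models.bert"] = ["BertModel", "BertConfig"]')
#   -> '_import_structure["models.bert"] = ["BertConfig", "BertModel"]'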
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` blocks of `file`; when `check_only` is True, only report."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure` entries to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
lowercase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
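# Typical invocations, assuming the script lives at utils/custom_init_isort.py
# in the repository root (the path is an assumption; only the flag is defined above):
#   python utils/custom_init_isort.py               # rewrite badly sorted __init__.py files
#   python utils/custom_init_isort.py --check_only  # only report, raising on any diff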
| 357 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
'''simple docstring'''
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
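# A minimal sketch of the usage pattern the tests above exercise; `micro_batches`,
# `model` and `optimizer` are illustrative placeholders, not part of this file:
#
#   accumulator = GradientAccumulator()
#   for grads in micro_batches:
#       accumulator(grads)                       # accumulate one micro-batch of gradients
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()                          # start the next accumulation window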
| 11 | 0 |
"""simple docstring"""
def solution(n: int = 1_000) -> int:
    """
    Returns the largest product a * b * c of a Pythagorean triplet with
    a + b + c = n, or -1 if no such triplet exists.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
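# Quick sanity check: for n = 12 the only Pythagorean triplet with a + b + c = 12
# is (3, 4, 5), so the product is 3 * 4 * 5 = 60.
assert solution(12) == 60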
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347 | """simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 3 ,__UpperCAmelCase = 1 ,__UpperCAmelCase = "relu" ) -> Any:
super().__init__()
        A__ = nn.Conv2d(
__UpperCAmelCase ,__UpperCAmelCase ,kernel_size=__UpperCAmelCase ,stride=__UpperCAmelCase ,padding=kernel_size // 2 ,bias=__UpperCAmelCase )
        A__ = nn.BatchNorm2d(__UpperCAmelCase )
        A__ = ACT2FN[activation] if activation is not None else nn.Identity()
def snake_case__ ( self ,__UpperCAmelCase ) -> Tensor:
A__ = self.convolution(__UpperCAmelCase )
A__ = self.normalization(__UpperCAmelCase )
A__ = self.activation(__UpperCAmelCase )
return hidden_state
class ResNetEmbeddings(nn.Module):
def __init__( self ,__UpperCAmelCase ) -> Any:
super().__init__()
A__ = ResNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=7 ,stride=2 ,activation=config.hidden_act )
        A__ = nn.MaxPool2d(kernel_size=3 ,stride=2 ,padding=1 )
A__ = config.num_channels
def snake_case__ ( self ,__UpperCAmelCase ) -> Tensor:
A__ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
A__ = self.embedder(__UpperCAmelCase )
A__ = self.pooler(__UpperCAmelCase )
return embedding
class ResNetShortCut(nn.Module):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 2 ) -> Optional[Any]:
super().__init__()
        A__ = nn.Conv2d(__UpperCAmelCase ,__UpperCAmelCase ,kernel_size=1 ,stride=__UpperCAmelCase ,bias=__UpperCAmelCase )
        A__ = nn.BatchNorm2d(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ) -> Tensor:
A__ = self.convolution(__UpperCAmelCase )
A__ = self.normalization(__UpperCAmelCase )
return hidden_state
class ResNetBasicLayer(nn.Module):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 1 ,__UpperCAmelCase = "relu" ) -> int:
super().__init__()
A__ = in_channels != out_channels or stride != 1
A__ = (
ResNetShortCut(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
A__ = nn.Sequential(
ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) ,ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,activation=__UpperCAmelCase ) ,)
        A__ = ACT2FN[activation]
def snake_case__ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
A__ = hidden_state
A__ = self.layer(__UpperCAmelCase )
A__ = self.shortcut(__UpperCAmelCase )
hidden_state += residual
A__ = self.activation(__UpperCAmelCase )
return hidden_state
class ResNetBottleNeckLayer(nn.Module):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 1 ,__UpperCAmelCase = "relu" ,__UpperCAmelCase = 4 ) -> int:
super().__init__()
A__ = in_channels != out_channels or stride != 1
A__ = out_channels // reduction
A__ = (
ResNetShortCut(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
A__ = nn.Sequential(
ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,kernel_size=1 ) ,ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ) ,ResNetConvLayer(__UpperCAmelCase ,__UpperCAmelCase ,kernel_size=1 ,activation=__UpperCAmelCase ) ,)
        A__ = ACT2FN[activation]
def snake_case__ ( self ,__UpperCAmelCase ) -> Optional[Any]:
A__ = hidden_state
A__ = self.layer(__UpperCAmelCase )
A__ = self.shortcut(__UpperCAmelCase )
hidden_state += residual
A__ = self.activation(__UpperCAmelCase )
return hidden_state
class ResNetStage(nn.Module):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 2 ,__UpperCAmelCase = 2 ,) -> Any:
super().__init__()
A__ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
A__ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__UpperCAmelCase ,__UpperCAmelCase ,stride=__UpperCAmelCase ,activation=config.hidden_act ) ,*[layer(__UpperCAmelCase ,__UpperCAmelCase ,activation=config.hidden_act ) for _ in range(depth - 1 )] ,)
def snake_case__ ( self ,__UpperCAmelCase ) -> Tensor:
A__ = input
for layer in self.layers:
A__ = layer(__UpperCAmelCase )
return hidden_state
class ResNetEncoder(nn.Module):
def __init__( self ,__UpperCAmelCase ) -> Optional[Any]:
super().__init__()
A__ = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__UpperCAmelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
A__ = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__UpperCAmelCase ,config.depths[1:] ):
self.stages.append(ResNetStage(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,depth=__UpperCAmelCase ) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ,__UpperCAmelCase = True ) -> BaseModelOutputWithNoAttention:
A__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A__ = hidden_states + (hidden_state,)
A__ = stage_module(__UpperCAmelCase )
if output_hidden_states:
A__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=__UpperCAmelCase ,hidden_states=__UpperCAmelCase ,)
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
__lowerCamelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__lowerCamelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.' , RESNET_START_DOCSTRING , )
class ResNetModel(ResNetPreTrainedModel):
def __init__( self ,__UpperCAmelCase ) -> Union[str, Any]:
super().__init__(__UpperCAmelCase )
A__ = config
A__ = ResNetEmbeddings(__UpperCAmelCase )
A__ = ResNetEncoder(__UpperCAmelCase )
        A__ = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=BaseModelOutputWithPoolingAndNoAttention ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ) -> BaseModelOutputWithPoolingAndNoAttention:
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.embedder(__UpperCAmelCase )
A__ = self.encoder(
__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,return_dict=__UpperCAmelCase )
A__ = encoder_outputs[0]
A__ = self.pooler(__UpperCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__UpperCAmelCase ,pooler_output=__UpperCAmelCase ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , RESNET_START_DOCSTRING , )
class ResNetForImageClassification(ResNetPreTrainedModel):
def __init__( self ,__UpperCAmelCase ) -> Tuple:
super().__init__(__UpperCAmelCase )
A__ = config.num_labels
A__ = ResNetModel(__UpperCAmelCase )
# classification head
A__ = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=ImageClassifierOutputWithNoAttention ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def snake_case__ ( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,) -> ImageClassifierOutputWithNoAttention:
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.resnet(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,return_dict=__UpperCAmelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(__UpperCAmelCase )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = 'single_label_classification'
else:
A__ = 'multi_label_classification'
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
A__ = loss_fct(__UpperCAmelCase ,__UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(__UpperCAmelCase ,__UpperCAmelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase ,logits=__UpperCAmelCase ,hidden_states=outputs.hidden_states )
@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ' , RESNET_START_DOCSTRING , )
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
def __init__( self ,__UpperCAmelCase ) -> Optional[Any]:
super().__init__(__UpperCAmelCase )
super()._init_backbone(__UpperCAmelCase )
A__ = [config.embedding_size] + config.hidden_sizes
A__ = ResNetEmbeddings(__UpperCAmelCase )
A__ = ResNetEncoder(__UpperCAmelCase )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @replace_return_docstrings(output_type=BackboneOutput ,config_class=_CONFIG_FOR_DOC )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ) -> BackboneOutput:
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = self.embedder(__UpperCAmelCase )
A__ = self.encoder(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,return_dict=__UpperCAmelCase )
A__ = outputs.hidden_states
A__ = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
A__ = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__UpperCAmelCase ,hidden_states=outputs.hidden_states if output_hidden_states else None ,attentions=__UpperCAmelCase ,)
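# A minimal usage sketch, mirroring the standard docstring example for this model
# (the checkpoint id matches the constants above; `image` is any PIL image):
#
#   from transformers import AutoImageProcessor, ResNetModel
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetModel.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   print(outputs.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7])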
| 221 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
"""simple docstring"""
if "emb" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace('emb' ,'model.decoder.embed_tokens' )
if "transformer" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('transformer' ,'model.decoder' )
if "cross_attention" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('cross_attention' ,'encoder_attn' )
if "linear1" in name:
SCREAMING_SNAKE_CASE : Any = name.replace('linear1' ,'fc1' )
if "linear2" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('linear2' ,'fc2' )
if "norm1" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('norm1' ,'self_attn_layer_norm' )
if "norm_cross" in name:
SCREAMING_SNAKE_CASE : int = name.replace('norm_cross' ,'encoder_attn_layer_norm' )
if "norm2" in name:
SCREAMING_SNAKE_CASE : str = name.replace('norm2' ,'final_layer_norm' )
if "out_norm" in name:
SCREAMING_SNAKE_CASE : int = name.replace('out_norm' ,'model.decoder.layer_norm' )
if "linears" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('linears' ,'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('condition_provider.conditioners.description.output_proj' ,'enc_to_dec_proj' )
return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq MusicGen state dict and split it into the decoder (LM) part and the enc-dec projection."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Any=None ,__UpperCamelCase: int=None ,__UpperCamelCase: int="cpu" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = MusicGen.get_pretrained(__UpperCamelCase ,device=__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = rename_state_dict(
__UpperCamelCase ,hidden_size=decoder_config.hidden_size )
SCREAMING_SNAKE_CASE : str = TaEncoderModel.from_pretrained('t5-base' )
SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained('facebook/encodec_32khz' )
SCREAMING_SNAKE_CASE : Optional[int] = MusicgenForCausalLM(__UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = decoder.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
if len(__UpperCamelCase ) > 0:
raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
SCREAMING_SNAKE_CASE : List[Any] = MusicgenForConditionalGeneration(text_encoder=__UpperCamelCase ,audio_encoder=__UpperCamelCase ,decoder=__UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__UpperCamelCase )
# check we can do a forward pass
SCREAMING_SNAKE_CASE : Any = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__UpperCamelCase ,decoder_input_ids=__UpperCamelCase ).logits
if logits.shape != (8, 1, 20_48):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained('t5-base' )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' ,padding_side='left' )
SCREAMING_SNAKE_CASE : List[str] = MusicgenProcessor(feature_extractor=__UpperCamelCase ,tokenizer=__UpperCamelCase )
# set the appropriate bos/pad token ids
SCREAMING_SNAKE_CASE : Any = 20_48
SCREAMING_SNAKE_CASE : str = 20_48
# set other default generation config params
SCREAMING_SNAKE_CASE : int = int(30 * audio_encoder.config.frame_rate )
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : str = 3.0
if pytorch_dump_folder is not None:
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
if repo_id:
logger.info(f"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__UpperCamelCase )
processor.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
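    # Example invocation (the dump folder is illustrative):
    #   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small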
| 246 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCamelCase_ = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCamelCase_ = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCamelCase_ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ), reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
], )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 246 | 1 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
def __init__( self , a , a , a , a , a , a=0.2 , a=0.2 ) -> Dict:
lowercase__ : Any = bp_numa
lowercase__ : Optional[int] = bp_numa
lowercase__ : Tuple = bp_numa
lowercase__ : Optional[Any] = conva_get[:2]
lowercase__ : Optional[int] = conva_get[2]
lowercase__ : Optional[Any] = size_pa
lowercase__ : Union[str, Any] = rate_w
lowercase__ : Union[str, Any] = rate_t
lowercase__ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase__ : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__ : Any = -2 * np.random.rand(self.conva[1] ) + 1
lowercase__ : int = -2 * np.random.rand(self.num_bpa ) + 1
lowercase__ : int = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCAmelCase ( self , a ) -> Union[str, Any]:
# save model dict with pickle
lowercase__ : Optional[Any] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(a , 'wb' ) as f:
pickle.dump(a , a )
print(f"""Model saved: {save_path}""" )
@classmethod
def _UpperCAmelCase ( cls , a ) -> Any:
# read saved model
with open(a , 'rb' ) as f:
lowercase__ : Optional[int] = pickle.load(a ) # noqa: S301
lowercase__ : Optional[int] = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowercase__ : List[Any] = model_dic.get('size_pooling1' )
lowercase__ : Tuple = model_dic.get('num_bp1' )
lowercase__ : int = model_dic.get('num_bp2' )
lowercase__ : int = model_dic.get('num_bp3' )
lowercase__ : Union[str, Any] = model_dic.get('rate_weight' )
lowercase__ : Tuple = model_dic.get('rate_thre' )
# create model instance
lowercase__ : Tuple = CNN(a , a , a , a , a , a , a )
# modify model parameter
lowercase__ : str = model_dic.get('w_conv1' )
lowercase__ : Optional[int] = model_dic.get('wkj' )
lowercase__ : Tuple = model_dic.get('vji' )
lowercase__ : str = model_dic.get('thre_conv1' )
lowercase__ : Union[str, Any] = model_dic.get('thre_bp2' )
lowercase__ : List[str] = model_dic.get('thre_bp3' )
return conv_ins
    def sig(self, x):
return 1 / (1 + np.exp(-1 * x ))
    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
def _UpperCAmelCase ( self , a , a , a="average_pool" ) -> str:
# pooling process
lowercase__ : List[str] = len(featuremaps[0] )
lowercase__ : List[str] = int(size_map / size_pooling )
lowercase__ : str = []
for i_map in range(len(a ) ):
lowercase__ : List[str] = featuremaps[i_map]
lowercase__ : Optional[int] = []
for i_focus in range(0 , a , a ):
for j_focus in range(0 , a , a ):
lowercase__ : List[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(a ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(a ) )
lowercase__ : List[Any] = np.asmatrix(a ).reshape(a , a )
featuremap_pooled.append(a )
return featuremap_pooled
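    # For example, average-pooling a 4x4 feature map with size_pooling=2 yields a 2x2 map:
    #   [[ 1,  2,  3,  4],
    #    [ 5,  6,  7,  8],      ->   [[ 3.5,  5.5],
    #    [ 9, 10, 11, 12],            [11.5, 13.5]]
    #    [13, 14, 15, 16]]
    # (each output cell is the mean of one non-overlapping 2x2 block)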
    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded
    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(a )) )
print((' - - Shape: Teach_Data ', np.shape(a )) )
lowercase__ : int = 0
lowercase__ : List[Any] = []
lowercase__ : Union[str, Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
lowercase__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(a ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase__ : Optional[int] = np.asmatrix(datas_train[p] )
lowercase__ : int = np.asarray(datas_teach[p] )
lowercase__ , lowercase__ : Union[str, Any] = self.convolute(
a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Optional[Any] = self.pooling(a , self.size_poolinga )
lowercase__ : Tuple = np.shape(a )
lowercase__ : List[str] = self._expand(a )
lowercase__ : Optional[int] = data_bp_input
lowercase__ : Optional[Any] = np.dot(a , self.vji.T ) - self.thre_bpa
lowercase__ : str = self.sig(a )
lowercase__ : Tuple = np.dot(a , self.wkj.T ) - self.thre_bpa
lowercase__ : Any = self.sig(a )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase__ : int = np.multiply(
(data_teach - bp_outa) , np.multiply(a , (1 - bp_outa) ) )
lowercase__ : Any = np.multiply(
np.dot(a , self.wkj ) , np.multiply(a , (1 - bp_outa) ) )
lowercase__ : Optional[int] = np.dot(a , self.vji )
lowercase__ : Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase__ : Any = pd_conva_pooled.T.getA().tolist()
lowercase__ : List[str] = self._calculate_gradient_from_pool(
a , a , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase__ : Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
lowercase__ : Tuple = self.rate_weight * np.dot(a , a )
lowercase__ : Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase__ : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase__ : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase__ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase__ : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
lowercase__ : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase__ : Dict = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase__ : str = rp + 1
lowercase__ : List[str] = error_count / patterns
all_mse.append(a )
def draw_error():
lowercase__ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(a , '+-' )
plt.plot(a , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(a , alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
    def predict(self, datas_test):
# model predict
lowercase__ : Optional[int] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(a )) )
for p in range(len(a ) ):
lowercase__ : List[str] = np.asmatrix(datas_test[p] )
lowercase__ , lowercase__ : Tuple = self.convolute(
a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__ : Any = self.pooling(a , self.size_poolinga )
lowercase__ : Union[str, Any] = self._expand(a )
lowercase__ : Optional[Any] = data_bp_input
lowercase__ : str = bp_outa * self.vji.T - self.thre_bpa
lowercase__ : Optional[Any] = self.sig(a )
lowercase__ : Dict = bp_outa * self.wkj.T - self.thre_bpa
lowercase__ : List[str] = self.sig(a )
produce_out.extend(bp_outa.getA().tolist() )
lowercase__ : Optional[int] = [list(map(self.do_round , a ) ) for each in produce_out]
return np.asarray(a )
    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_conveda, data_pooleda = self.convolute(
            data_test, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva
        )
        data_pooleda = self.pooling(data_conveda, self.size_poolinga)
        return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 77 | """simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 77 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving stochastic differential equation (VP-SDE) scheduler."""

    order = 1
@register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean
def __len__(self ) -> Optional[int]:
'''simple docstring'''
return self.config.num_train_timesteps
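# For reference, `step_pred` above is one Euler-Maruyama step of the reverse-time
# VP-SDE from Song et al. (2021), "Score-Based Generative Modeling through SDEs":
#   dx = [-1/2 * beta(t) * x - beta(t) * s_theta(x, t)] dt + sqrt(beta(t)) dw
# with beta(t) = beta_min + t * (beta_max - beta_min) and dt = -1 / num_timesteps.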
| 217 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")
def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
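# With a generated pair, textbook RSA is a round trip of modular exponentiation
# (illustration only -- real deployments need padding such as OAEP):
#   public_key, private_key = generate_key(1024)
#   (n, e), (_, d) = public_key, private_key
#   ciphertext = pow(message_int, e, n)    # message_int: any integer < n
#   assert pow(ciphertext, d, n) == message_int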
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
| 217 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
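# For example, with a tokenizer `tok` and a generous token budget:
#   pack_examples(tok, ["a", "b", "c"], ["x", "y", "z"], max_tokens=1024)
#   -> (["a b c"], ["x y z"])
# Once a concatenated candidate would exceed max_tokens, the running example is
# flushed and a new one starts from the current pair.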
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 240 |
import cv2 as cva
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : an empirically determined constant in [0.04, 0.06]
        window_size : neighbourhood to consider
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")
def __str__( self ):
'''simple docstring'''
return str(self.k )
    def detect(self, img_path: str):
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
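# The response computed per window above is the Harris measure
#   R = det(M) - k * trace(M)^2
# where M = [[sum Ix^2, sum IxIy], [sum IxIy, sum Iy^2]] is the structure tensor
# of the image gradients accumulated over the window; corners have large R.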
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 273 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"""loss: {j} \t""")  # printing the loss after every 100 iterations
    return theta
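# Each iteration above is one batch gradient-descent step on the cross-entropy cost:
#   theta <- theta - alpha * X^T (sigmoid(X theta) - y) / m,   m = y.size
# so the loss printed every 100 iterations should trend downward.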
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
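    # Illustrative addition (not in the original script): training accuracy of the
    # fitted model, obtained by thresholding the predicted probabilities at 0.5.
    predictions = (predict_prob(x) >= 0.5).astype(int)
    print("training accuracy:", (predictions == y).mean())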
| 366 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 44 | 0 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
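# Minimal usage sketch (assumption, not part of the original module): this pipeline is
# normally constructed through the `transformers.pipeline` factory rather than directly:
#
#     from transformers import pipeline
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("Hello world")  # nested list: [batch, tokens, hidden_size]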
| 332 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 332 | 1 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will
            # not change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)
        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda a: _rename_shard(*a)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
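# Minimal usage sketch (assumption, not part of the original module): this builder is
# normally driven through the public `datasets` API rather than instantiated directly:
#
#     import datasets
#     ds = datasets.Dataset.from_spark(spark_df, cache_dir="/dbfs/cache")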
| 350 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 177 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]

        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
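# Illustrative usage (assumption, not part of the original module):
#
#     tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     tokenizer("Kraków jest piękny.")["input_ids"]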
| 89 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
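# Illustrative check (assumption, not part of the original module): thanks to
# `attribute_map` above, the generic Transformers names resolve to DistilBERT's own
# parameter names.
#
#     config = DistilBertConfig()
#     assert config.hidden_size == config.dim
#     assert config.num_hidden_layers == config.n_layers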
| 344 | 0 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences and rejoin them with newline separators."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 9 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affinely transformed distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the distributions this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value inside the distribution's support, used as a numerically valid default (0.0)."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping network outputs to distribution parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Convert arguments to the right shape and domain; implemented by subclasses."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs to the positive orthant via the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
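# Minimal usage sketch (assumption, not part of the original module):
#
#     output = StudentTOutput(dim=1)
#     proj = output.get_parameter_projection(in_features=32)
#     df, loc, scale = proj(torch.randn(8, 32))   # one parameter set per batch element
#     distr = output.distribution((df, loc, scale))
#     sample = distr.sample()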
| 9 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 15 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 15 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A : List[Any] = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 96 |
def climb_stairs(number_of_steps: int) -> int:
    """
    Count the distinct ways to climb a staircase of `number_of_steps` steps,
    taking 1 or 2 steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    >>> climb_stairs(-7)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    AssertionError: number_of_steps needs to be positive integer, your input -7
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
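# Note (illustrative, not part of the original module): the loop above is the standard
# O(n) dynamic-programming formulation, ways(n) = ways(n - 1) + ways(n - 2), i.e. the
# Fibonacci recurrence with ways(1) = 1 and ways(2) = 2.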
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 140 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        config = self.get_config()

        return config, pixel_values, labels

    def get_config( self ):
        '''simple docstring'''
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TFDeiTModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TFDeiTForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDeiTModel,
            'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass

    def test_model_common_attributes( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Dense ) )

    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_image_modeling( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


def prepare_img() -> Image.Image:
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )

        # forward pass
        outputs = model(**inputs )

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 140 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args ):
    parameter_file = os.path.join(args.tf_model_dir , '''parameters.json''')
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""")
    if not args.output.endswith('''.pt'''):
        args.output = args.output + """.pt"""
    new_state = OrderedDict()
    with tf.device('''/CPU:0'''):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float32)  # dtype assumed; the mangled source read np.floataa
            if key_name.endswith('''/adam_m''') or key_name.endswith('''/adam_v'''):
                continue
            if key_name.startswith('''pasts/'''):
                if key_name.startswith('''pasts/mlp'''):
                    player = int(key_name[9])
                elif key_name.startswith('''pasts/out'''):
                    player = 8
                name = """model.sqout.%d.weight""" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith('''model/moe'''):
                player = int(key_name[9:].split('''/''')[0])
                if key_name.endswith('''/switch_gating/kernel'''):
                    name = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('''/softmlp/kernel'''):
                    name = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('''/wo/kernel''') or key_name.endswith('''/wi/kernel'''):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith('''model/mlp'''):
                player = int(key_name[9:].split('''/''')[0])
                if key_name.endswith('''/p1/kernel'''):
                    name = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('''/p1/bias'''):
                    name = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('''/p2/kernel'''):
                    name = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('''/p2/bias'''):
                    name = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('''model/ln'''):
                player = int(key_name[8:].split('''/''')[0])
                if key_name.endswith('''/b'''):
                    name = """model.blocks.%d.feed_forward.norm.bias""" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('''/g'''):
                    name = """model.blocks.%d.feed_forward.norm.weight""" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('''model/att'''):
                player = int(key_name[9:].split('''/''')[0])
                if key_name.endswith('''/qkv/kernel'''):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
                    new_state[name] = torch.tensor(state_q)
                    name = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
                    new_state[name] = torch.tensor(state_k)
                    name = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith('''/o/kernel'''):
                    name = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('''model/an'''):
                player = int(key_name[8:].split('''/''')[0])
                if key_name.endswith('''/b'''):
                    name = """model.blocks.%d.self_attn.norm.bias""" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('''/g'''):
                    name = """model.blocks.%d.self_attn.norm.weight""" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith('''model/wte''')
                or key_name.startswith('''model/wpe''')
                or key_name.startswith('''model/ete''')
            ):
                nlayer = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
                    key_name[-3:]
                ]
                name = """model.%s.weight""" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith('''model/wte'''):
                    name = """lm_head.weight"""
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('''model/wob'''):
                name = """final_logits_bias"""
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = """model.last_project.weight"""
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = """model.last_project.bias"""
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state , args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
    parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 160 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """simple docstring"""

    def __init__( self , pos_x : int , pos_y : int , goal_x : int , goal_y : int , g_cost : int , parent : Node | None , ):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic( self ) -> float:
        '''simple docstring'''
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )

    def __lt__( self , other : Node ) -> bool:
        '''simple docstring'''
        return self.f_cost < other.f_cost
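
# Worked example (added for illustration): for a node at (x=0, y=0) with goal
# (x=2, y=3), the Manhattan heuristic gives |0 - 2| + |0 - 3| = 5, while the
# Euclidean one gives sqrt(2**2 + 3**2) ~= 3.61; HEURISTIC above selects which
# of the two `calculate_heuristic` returns.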
class AStar:
    """simple docstring"""

    def __init__( self , start : TPosition , goal : TPosition ):
        '''simple docstring'''
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , None )

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search( self ) -> list[TPosition]:
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )

            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )

        return [self.start.pos]

    def get_successors( self , parent : Node ) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors

    def retrace_path( self , node : Node | None ) -> list[TPosition]:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """simple docstring"""

    def __init__( self , start : TPosition , goal : TPosition ):
        '''simple docstring'''
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False

    def search( self ) -> list[TPosition]:
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )

            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path( self , fwd_node : Node , bwd_node : Node ) -> list[TPosition]:
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # added: the timed search call was missing from the source
    bd_end_time = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 323 | 0 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'

# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')

DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend(line ):
    '''simple docstring'''
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None

    return "_and_".join(backends )
def read_init():
    '''simple docstring'''
    with open(os.path.join(PATH_TO_DIFFUSERS ,"""__init__.py""" ) ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("""else:""" ):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1

            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
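
# Illustration (added; the shape is implied by the loop above): `read_init`
# returns a mapping such as {"torch": ["UNet2DModel", ...],
# "torch_and_transformers": ["StableDiffusionPipeline", ...]}, with one entry
# per guarded import block found in src/diffusers/__init__.py.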
def create_dummy_object(name ,backend_name ):
    '''simple docstring'''
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name ,backend_name )
    else:
        return DUMMY_CLASS.format(name ,backend_name )
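
# Example (added): create_dummy_object("StableDiffusionPipeline", '["torch"]')
# renders the DUMMY_CLASS template, an UPPER_CASE name renders DUMMY_CONSTANT,
# and a lower_case name renders DUMMY_FUNCTION.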
def create_dummy_files(backend_specific_objects=None ):
    '''simple docstring'''
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = """[""" + """, """.join(f"\"{b}\"" for b in backend.split("""_and_""" ) ) + """]"""
        dummy_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o ,backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False ):
    '''simple docstring'''
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"""torch""": """pt"""}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS ,"""utils""" )
    dummy_file_paths = {
        backend: os.path.join(path ,f"dummy_{short_names.get(backend ,backend )}_objects.py" )
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = """"""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend ,backend )}_objects.py as the main "
                    """__init__ has new objects.""" )
                with open(dummy_file_paths[backend] ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    """The main __init__ has objects that are not present in """
                    f"diffusers.utils.dummy_{short_names.get(backend ,backend )}_objects.py. Run `make fix-copies` "
                    """to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 243 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('T')
class GraphAdjacencyList( Generic[T]):
    def __init__( self , directed : bool = True ) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge( self , source_vertex : T , destination_vertex : T ) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self
def __repr__( self ) -> str:
return pformat(self.adj_list )
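

if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original
    # module): build a small directed graph and print its adjacency list.
    # `add_edge` returns `self`, so calls can be chained.
    graph = GraphAdjacencyList[int]()
    graph.add_edge(0, 1).add_edge(1, 2).add_edge(2, 0)
    print(graph)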
| 243 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor( PoolFormerImageProcessor ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ) ->None:
        """simple docstring"""
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 0 |
def gray_code(bit_count: int ) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )

    return sequence


def gray_code_sequence_string(bit_count: int ) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '''0''' + smaller_sequence[i]
        sequence.append(generated_no )

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '''1''' + smaller_sequence[i]
        sequence.append(generated_no )

    return sequence
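

def _demo_gray_code() -> None:
    # Hedged check (added for illustration): consecutive entries of a Gray
    # code sequence differ in exactly one bit, which is the defining property
    # the reflected construction above guarantees.
    seq = gray_code(4)
    for a, b in zip(seq, seq[1:]):
        assert bin(a ^ b).count("1") == 1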
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
from math import ceil, sqrt
def solution(limit: int = 1_0_0_0_0_0_0 ) -> int:
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
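
# Worked example (added): the smallest lamina counted above is a 3x3 outer
# square with a 1x1 hole, using 3 * 3 - 1 * 1 = 8 tiles.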
if __name__ == "__main__":
print(F"""{solution() = }""")
| 113 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTest( unittest.TestCase ):
    def test_set_level( self ):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration( self ):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out ,msg + '\n')

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out ,'')

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out ,msg + '\n')

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY='error')
    def test_env_override( self ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')

        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' ,None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level ,current_level ,F"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" ,)

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ''
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY='super-error')
    def test_env_invalid_override( self ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' ,cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings( self ):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out ,'')

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out ,msg + '\n')


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 113 | 1 |
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs ):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = 'mock-s3-bucket'
    dataset_path = F"""s3://{mock_bucket}"""
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith('s3://' ) is False

    dataset_path = './local/path'
    new_dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs ):
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True

    fs = fsspec.filesystem('file' )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False


@pytest.mark.parametrize('compression_fs_class' , COMPRESSION_FILESYSTEMS )
def test_compression_filesystems(compression_fs_class , gz_file , bz2_file , lz4_file , zstd_file , xz_file , text_file ):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bz2_file, 'lz4': lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = F"""for '{compression_fs_class.protocol}' compression protocol, """
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex('.' )]
    assert fs.glob('*' ) == [expected_filename]
    with fs.open(expected_filename , 'r' , encoding='utf-8' ) as f, open(text_file , encoding='utf-8' ) as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def test_fs_isfile(protocol , zip_jsonl_path , jsonl_gz_path ):
    compressed_file_paths = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = 'dataset.jsonl'
    path = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
    fs, *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile('non_existing_' + member_file_path )


@pytest.mark.integration
def test_hf_filesystem(hf_token , hf_api , hf_private_dataset_repo_txt_data , text_file ):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
    assert hffs.isdir('data' )
    assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
    with open(text_file ) as f:
        assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()


def test_fs_overwrites():
    protocol = 'bz2'

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )

    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
    )
| 78 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# TensorRT execution-provider settings; the exact environment-variable names
# are an assumption, restored from the usual ORT TensorRT int8 benchmark setup.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
execution_provider = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
sess = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("""Warm up phase...""")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("""Start inference...""")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
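
# Added note: ORT_DISABLE_ALL switches off onnxruntime's own graph
# optimizations, so the timing above reflects the TensorRT execution provider
# running on the unmodified graph.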
| 78 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput ):
    '''simple docstring'''

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput ):
        '''simple docstring'''

        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 370 |
"""simple docstring"""
def sum_of_digits(n: int ) -> int:
    """simple docstring"""
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 1_0
        n //= 1_0
    return res


def sum_of_digits_recursion(n: int ) -> int:
    """simple docstring"""
    n = abs(n )
    return n if n < 1_0 else n % 1_0 + sum_of_digits_recursion(n // 1_0 )


def sum_of_digits_compact(n: int ) -> int:
    """simple docstring"""
    return sum(int(c ) for c in str(abs(n ) ) )


def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}' , setup="""import __main__""" )
        print(f'{call:56} = {func(value )} -- {timing:.4f} seconds' )

    for value in (2_6_2_1_4_4, 1_1_2_5_8_9_9_9_0_6_8_4_2_6_2_4, 1_2_6_7_6_5_0_6_0_0_2_2_8_2_2_9_4_0_1_4_9_6_7_0_3_2_0_5_3_7_6):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 77 | 0 |
import random
class Onepad:
    @staticmethod
    def encrypt(text: str ) -> tuple[list[int], list[int]]:
        '''simple docstring'''
        plain = [ord(i ) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1 , 300 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int] , key: list[int] ) -> str:
        '''simple docstring'''
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
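
# Round-trip property (added for illustration): decrypt(*encrypt(t)) == t,
# since ((i + k) * k - k**2) / k == i for every key k drawn above.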
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k)) | 6 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname ,legacy_format=False )
        self.tokenizer = tokenizer

    def test_convert_token_and_id( self ):
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )

    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,'<s>' )
        self.assertEqual(vocab_keys[1] ,'<pad>' )
        self.assertEqual(vocab_keys[-1] ,'<mask>' )
        self.assertEqual(len(vocab_keys ) ,101122 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size ,101122 )

    @require_torch
    def test_prepare_batch( self ):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text ,max_length=len(expected_src_tokens ) ,padding=True ,truncation=True ,return_tensors='pt' )
        self.assertIsInstance(batch ,BatchEncoding )

        self.assertEqual((2, 6) ,batch.input_ids.shape )
        self.assertEqual((2, 6) ,batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens ,result )

    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens ,rust_tokens )

        ids = tokenizer.encode(sequence ,add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(ids ,rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids ,rust_ids )

    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=sequences ,)
| 89 | 0 |
'''simple docstring'''
class MaxFenwickTree:
    '''simple docstring'''

    def __init__( self ,size: int ) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next( index: int ) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev( index: int ) -> int:
        return (index & (index + 1)) - 1

    def update( self ,index: int ,value: int ) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # recompute the maximum over the covered range [current_left_border, index];
                # assumed reconstruction -- the obfuscated original was ambiguous here
                self.tree[index] = max(value ,self.query(current_left_border ,index ) )
            index = self.get_next(index )

    def query( self ,left: int ,right: int ) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result ,self.tree[right] )
                right = current_left
            else:
                result = max(result ,self.arr[right] )
                right -= 1
        return result
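

def _demo_max_fenwick_tree() -> None:
    # Hedged usage sketch (added for illustration): point updates maintain
    # per-node maxima so that range-maximum queries take logarithmic time.
    tree = MaxFenwickTree(5)
    tree.update(2, 7)
    tree.update(4, 3)
    assert tree.query(0, 5) == 7  # max over the half-open range [0, 5)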
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365 |
'''simple docstring'''
def solution(n: int = 1000 ) -> int:
    """simple docstring"""
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
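
# Worked check (added): for n = 12 the only such triplet is (3, 4, 5), since
# 3 + 4 + 5 = 12 and 3**2 + 4**2 == 5**2, giving the product 60.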
if __name__ == "__main__":
print(F"""{solution() = }""")
| 184 | 0 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float] , nums2: list[float] ) -> float:
    '''simple docstring'''
    all_numbers = sorted(nums1 + nums2 )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
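
# Examples (added for illustration): median_of_two_arrays([1.0, 3.0], [2.0])
# returns 2.0 (odd combined length), and median_of_two_arrays([1.0, 2.0], [3.0, 4.0])
# returns 2.5, the mean of the two middle values.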
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input('''Enter the elements of first array: ''').split()]
    array_2 = [float(x) for x in input('''Enter the elements of second array: ''').split()]
    print(F"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 22 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''

    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self , **kwargs ):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs )
        return config

    def test_timesteps(self ):
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas(self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules(self ):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_variance_type(self ):
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample(self ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_thresholding(self ):
        '''simple docstring'''
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_prediction_type(self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices(self ):
        '''simple docstring'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )

    def test_variance(self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5

    def test_batch_step_no_noise(self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )

        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )

        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )

        assert abs(result_sum.item() - 1153.1833 ) < 1E-2
        assert abs(result_mean.item() - 0.5005 ) < 1E-3

    def test_full_loop_no_noise(self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )

        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3

    def test_full_loop_with_v_prediction(self ):  # name assumed; the source is truncated here
        '''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config(prediction_type="v_prediction")
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: str =self.dummy_model()
lowerCamelCase__: str =self.dummy_sample_deter
lowerCamelCase__: Dict =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Dict =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: List[str] =pred_prev_sample
lowerCamelCase__: List[Any] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Tuple =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 202.0296) < 1E-2
assert abs(result_mean.item() - 0.2631) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =self.scheduler_classes[0]
lowerCamelCase__: Union[str, Any] =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase_):
if i == len(UpperCAmelCase_) - 1:
lowerCamelCase__: Dict =-1
else:
lowerCamelCase__: Union[str, Any] =timesteps[i + 1]
lowerCamelCase__: Tuple =scheduler.previous_timestep(UpperCAmelCase_)
lowerCamelCase__: str =prev_t.item()
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: List[Any] =self.get_scheduler_config()
lowerCamelCase__: Dict =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase_ , msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config()
lowerCamelCase__: int =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =[100, 87, 50, 1, 0]
lowerCamelCase__: int =len(UpperCAmelCase_)
with self.assertRaises(UpperCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[Any] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Dict =[scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
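# Usage sketch (added; it mirrors exactly what the custom-timesteps tests above
# exercise, nothing beyond that):
#     from diffusers import DDPMParallelScheduler
#     scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
#     scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # custom descending schedule
#     scheduler.timesteps                                     # tensor([100, 87, 50, 1, 0])
# Passing a non-descending list, or both num_inference_steps and timesteps,
# raises ValueError, as the last three tests check.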
| 10 | 0 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
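# Illustration (added): the expected values above follow a simple
# contiguous-split rule. The sketch below re-implements that rule standalone,
# purely to document the behaviour the tests pin down; it is not the library code.
def _distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    jobs = min(num_shards, max_num_jobs)
    if jobs == 0:
        return []
    out, start = [], 0
    for i in range(jobs):
        # earlier jobs absorb the remainder, one extra shard each
        length = num_shards // jobs + (1 if i < num_shards % jobs else 0)
        out.append(range(start, start + length))
        start += length
    return out


assert _distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert _distribute_shards_sketch(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]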
| 281 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = '''tester'''
        output_text = '''tester'''
        return input_text, output_text
    @unittest.skip('''MGP-STR always lower cases letters.''')
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                special_token = '''[SPECIAL_TOKEN]'''
                tokenizer.add_special_tokens({'''cls_token''': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(''' ''', ''''''), output_text)
    @unittest.skip('''MGP-STR tokenizer only handles one sequence.''')
    def test_maximum_encoding_length_pair_input(self):
        pass
    @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''')
    def test_pretokenized_inputs(self):
        pass
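# Usage sketch (added): a quick round-trip with the published Hub checkpoint
# (checkpoint name taken from the MGP-STR model card, not from this test file):
#     tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#     ids = tokenizer("tester").input_ids   # one id per lower-cased character
#     tokenizer.decode(ids)                 # -> "tester"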
| 281 | 1 |
def remove_digit(num: int) -> int:
    """Return the biggest number that can be made by deleting exactly one digit."""
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int(''.join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 50 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate lists, padding every candidate to ``max_length``."""
        # Always use a fixed padding strategy so candidates can be stacked
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair', None)
        return_tensors = kwargs.pop('return_tensors', None)

        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get('input_ids')
            encoded_attention_mask = encoded_candidates.get('attention_mask')
            encoded_token_type_ids = encoded_candidates.get('token_type_ids')

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
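# Usage sketch (added): batch_encode_candidates pads every candidate to
# max_length, so the output can be stacked to (batch, num_candidates, length).
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#     batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
#     batch.input_ids.shape  # torch.Size([2, 2, 10])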
| 50 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Save the best model by monitoring the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            ' function.')
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"""val_{metric}""", mode='max', save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"""val_{metric}""", mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
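# Wiring sketch (added): how these callbacks are typically handed to a
# pytorch_lightning.Trainer in the surrounding training scripts:
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(output_dir, metric="rouge2"),
#             get_early_stopping_callback("rouge2", patience=3),
#         ],
#     )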
| 359 | """simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F"""Accelerated optimizer pickling failed with {e}""")
AcceleratorState._reset_state()
| 149 | 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Dict = tmp_path / """cache"""
UpperCAmelCase__ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase__ : Any = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Dict = tmp_path / """cache"""
UpperCAmelCase__ : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCAmelCase__ : Any = features.copy() if features else default_expected_features
UpperCAmelCase__ : Tuple = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase__ : Optional[int] = ParquetDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : List[Any] = tmp_path / """cache"""
UpperCAmelCase__ : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCAmelCase__ : str = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Optional[int] = parquet_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Dict = [parquet_path]
UpperCAmelCase__ : str = tmp_path / """cache"""
UpperCAmelCase__ : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=("train",) ):
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
for split in splits:
UpperCAmelCase__ : Tuple = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : List[Any] = tmp_path / """cache"""
UpperCAmelCase__ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase__ : Any = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : List[str] = tmp_path / """cache"""
UpperCAmelCase__ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCAmelCase__ : Tuple = features.copy() if features else default_expected_features
UpperCAmelCase__ : int = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase__ : List[str] = ParquetDatasetReader({"""train""": parquet_path} , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if split:
UpperCAmelCase__ : Any = {split: parquet_path}
else:
UpperCAmelCase__ : Optional[int] = """train"""
UpperCAmelCase__ : List[Any] = {"""train""": parquet_path, """test""": parquet_path}
UpperCAmelCase__ : int = tmp_path / """cache"""
UpperCAmelCase__ : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Any = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
UpperCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / """foo.parquet""" )
UpperCAmelCase__ : Any = pf.read()
assert dataset.data.table == output_table
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Dict = str(shared_datadir / """test_image_rgb.jpg""" )
UpperCAmelCase__ : int = {"""image""": [image_path]}
UpperCAmelCase__ : Any = Features({"""image""": Image()} )
UpperCAmelCase__ : Union[str, Any] = Dataset.from_dict(UpperCamelCase__ , features=UpperCamelCase__ )
UpperCAmelCase__ : Dict = ParquetDatasetWriter(UpperCamelCase__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
UpperCAmelCase__ : Union[str, Any] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
UpperCAmelCase__ : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=UpperCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 163 |
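# Usage sketch (added): the reader/writer pair exercised above, outside pytest;
# the dataset contents and file name are illustrative only.
#     from datasets import Dataset
#     from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter
#     ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
#     ParquetDatasetWriter(ds, "out.parquet").write()      # returns bytes written (> 0)
#     reloaded = ParquetDatasetReader("out.parquet").read()
#     assert reloaded.column_names == ["col_1", "col_2"]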
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class _snake_case ( unittest.TestCase ):
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = """x = 3"""
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result == 3
self.assertDictEqual(_lowerCamelCase , {"""x""": 3})
UpperCAmelCase__ : Optional[int] = """x = y"""
UpperCAmelCase__ : Optional[Any] = {"""y""": 5}
UpperCAmelCase__ : Dict = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 5, """y""": 5})
def snake_case__ ( self):
UpperCAmelCase__ : Any = """y = add_two(x)"""
UpperCAmelCase__ : Optional[Any] = {"""x""": 3}
UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5})
# Won't work without the tool
with CaptureStdout() as out:
UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result is None
assert "tried to execute add_two" in out.out
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """x = 3"""
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result == 3
self.assertDictEqual(_lowerCamelCase , {"""x""": 3})
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
UpperCAmelCase__ : Any = {"""x""": 3}
UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5})
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = """x = 3\ny = 5"""
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5})
def snake_case__ ( self):
UpperCAmelCase__ : Dict = """text = f'This is x: {x}.'"""
UpperCAmelCase__ : str = {"""x""": 3}
UpperCAmelCase__ : Optional[Any] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """text""": """This is x: 3."""})
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """if x <= 3:\n y = 2\nelse:\n y = 5"""
UpperCAmelCase__ : Optional[Any] = {"""x""": 3}
UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 2})
UpperCAmelCase__ : Optional[int] = {"""x""": 8}
UpperCAmelCase__ : int = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 8, """y""": 5})
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """test_list = [x, add_two(x)]"""
UpperCAmelCase__ : int = {"""x""": 3}
UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
self.assertListEqual(_lowerCamelCase , [3, 5])
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_list""": [3, 5]})
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = """y = x"""
UpperCAmelCase__ : Optional[Any] = {"""x""": 3}
UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result == 3
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 3})
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = """test_list = [x, add_two(x)]\ntest_list[1]"""
UpperCAmelCase__ : Union[str, Any] = {"""x""": 3}
UpperCAmelCase__ : int = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_list""": [3, 5]})
UpperCAmelCase__ : List[str] = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
UpperCAmelCase__ : Any = {"""x""": 3}
UpperCAmelCase__ : Dict = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = """x = 0\nfor i in range(3):\n x = i"""
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""range""": range} , state=_lowerCamelCase)
assert result == 2
self.assertDictEqual(_lowerCamelCase , {"""x""": 2, """i""": 2}) | 163 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size_divisor'))
        self.assertTrue(hasattr(image_processing, 'resample'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 356 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__A = direct_transformers_import(PATH_TO_TRANSFORMERS)
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__A = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :List[str] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"config.{attribute}" in modeling_source
or F"getattr(config, \"{attribute}\"" in modeling_source
or F"getattr(self.config, \"{attribute}\"" in modeling_source
):
lowerCAmelCase__ :List[str] = True
# Deal with multi-line cases
elif (
re.search(
rF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , _SCREAMING_SNAKE_CASE , )
is not None
):
lowerCAmelCase__ :int = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowerCAmelCase__ :Any = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowerCAmelCase__ :Union[str, Any] = [
'bos_index',
'eos_index',
'pad_index',
'unk_index',
'mask_index',
'image_size',
'use_cache',
'out_features',
'out_indices',
]
lowerCAmelCase__ :Union[str, Any] = ['encoder_no_repeat_ngram_size']
# Special cases to be allowed
lowerCAmelCase__ :Any = True
if not attribute_used:
lowerCAmelCase__ :List[Any] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowerCAmelCase__ :List[str] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowerCAmelCase__ :Tuple = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowerCAmelCase__ :Optional[Any] = True
elif attribute.endswith('_token_id' ):
lowerCAmelCase__ :List[Any] = True
# configuration class specific cases
if not case_allowed:
lowerCAmelCase__ :List[str] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
lowerCAmelCase__ :List[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def __A (_SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = dict(inspect.signature(config_class.__init__ ).parameters )
lowerCAmelCase__ :List[Any] = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
lowerCAmelCase__ :List[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowerCAmelCase__ :Optional[Any] = {}
if len(config_class.attribute_map ) > 0:
lowerCAmelCase__ :Optional[int] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowerCAmelCase__ :str = inspect.getsourcefile(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = os.path.dirname(_SCREAMING_SNAKE_CASE )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowerCAmelCase__ :Dict = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for fn in os.listdir(_SCREAMING_SNAKE_CASE ) if fn.startswith('modeling_' )]
# Get the source code strings
lowerCAmelCase__ :Tuple = []
for path in modeling_paths:
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE ) as fp:
modeling_sources.append(fp.read() )
lowerCAmelCase__ :Any = []
for config_param, default_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# `attributes` here is all the variant names for `config_param`
lowerCAmelCase__ :Optional[int] = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
unused_attributes.append(attributes[0] )
return sorted(_SCREAMING_SNAKE_CASE )
def __A () ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowerCAmelCase__ :List[str] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _SCREAMING_SNAKE_CASE : inspect.isclass(_SCREAMING_SNAKE_CASE )
and issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and inspect.getmodule(_SCREAMING_SNAKE_CASE ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
lowerCAmelCase__ :Union[str, Any] = check_config_attributes_being_used(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase__ :int = unused_attributes
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase__ :Any = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
for name, attributes in configs_with_unused_attributes.items():
error += F"{name}: {attributes}\n"
raise ValueError(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
check_config_attributes()
| 254 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 190 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
 | 190 | 1 |
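# Usage sketch for the Pandas builder above: it is the packaged module that
# backs `load_dataset("pandas", ...)` for pickled DataFrames (the file name
# here is illustrative):
#
#   import datasets
#   ds = datasets.load_dataset("pandas", data_files={"train": "data/train.pkl"})
#   print(ds["train"].features)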
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 169 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under non-preemptive SJF."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Copy burst_time into remaining_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain, every process whose arrival time has passed and
    # that still has remaining execution time is put into ready_process; the
    # shortest job in ready_process, target_process, is executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is the sum of a process's burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 169 | 1 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)
class SVC:
    def __init__(self, *,
                 regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0, ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
            self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vectora: ndarray, vectorb: ndarray) -> float:
        """Linear kernel (as if no kernel is used at all)."""
        return np.dot(vectora, vectorb)

    def __rbf(self, vectora: ndarray, vectorb: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
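# A tiny usage sketch (assuming the scikit-learn-style fit/predict API above):
# two linearly separable points with labels +1 and -1.
#
#   observations = [np.array([1.0, 1.0]), np.array([-1.0, -1.0])]
#   classes = np.array([1, -1])
#   svc = SVC(kernel="linear", regularization=10.0)
#   svc.fit(observations, classes)
#   print(svc.predict(np.array([2.0, 2.0])))  # expected: 1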
| 332 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError('''CUDA out of memory.''')


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_base_case(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [1_28, 64, 32, 16, 8])
    def test_passed_arguments(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size, arga):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs, arga = mock_training_loop_function('''hello''')
        self.assertListEqual(batch_sizes, [1_28, 64, 32, 16, 8])
        self.assertListEqual([bs, arga], [8, '''hello'''])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''', cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''', cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size, arga, argb):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(1_28, '''hello''', '''world''')
        self.assertIn('''Batch size was passed into `f`''', cm.exception.args[0])
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''', cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('''Oops, we had an error!''')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''', cm.exception.args[0])
@require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
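# A sketch of how the decorator is used in a real training script (outside the
# tests above); the function body and names here are illustrative:
#
#   @find_executable_batch_size(starting_batch_size=64)
#   def training_loop(batch_size):
#       # `batch_size` is injected by the decorator and halved after every
#       # out-of-memory error until the function body completes.
#       ...
#
#   training_loop()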
| 332 | 1 |
"""simple docstring"""
SCREAMING_SNAKE_CASE = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised infix expression of single-digit operands."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
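    # A short trace (a sketch): for "(5 + (4 * 2))" the algorithm pushes 5,
    # '+', 4, '*', 2; the inner ')' pops '*' with operands 4 and 2 and pushes
    # 8; the outer ')' pops '+' with operands 5 and 8 and pushes 13.
    print(f"(5 + (4 * 2)) = {dijkstras_two_stack_algorithm('(5 + (4 * 2))')}")  # 13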
| 230 |
"""simple docstring"""
SCREAMING_SNAKE_CASE = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
SCREAMING_SNAKE_CASE = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
SCREAMING_SNAKE_CASE = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
SCREAMING_SNAKE_CASE = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
SCREAMING_SNAKE_CASE = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
SCREAMING_SNAKE_CASE = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
SCREAMING_SNAKE_CASE = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
SCREAMING_SNAKE_CASE = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
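# These constants read as precomputed denoising timestep schedules: each one
# starts below the 1000-step training horizon, strictly decreases, and
# terminates at step 0. A structural check, as a sketch (the function name
# is mine, not part of the original file):
def is_valid_timestep_schedule(schedule):
    return (
        schedule[0] <= 999
        and schedule[-1] == 0
        and all(a > b for a, b in zip(schedule, schedule[1:]))
    )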
| 230 | 1 |
"""simple docstring"""
import requests
_NEWS_API = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    '''Print the titles of the current top BBC news articles.'''
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["""articles"""], 1):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
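    # A more defensive variant (a sketch): check the HTTP status and guard
    # against a missing "articles" key before iterating.
    #
    #   response = requests.get(_NEWS_API + bbc_news_api_key, timeout=10)
    #   response.raise_for_status()
    #   for i, article in enumerate(response.json().get("articles", []), 1):
    #       print(f"{i}.) {article['title']}")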
| 294 |
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(F"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(distance):
        print(F"""{i}\t\t{d}""")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("""src""", """dst""", """weight"""))
        if distance[u] != float("""inf""") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return the shortest distances from vertex src to every other vertex."""
    distance = [float("""inf""")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("""src""", """dst""", """weight"""))
            if distance[u] != float("""inf""") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("""Negative cycle found""")
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('Enter number of vertices: ').strip())
    E = int(input('Enter number of edges: ').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('Edge ', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('Enter source, destination, weight: ').strip().split(' ')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}
    source = int(input('\nEnter shortest path source:').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
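    # A non-interactive example (a sketch): a 3-vertex graph with edges
    # 0->1 (w=4), 0->2 (w=7) and 1->2 (w=2); distances from vertex 0 should
    # come out as [0.0, 4.0, 6.0] because 0->1->2 is cheaper than 0->2.
    example_graph = [
        {'src': 0, 'dst': 1, 'weight': 4},
        {'src': 0, 'dst': 2, 'weight': 7},
        {'src': 1, 'dst': 2, 'weight': 2},
    ]
    assert bellman_ford(example_graph, 3, 3, 0) == [0.0, 4.0, 6.0]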
| 294 | 1 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    '''Calculate the waiting time of each process under non-preemptive SJF.'''
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Copy burst_time into remaining_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain, every process whose arrival time has passed and
    # that still has remaining execution time is put into ready_process; the
    # shortest job in ready_process, target_process, is executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    '''Turnaround time is the sum of a process's burst time and waiting time.'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 271 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise the PyTorch model from the config file
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 271 | 1 |
def solution(n: int = 2_0_0_0_0_0_0) -> int:
    """Return the sum of all primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 277 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.']):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split('adaptor.')[-1]
    items = name.split('.')
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
                logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
                logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
                logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''')
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''')
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path,
    encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size,
    adapter_stride, decoder_start_token_id, encoder_output_dim, ):
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size, use_auth_token=True,
        output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1]),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'mbart50'
    config['feature_extractor_type'] = 'wav2vec2'
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 2_5_0_0_0_4
    config['forced_eos_token_id'] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 277 | 1 |
'''simple docstring'''
def topological_sort(graph):
    '''Kahn's algorithm: print a topological order of the adjacency-list graph.'''
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("""Cycle exists""")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
 | 135 |
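# A smaller worked example of Kahn's algorithm above (a sketch):
#
#   topological_sort({0: [1], 1: [2], 2: []})  # prints [0, 1, 2]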
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = """"""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            """It was the year of Our Lord one thousand seven hundred and """
            """seventy-five\n\nSpiritual revelations were conceded to England """
            """at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            """It was the year of Our Lord one thousand seven hundred and seventy-five.""",
            """Spiritual revelations were conceded to England at that favoured period, as at this.""",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["""It was the best of times."""]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
 | 135 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(F"""patch_embed{idx}""", F"""patch_embeddings.{int(idx)-1}""")
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(F"""layer_norm{idx}""", F"""layer_norm.{int(idx)-1}""")
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(F"""block{idx}""", F"""block.{int(idx)-1}""")
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(F"""linear_c{idx}""", F"""linear_c.{int(idx)-1}""")
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""")
            kv_bias = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""")
            # next, add keys and values (in that order) to the state dict
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]])
        else:
            raise ValueError(F"""Unknown model name: {model_name}""")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')
    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
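    # After conversion, the checkpoint can be used for monocular depth
    # estimation; a usage sketch (the hub id below is the published GLPN
    # checkpoint; substitute your local dump folder if you converted one):
    #
    #   from transformers import pipeline
    #   depth_estimator = pipeline("depth-estimation", model="vinvino02/glpn-kitti")
    #   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    #   result["depth"].save("depth.png")  # PIL image of the predicted depth map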
| 50 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()
@dataclass
class Tracker:
    '''simple docstring'''
    module: nn.Module
    traced: list = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    '''simple docstring'''
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(F"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = F"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )
        print(F"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 298 | 0 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True,
                 unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>",
                 pad_token: Union[str, AddedToken] = "<pad>", ) -> None:
        self.special_tokens = {
            '''pad''': {'''id''': 0, '''token''': pad_token},
            '''eos''': {'''id''': 1, '''token''': eos_token},
            '''unk''': {'''id''': 2, '''token''': unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['''id''']] = token_dict['''token''']
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(''' {2,}'''), ''' '''),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f'''$A {self.special_tokens['eos']['token']}''', special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])], )
        parameters = {
            '''model''': '''SentencePieceUnigram''',
            '''replacement''': replacement,
            '''add_prefix_space''': add_prefix_space,
        }
        super().__init__(tokenizer, parameters)
    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, ) -> None:
        """Train the tokenizer on the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, ) -> None:
        """Train the tokenizer on the given iterator of text."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self) -> None:
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json['''model''']['''unk_id'''] = self.special_tokens['''unk''']['''id''']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
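# A minimal usage sketch (corpus and vocab size are illustrative): train the
# Unigram tokenizer on an in-memory iterator and encode a sentence.
#
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train_from_iterator(["this is a sentence"] * 100, vocab_size=100)
#   print(tokenizer.encode("this is a sentence").tokens)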
 | 350 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    '''Return all k-combinations of the numbers 1..n in lexicographic order.'''
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]], ) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
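    # Cross-check against the standard library (a quick sketch): the recursive
    # generator should agree with itertools.combinations over 1..n.
    from itertools import combinations

    assert total_list == [list(c) for c in combinations(range(1, n + 1), k)]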
| 78 | 0 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = """ybelkada/fonts"""
def _check_torch_version():
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
'Pix2StructImageProcessor. Please upgrade torch.' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ['torch'])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
):
    """Render text onto a blank RGB image, wrapped at 80 characters per line."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    """Render a text header above the given image and return the combined image."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """Image processor that flattens images into fixed-length sequences of patches."""

    model_input_names = ["flattened_patches"]

    def __init__(self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Dict[str, int] = None, max_patches: int = 2048, is_vqa: bool = False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. rows * columns <= max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images: ImageInput, header_text: Optional[str] = None, do_convert_rgb: bool = None, do_normalize: Optional[bool] = None, max_patches: Optional[int] = None, patch_size: Optional[Dict[str, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
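# Added sketch (not part of the upstream file): a quick shape check for the
# unfold-based helper above. It assumes torch >= 1.11 is installed and that the
# module runs inside the transformers package (the relative imports at the top
# require it). A 3-channel 32x32 image cut into 16x16 patches gives a 2x2 grid
# of flattened patches of length 3 * 16 * 16 = 768.
if __name__ == "__main__":
    sample = torch.randn(3, 32, 32)
    patch_grid = torch_extract_patches(sample, 16, 16)
    assert patch_grid.shape == (1, 2, 2, 768)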
| 78 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
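# Added illustration (not in the original file): the bookkeeping in
# `get_overflowing_images` is plain index repetition, which plain lists show.
# If the first page overflows into two token windows, the mapping [0, 0, 1]
# repeats that page's image once per window.
if __name__ == "__main__":
    pages = ["page0.png", "page1.png"]
    overflow_to_sample_mapping = [0, 0, 1]
    assert [pages[i] for i in overflow_to_sample_mapping] == ["page0.png", "page0.png", "page1.png"]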
| 78 | 1 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def logger():
    """Return the module-level logger, creating it on first use."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Wraps a lock object so that ``with lock.acquire():`` releases it on exit."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function; it is None unless the object currently holds
        # the lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
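# Added usage sketch (not part of the vendored module): the lock is re-entrant,
# so nesting `with` blocks on the same object only bumps an internal counter,
# and the OS-level lock is dropped when the outermost block exits. The lock
# file path is an arbitrary placeholder; run on POSIX, where FileLock resolves
# to UnixFileLock.
if __name__ == "__main__":
    demo_lock = FileLock("./filelock_demo.lock", timeout=1)
    with demo_lock:
        with demo_lock:  # nested acquire: counter goes to 2, no deadlock
            assert demo_lock.is_locked
    assert not demo_lock.is_locked
    if os.path.exists("./filelock_demo.lock"):
        os.remove("./filelock_demo.lock")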
| 5 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 5 | 1 |
def binomial_coefficient(n, r):
    """Compute C(n, r) row by row with Pascal's rule, using O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
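# Added cross-check (not in the original file): the rolling-row computation
# above should agree with math.comb from the standard library.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252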
| 48 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
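# Added usage note (not in the original module): other transformers modules call
# the helper above by pinned-package name, e.g. `dep_version_check("tokenizers")`,
# which raises a clear error when the installed version violates the pin in `deps`.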
| 213 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowerCAmelCase :str = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base feature extractor for speech/sequence models, providing padding and truncation."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
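# Added sketch (not part of the upstream file; illustrative only): the
# right-side padding performed in `_pad` is plain `np.pad` on the time axis.
# For a 1-D feature sequence padded by two frames it reduces to the following.
if __name__ == "__main__":
    seq = np.array([0.1, 0.2, 0.3])
    padded = np.pad(seq, (0, 2), "constant", constant_values=0.0)
    assert padded.tolist() == [0.1, 0.2, 0.3, 0.0, 0.0]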
| 68 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
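# Added note (not in the original file): with the `_import_structure` mapping
# above, `_LazyModule` defers the heavy submodule imports, so e.g.
#     from transformers.models.efficientnet import EfficientNetConfig
# only triggers the import of `configuration_efficientnet` on first access.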
| 68 | 1 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
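# Added usage sketch (the script name and paths below are illustrative
# placeholders, not taken from the original file):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-converted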
| 85 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: gcd(a, b) == gcd(b, a mod b)."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the same algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
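    # Added cross-check (not in the original): both variants agree with math.gcd.
    import math

    for x, y in [(3, 5), (6, 3), (48, 18)]:
        assert euclidean_gcd(x, y) == euclidean_gcd_recursive(x, y) == math.gcd(x, y)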
| 94 | 0 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout=0.1, activation_function="gelu", vocab_size=30522, hidden_size=1024,
        encoder_ffn_dim=4096, num_encoder_layers=12, num_encoder_attention_heads=16,
        decoder_ffn_dim=4096, num_decoder_layers=12, num_decoder_attention_heads=16,
        attention_dropout=0.1, dropout=0.1, max_position_embeddings=512, init_std=0.02,
        is_encoder_decoder=True, add_cross_attention=True, decoder_start_token_id=0,
        ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False,
        eps=0.0, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
| 364 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 102 | 0 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"
@property
def _a ( self , A_=(32, 32) ) -> str:
__UpperCamelCase =4
__UpperCamelCase =3
__UpperCamelCase =floats_tensor((batch_size, num_channels) + sizes ).to(A_ )
return {"sample": image}
@property
def _a ( self ) -> str:
return (3, 32, 32)
@property
def _a ( self ) -> Optional[int]:
return (3, 32, 32)
def _a ( self ) -> Optional[Any]:
__UpperCamelCase ={
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 3,
}
__UpperCamelCase =self.dummy_input
return init_dict, inputs_dict
def _a ( self ) -> str:
pass
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(A_ )
__UpperCamelCase =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _a ( self ) -> Tuple:
__UpperCamelCase =VQModel.from_pretrained('fusing/vqgan-dummy' )
model.to(A_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
__UpperCamelCase =torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
__UpperCamelCase =image.to(A_ )
with torch.no_grad():
__UpperCamelCase =model(A_ ).sample
__UpperCamelCase =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__UpperCamelCase =torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) )
| 62 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
def __init__( self , a , a=13 , a=64 , a=2 , a=3 , a=True , a=True , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=10 , a=0.02 , a=[1, 16, 4, 4] , a=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=a , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> int:
SCREAMING_SNAKE_CASE = ViTHybridModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> List[Any]:
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = ViTHybridModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
pass
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear))
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(a)
SCREAMING_SNAKE_CASE = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(a)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=a)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
SCREAMING_SNAKE_CASE = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> str:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = ViTHybridModel.from_pretrained(a)
self.assertIsNotNone(a)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
a)
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=a , return_tensors='pt').to(a)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**a)
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , a)
SCREAMING_SNAKE_CASE = torch.tensor([-1.90_90, -0.49_93, -0.23_89]).to(a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4))
@slow
@require_accelerate
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto')
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=a , return_tensors='pt')
SCREAMING_SNAKE_CASE = model(**a)
SCREAMING_SNAKE_CASE = outputs.logits
# model predicts one of the 1000 ImageNet classes
SCREAMING_SNAKE_CASE = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 137 | 0 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list ``a`` in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(x) for x in a]))


if __name__ == "__main__":
    main()
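    # Added check (not in the original): pigeonhole sort runs in O(n + range)
    # time and should agree with the built-in sorted() on small integer inputs.
    data = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(data)
    assert data == sorted([8, 3, 2, 7, 4, 6, 8])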
| 83 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Any=[10, 20, 30, 40] , UpperCamelCase__ : Any=[1, 1, 2, 1] , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str="relu" , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Tuple=None , ) -> List[str]:
"""simple docstring"""
snake_case : List[str] = parent
snake_case : Tuple = batch_size
snake_case : int = image_size
snake_case : Any = num_channels
snake_case : Optional[int] = embeddings_size
snake_case : Optional[int] = hidden_sizes
snake_case : str = depths
snake_case : Tuple = is_training
snake_case : List[str] = use_labels
snake_case : List[str] = hidden_act
snake_case : Tuple = num_labels
snake_case : Tuple = scope
snake_case : List[str] = len(UpperCamelCase__ )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Any = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCAmelCase ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ) -> Tuple:
"""simple docstring"""
snake_case : List[str] = FlaxRegNetModel(config=UpperCamelCase__ )
snake_case : str = model(UpperCamelCase__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Dict ) -> Dict:
"""simple docstring"""
snake_case : int = self.num_labels
snake_case : List[str] = FlaxRegNetForImageClassification(config=UpperCamelCase__ )
snake_case : Any = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
snake_case : str = self.prepare_config_and_inputs()
snake_case ,snake_case : Tuple = config_and_inputs
snake_case : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
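# The test class below runs the shared FlaxModelTesterMixin checks against the
# tiny configurations produced by FlaxRegNetModelTester.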
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
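            # hidden_states contains the embedding output plus one tensor per
            # stage, hence the expected length below is num_stages + 1.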
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)
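                # Call the model once with JIT enabled and once with it disabled;
                # both paths should return outputs of identical structure and shape.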
                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
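        # Spot-check the first three logits against reference values for this checkpoint.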
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 83 | 1 |