| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86-54.5k) | int64 (0-371) | string (lengths 87-49.2k) | int64 (0-349) | int64 (0-1) |
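The rows below are the flattened cells of this dataset: each `code` cell is followed by its `code_codestyle` id, then the paired `style_context` cell, then `style_context_codestyle` and the binary `label`. A minimal sketch of inspecting such a dataset with the `datasets` library (the parquet file name is a placeholder, not from the original):

```python
from datasets import load_dataset

# Placeholder data file; substitute the real dataset ID or parquet path.
ds = load_dataset("parquet", data_files="code_style_pairs.parquet", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code cell
```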
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''ViTFeatureExtractor''']
__lowercase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
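The cell above is an obfuscated copy of the `transformers` ViT `__init__.py`, which registers submodules lazily and only exposes backend-specific classes when that backend is installed. A minimal de-obfuscated sketch of the lazy-import pattern, trimmed to one backend (variable names restored by convention; the relative imports assume the transformers package layout):

```python
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_vit": ["ViTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: leave the modeling classes unregistered
else:
    _import_structure["modeling_vit"] = ["ViTModel"]

if TYPE_CHECKING:
    from .configuration_vit import ViTConfig
else:
    import sys

    # Replace this module with a proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```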
| code_codestyle: 43 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase = logging.get_logger(__name__)
__lowercase = {'''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : int = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : List[str] = ["""input_ids""", """attention_mask"""]
a__ : int = None
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase=False , __lowercase=False , **__lowercase , ) -> List[str]:
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
__UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
__UpperCamelCase :Any = getattr(__lowercase , pre_tok_state.pop('''type'''))
__UpperCamelCase :str = add_prefix_space
__UpperCamelCase :List[str] = pre_tok_class(**__lowercase)
__UpperCamelCase :Tuple = add_prefix_space
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
__UpperCamelCase :Tuple = kwargs.get('''is_split_into_words''' , __lowercase)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''')
return super()._batch_encode_plus(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
__UpperCamelCase :List[str] = kwargs.get('''is_split_into_words''' , __lowercase)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''')
return super()._encode_plus(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
__UpperCamelCase :Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase)
return tuple(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> List[int]:
__UpperCamelCase :str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase) + [self.eos_token_id])
if len(__lowercase) > self.model_max_length:
__UpperCamelCase :Any = input_ids[-self.model_max_length :]
return input_ids
| style_context_codestyle: 43 | label: 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : int = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase_ :List[Any] = '''gpt_bigcode'''
lowerCamelCase_ :Any = ['''past_key_values''']
lowerCamelCase_ :Optional[Any] = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , snake_case_=5_0_2_5_7 , snake_case_=1_0_2_4 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=None , snake_case_="gelu_pytorch_tanh" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_=True , snake_case_=True , snake_case_=5_0_2_5_6 , snake_case_=5_0_2_5_6 , snake_case_=True , snake_case_=True , snake_case_=True , **snake_case_ , ):
'''simple docstring'''
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Tuple = n_positions
UpperCAmelCase_ : Any = n_embd
UpperCAmelCase_ : int = n_layer
UpperCAmelCase_ : Optional[int] = n_head
UpperCAmelCase_ : str = n_inner
UpperCAmelCase_ : str = activation_function
UpperCAmelCase_ : Union[str, Any] = resid_pdrop
UpperCAmelCase_ : Union[str, Any] = embd_pdrop
UpperCAmelCase_ : List[Any] = attn_pdrop
UpperCAmelCase_ : Optional[Any] = layer_norm_epsilon
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Any = scale_attn_weights
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : Optional[Any] = attention_softmax_in_fpaa
UpperCAmelCase_ : List[str] = scale_attention_softmax_in_fpaa
UpperCAmelCase_ : Any = multi_query
UpperCAmelCase_ : str = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
| code_codestyle: 274 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
snake_case__ : Dict = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase_ :str = '''albert'''
def __init__( self , snake_case_=3_0_0_0_0 , snake_case_=1_2_8 , snake_case_=4_0_9_6 , snake_case_=1_2 , snake_case_=1 , snake_case_=6_4 , snake_case_=1_6_3_8_4 , snake_case_=1 , snake_case_="gelu_new" , snake_case_=0 , snake_case_=0 , snake_case_=5_1_2 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0.1 , snake_case_="absolute" , snake_case_=0 , snake_case_=2 , snake_case_=3 , **snake_case_ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : Dict = embedding_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_hidden_groups
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : Any = inner_group_num
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : Dict = type_vocab_size
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Dict = classifier_dropout_prob
UpperCAmelCase_ : Tuple = position_embedding_type
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase_ : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| style_context_codestyle: 274 | label: 1 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
__lowerCAmelCase = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
__lowerCAmelCase = F"""{src_lang}-{tgt_lang}"""
__lowerCAmelCase = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(lowerCamelCase, exist_ok=lowerCamelCase)
__lowerCAmelCase = os.path.join(lowerCamelCase, '''README.md''')
print(F"""Generating {path}""")
with open(lowerCamelCase, '''w''', encoding='''utf-8''') as f:
f.write(lowerCamelCase)
# make sure we are under the root of the project
_UpperCAmelCase : Dict = Path(__file__).resolve().parent.parent.parent
_UpperCAmelCase : Optional[int] = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = model_name.split("""-""")
_UpperCAmelCase : Union[str, Any] = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| code_codestyle: 174 |
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_UpperCAmelCase : int = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class a__ ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCamelCase : Optional[datasets.Features] = None
def __magic_name__( lowerCamelCase, lowerCamelCase, ):
import pyspark
def generate_fn():
__lowerCAmelCase = df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id'''))
for partition_id in partition_order:
__lowerCAmelCase = df_with_partition_id.select('''*''').where(F"""part_id = {partition_id}""").drop('''part_id''')
__lowerCAmelCase = partition_df.collect()
__lowerCAmelCase = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class a__ ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=None , ):
__lowerCAmelCase = df
__lowerCAmelCase = partition_order or range(self.df.rdd.getNumPartitions() )
__lowerCAmelCase = _generate_iterable_examples(self.df , self.partition_order )
def __iter__(self ):
yield from self.generate_examples_fn()
def _snake_case (self , __lowercase ):
__lowerCAmelCase = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__lowercase )
return SparkExamplesIterable(self.df , partition_order=__lowercase )
def _snake_case (self , __lowercase , __lowercase ):
__lowerCAmelCase = self.split_shard_indices_by_worker(__lowercase , __lowercase )
return SparkExamplesIterable(self.df , partition_order=__lowercase )
@property
def _snake_case (self ):
return len(self.partition_order )
class a__ ( datasets.DatasetBuilder ):
"""simple docstring"""
__UpperCamelCase : int = SparkConfig
def __init__(self , __lowercase , __lowercase = None , __lowercase = None , **__lowercase , ):
import pyspark
__lowerCAmelCase = pyspark.sql.SparkSession.builder.getOrCreate()
__lowerCAmelCase = df
__lowerCAmelCase = working_dir
super().__init__(
cache_dir=__lowercase , config_name=str(self.df.semanticHash() ) , **__lowercase , )
def _snake_case (self ):
# Returns the path of the created file.
def create_cache_and_write_probe(__lowercase ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__lowercase )
__lowerCAmelCase = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__lowercase , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__lowerCAmelCase = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowercase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def _snake_case (self ):
return datasets.DatasetInfo(features=self.config.features )
def _snake_case (self , __lowercase ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _snake_case (self , __lowercase ):
import pyspark
def get_arrow_batch_size(__lowercase ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
__lowerCAmelCase = self.df.count()
__lowerCAmelCase = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__lowerCAmelCase = (
self.df.limit(__lowercase )
.repartition(1 )
.mapInArrow(__lowercase , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__lowerCAmelCase = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__lowerCAmelCase = min(__lowercase , int(approx_total_size / max_shard_size ) )
__lowerCAmelCase = self.df.repartition(__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase , ):
import pyspark
__lowerCAmelCase = ParquetWriter if file_format == '''parquet''' else ArrowWriter
__lowerCAmelCase = os.path.join(self._working_dir , os.path.basename(__lowercase ) ) if self._working_dir else fpath
__lowerCAmelCase = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__lowerCAmelCase = self.config.features
__lowerCAmelCase = self._writer_batch_size
__lowerCAmelCase = self._fs.storage_options
def write_arrow(__lowercase ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__lowerCAmelCase = pyspark.TaskContext().taskAttemptId()
__lowerCAmelCase = next(__lowercase , __lowercase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
__lowerCAmelCase = 0
__lowerCAmelCase = writer_class(
features=__lowercase , path=working_fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , writer_batch_size=__lowercase , storage_options=__lowercase , embed_local_files=__lowercase , )
__lowerCAmelCase = pa.Table.from_batches([first_batch] )
writer.write_table(__lowercase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
__lowerCAmelCase = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , writer_batch_size=__lowercase , storage_options=__lowercase , embed_local_files=__lowercase , )
__lowerCAmelCase = pa.Table.from_batches([batch] )
writer.write_table(__lowercase )
if writer._num_bytes > 0:
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__lowercase ) ):
__lowerCAmelCase = os.path.join(os.path.dirname(__lowercase ) , os.path.basename(__lowercase ) )
shutil.move(__lowercase , __lowercase )
__lowerCAmelCase = (
self.df.mapInArrow(__lowercase , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _snake_case (self , __lowercase , __lowercase = "arrow" , __lowercase = None , __lowercase = None , **__lowercase , ):
self._validate_cache_dir()
__lowerCAmelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__lowercase )
__lowerCAmelCase = not is_remote_filesystem(self._fs )
__lowerCAmelCase = os.path.join if is_local else posixpath.join
__lowerCAmelCase = '''-TTTTT-SSSSS-of-NNNNN'''
__lowerCAmelCase = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
__lowerCAmelCase = path_join(self._output_dir , __lowercase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = []
__lowerCAmelCase = []
for task_id, content in self._prepare_split_single(__lowercase , __lowercase , __lowercase ):
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__lowercase )
__lowerCAmelCase = total_num_examples
__lowerCAmelCase = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
__lowerCAmelCase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__lowerCAmelCase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowercase , __lowercase , __lowercase , ):
rename(
__lowercase , fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , F"""{global_shard_id:05d}""" ).replace('''NNNNN''' , F"""{total_shards:05d}""" ) , )
__lowerCAmelCase = []
__lowerCAmelCase = 0
for i in range(len(__lowercase ) ):
__lowerCAmelCase , __lowerCAmelCase = task_id_and_num_shards[i]
for shard_id in range(__lowercase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__lowercase , len(__lowercase ) ).map(lambda __lowercase : _rename_shard(*__lowercase ) ).collect()
else:
# don't use any pattern
__lowerCAmelCase = 0
__lowerCAmelCase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , fpath.replace(__lowercase , '''''' ) , )
def _snake_case (self , __lowercase , ):
return SparkExamplesIterable(self.df )
| style_context_codestyle: 174 | label: 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = ['''image_processor''', '''tokenizer''']
snake_case = '''BlipImageProcessor'''
snake_case = '''AutoTokenizer'''
def __init__( self : str , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Any ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
# add QFormer tokenizer
_A = qformer_tokenizer
def __call__( self : List[Any] , __UpperCAmelCase : ImageInput = None , __UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : int = 0 , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , **__UpperCAmelCase : Union[str, Any] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_A = BatchFeature()
if text is not None:
_A = self.tokenizer(
text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
encoding.update(__UpperCAmelCase )
_A = self.qformer_tokenizer(
text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
_A = qformer_text_encoding.pop("input_ids" )
_A = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_A = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase )
encoding.update(__UpperCAmelCase )
return encoding
def lowerCAmelCase ( self : Tuple , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : int ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : Any , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = self.tokenizer.model_input_names
_A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
'''simple docstring'''
if os.path.isfile(__UpperCAmelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_A = os.path.join(__UpperCAmelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__UpperCAmelCase )
return super().save_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
@classmethod
def lowerCAmelCase ( cls : Tuple , __UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Optional[int] ):
'''simple docstring'''
_A = AutoTokenizer.from_pretrained(__UpperCAmelCase , subfolder="qformer_tokenizer" )
_A = cls._get_arguments_from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
args.append(__UpperCAmelCase )
return cls(*__UpperCAmelCase )
| code_codestyle: 174 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def __lowercase ( __lowercase = 150_0000 ) -> int:
'''simple docstring'''
_A = defaultdict(__lowercase )
_A = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __lowercase , 2 ):
if gcd(__lowercase , __lowercase ) > 1:
continue
_A = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__lowercase , limit + 1 , __lowercase ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| style_context_codestyle: 174 | label: 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def UpperCAmelCase_ ( __lowerCAmelCase = 8 ) -> str:
__lowercase : Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(__lowerCAmelCase )
__lowercase : Dict = i // 3
__lowercase : List[str] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
__lowercase : int = (
chars_incl
+ random(__lowerCAmelCase , quotient + remainder )
+ random(__lowerCAmelCase , __lowerCAmelCase )
+ random(__lowerCAmelCase , __lowerCAmelCase )
)
__lowercase : List[str] = list(__lowerCAmelCase )
shuffle(__lowerCAmelCase )
return "".join(__lowerCAmelCase )
# random is a generalised function for letters, characters and numbers
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
return "".join(secrets.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
pass # Put your code here...
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
pass # Put your code here...
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
pass # Put your code here...
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase = 8 ) -> bool:
if len(__lowerCAmelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
__lowercase : Dict = any(char in ascii_uppercase for char in password )
__lowercase : str = any(char in ascii_lowercase for char in password )
__lowercase : Tuple = any(char in digits for char in password )
__lowercase : Optional[Any] = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def UpperCAmelCase_ ( ) -> Any:
__lowercase : Optional[int] = int(input('''Please indicate the max length of your password: ''' ).strip() )
__lowercase : Optional[int] = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''' , password_generator(__lowerCAmelCase ) )
print(
'''Alternative Password generated:''' , alternative_password_generator(__lowerCAmelCase , __lowerCAmelCase ) , )
print('''[If you are thinking of using this password, you had better save it.]''' )
if __name__ == "__main__":
main()
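The three helpers stubbed with `pass` in the cell above correspond, per the commented-out call sites, to `random_letters`, `random_number`, and `random_characters`. They could plausibly be thin wrappers over `secrets.choice`, mirroring the generic `random` helper already defined there; a sketch, not part of the original:

```python
import secrets

# Each stub draws n characters from its charset, like the generic helper above.
def random_letters(chars: str, n: int) -> str:
    return "".join(secrets.choice(chars) for _ in range(n))


def random_number(chars: str, n: int) -> str:
    return "".join(secrets.choice(chars) for _ in range(n))


def random_characters(chars: str, n: int) -> str:
    return "".join(secrets.choice(chars) for _ in range(n))
```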
| code_codestyle: 156 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__lowerCAmelCase : int = logging.get_logger(__name__)
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = ['''pixel_values''']
def __init__( self : Dict , _snake_case : bool = True , _snake_case : Optional[Dict[str, int]] = None , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : bool = True , _snake_case : Union[int, float] = 1 / 255 , _snake_case : bool = True , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , **_snake_case : Tuple , ):
super().__init__(**_snake_case )
__lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 256}
__lowercase : Dict = get_size_dict(_snake_case , default_to_square=_snake_case )
__lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__lowercase : Any = get_size_dict(_snake_case , param_name='''crop_size''' )
__lowercase : int = do_resize
__lowercase : Union[str, Any] = size
__lowercase : Optional[int] = resample
__lowercase : str = do_center_crop
__lowercase : str = crop_size
__lowercase : Optional[int] = do_rescale
__lowercase : str = rescale_factor
__lowercase : List[Any] = do_normalize
__lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case_ ( self : List[str] , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : PILImageResampling = PILImageResampling.BICUBIC , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Any , ):
__lowercase : Union[str, Any] = get_size_dict(_snake_case , default_to_square=_snake_case )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__lowercase : Dict = get_resize_output_image_size(_snake_case , size=size['''shortest_edge'''] , default_to_square=_snake_case )
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : Union[str, Any] , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : str , ):
__lowercase : str = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_snake_case , size=(size['''height'''], size['''width''']) , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : str , _snake_case : np.ndarray , _snake_case : float , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Any ):
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : Any , _snake_case : np.ndarray , _snake_case : Union[float, List[float]] , _snake_case : Union[float, List[float]] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : str , ):
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : Optional[Any] , _snake_case : ImageInput , _snake_case : Optional[bool] = None , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = None , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[float] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_snake_case : int , ):
__lowercase : List[str] = do_resize if do_resize is not None else self.do_resize
__lowercase : str = size if size is not None else self.size
__lowercase : Any = get_size_dict(_snake_case , default_to_square=_snake_case )
__lowercase : str = resample if resample is not None else self.resample
__lowercase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : List[str] = crop_size if crop_size is not None else self.crop_size
__lowercase : int = get_size_dict(_snake_case , param_name='''crop_size''' )
__lowercase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : Any = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__lowercase : Dict = image_std if image_std is not None else self.image_std
__lowercase : Any = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowercase : List[str] = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
__lowercase : str = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
if do_center_crop:
__lowercase : Dict = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images]
if do_rescale:
__lowercase : List[str] = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images]
if do_normalize:
__lowercase : str = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images]
__lowercase : Dict = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
__lowercase : List[str] = {'''pixel_values''': images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
def snake_case_ ( self : Optional[int] , _snake_case : str , _snake_case : List[Tuple] = None ):
__lowercase : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_snake_case ) != len(_snake_case ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_snake_case ):
__lowercase : str = target_sizes.numpy()
__lowercase : Union[str, Any] = []
for idx in range(len(_snake_case ) ):
__lowercase : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_snake_case )
__lowercase : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_snake_case )
else:
__lowercase : str = logits.argmax(dim=1 )
__lowercase : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| style_context_codestyle: 156 | label: 1 |
'''simple docstring'''
def _lowerCamelCase ( lowerCamelCase_ : int ):
"""simple docstring"""
UpperCAmelCase_ : str = len(lowerCamelCase_ )
while cur > 1:
# Find the maximum number in arr
UpperCAmelCase_ : Optional[Any] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
UpperCAmelCase_ : Optional[Any] = arr[mi::-1] + arr[mi + 1 : len(lowerCamelCase_ )]
# Reverse whole list
UpperCAmelCase_ : Any = arr[cur - 1 :: -1] + arr[cur : len(lowerCamelCase_ )]
cur -= 1
return arr
if __name__ == "__main__":
snake_case__ : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
snake_case__ : List[Any] = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
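The cell above is an obfuscated pancake sort. A de-obfuscated sketch: each pass flips the maximum of the unsorted prefix to the front, then flips the whole prefix so that maximum lands in its final slot:

```python
def pancake_sort(arr: list[int]) -> list[int]:
    cur = len(arr)
    while cur > 1:
        mi = arr.index(max(arr[0:cur]))       # position of the max in the unsorted prefix
        arr = arr[mi::-1] + arr[mi + 1 :]     # flip 0..mi: the max moves to the front
        arr = arr[cur - 1 :: -1] + arr[cur:]  # flip 0..cur-1: the max lands at index cur-1
        cur -= 1
    return arr


print(pancake_sort([3, 1, 5, 2, 4]))  # [1, 2, 3, 4, 5]
```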
| code_codestyle: 354 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 7_6_8 , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : int = nn.Parameter(torch.zeros(1 , snake_case_ ) )
UpperCAmelCase_ : str = nn.Parameter(torch.ones(1 , snake_case_ ) )
def _UpperCamelCase ( self , snake_case_ = None , snake_case_ = None , ):
'''simple docstring'''
UpperCAmelCase_ : int = nn.Parameter(self.mean.to(snake_case_ ).to(snake_case_ ) )
UpperCAmelCase_ : Tuple = nn.Parameter(self.std.to(snake_case_ ).to(snake_case_ ) )
return self
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = (embeds * self.std) + self.mean
return embeds
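The cell above wraps learned per-dimension mean/std parameters around an embedding, whitening it on the way in and inverting the transform on the way out. A readable sketch of the scale/unscale pair (the class and method names here are illustrative, not the library's):

```python
import torch
from torch import nn


class EmbeddingNormalizer(nn.Module):
    """Whitens embeddings with learned mean/std parameters and inverts the transform."""

    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def scale(self, embeds: torch.Tensor) -> torch.Tensor:
        return (embeds - self.mean) / self.std  # whiten

    def unscale(self, embeds: torch.Tensor) -> torch.Tensor:
        return embeds * self.std + self.mean  # invert the whitening
```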
| style_context_codestyle: 274 | label: 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCamelCase_ = logging.get_logger(__name__)
# General docstring
lowerCamelCase_ = '''MobileNetV1Config'''
# Base docstring
lowerCamelCase_ = '''google/mobilenet_v1_1.0_224'''
lowerCamelCase_ = [1, 10_24, 7, 7]
# Image classification docstring
lowerCamelCase_ = '''google/mobilenet_v1_1.0_224'''
lowerCamelCase_ = '''tabby, tabby cat'''
lowerCamelCase_ = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowercase ( __lowercase , __lowercase , __lowercase=None ) -> List[str]:
'''simple docstring'''
_A = {}
if isinstance(__lowercase , __lowercase ):
_A = model.mobilenet_va
else:
_A = model
_A = "MobilenetV1/Conv2d_0/"
_A = backbone.conv_stem.convolution.weight
_A = backbone.conv_stem.normalization.bias
_A = backbone.conv_stem.normalization.weight
_A = backbone.conv_stem.normalization.running_mean
_A = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_A = i + 1
_A = i * 2
_A = backbone.layer[pt_index]
_A = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
_A = pointer.convolution.weight
_A = pointer.normalization.bias
_A = pointer.normalization.weight
_A = pointer.normalization.running_mean
_A = pointer.normalization.running_var
_A = backbone.layer[pt_index + 1]
_A = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
_A = pointer.convolution.weight
_A = pointer.normalization.bias
_A = pointer.normalization.weight
_A = pointer.normalization.running_mean
_A = pointer.normalization.running_var
if isinstance(__lowercase , __lowercase ):
_A = "MobilenetV1/Logits/Conv2d_1c_1x1/"
_A = model.classifier.weight
_A = model.classifier.bias
return tf_to_pt_map
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Dict:
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
_A = tf.train.list_variables(__lowercase )
_A = {}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''' )
_A = tf.train.load_variable(__lowercase , __lowercase )
_A = array
# Build TF to PyTorch weights loading map
_A = _build_tf_to_pytorch_map(__lowercase , __lowercase , __lowercase )
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''' )
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
continue
_A = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
_A = np.transpose(__lowercase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
_A = array.squeeze().transpose()
else:
_A = np.transpose(__lowercase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
_A = torch.from_numpy(__lowercase )
tf_weights.pop(__lowercase , __lowercase )
tf_weights.pop(name + "/RMSProp" , __lowercase )
tf_weights.pop(name + "/RMSProp_1" , __lowercase )
tf_weights.pop(name + "/ExponentialMovingAverage" , __lowercase )
logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
def __lowercase ( __lowercase , __lowercase ) -> torch.Tensor:
'''simple docstring'''
_A , _A = features.shape[-2:]
_A , _A = conv_layer.stride
_A , _A = conv_layer.kernel_size
if in_height % stride_height == 0:
_A = max(kernel_height - stride_height , 0 )
else:
_A = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
_A = max(kernel_width - stride_width , 0 )
else:
_A = max(kernel_width - (in_width % stride_width) , 0 )
_A = pad_along_width // 2
_A = pad_along_width - pad_left
_A = pad_along_height // 2
_A = pad_along_height - pad_top
_A = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__lowercase , __lowercase , "constant" , 0.0 )
class _UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : MobileNetVaConfig , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[bool] = True , __UpperCAmelCase : Optional[bool or str] = True , ):
'''simple docstring'''
super().__init__()
_A = config
if in_channels % groups != 0:
raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
_A = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_A = nn.Convad(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=__UpperCAmelCase , groups=__UpperCAmelCase , bias=__UpperCAmelCase , padding_mode="zeros" , )
if use_normalization:
_A = nn.BatchNormad(
num_features=__UpperCAmelCase , eps=config.layer_norm_eps , momentum=0.9997 , affine=__UpperCAmelCase , track_running_stats=__UpperCAmelCase , )
else:
_A = None
if use_activation:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = ACTaFN[use_activation]
elif isinstance(config.hidden_act , __UpperCAmelCase ):
_A = ACTaFN[config.hidden_act]
else:
_A = config.hidden_act
else:
_A = None
def lowerCAmelCase ( self : str , __UpperCAmelCase : torch.Tensor ):
'''simple docstring'''
if self.config.tf_padding:
_A = apply_tf_padding(__UpperCAmelCase , self.convolution )
_A = self.convolution(__UpperCAmelCase )
if self.normalization is not None:
_A = self.normalization(__UpperCAmelCase )
if self.activation is not None:
_A = self.activation(__UpperCAmelCase )
return features
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = MobileNetVaConfig
snake_case = load_tf_weights_in_mobilenet_va
snake_case = '''mobilenet_v1'''
snake_case = '''pixel_values'''
snake_case = False
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Union[nn.Linear, nn.Convad] ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__UpperCAmelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowerCamelCase_ = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowerCamelCase_ = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , snake_case_ , )
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[Any] , __UpperCAmelCase : MobileNetVaConfig , __UpperCAmelCase : bool = True ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
_A = config
_A = 32
_A = max(int(depth * config.depth_multiplier ) , config.min_depth )
_A = MobileNetVaConvLayer(
__UpperCAmelCase , in_channels=config.num_channels , out_channels=__UpperCAmelCase , kernel_size=3 , stride=2 , )
_A = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_A = nn.ModuleList()
for i in range(13 ):
_A = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_A = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=3 , stride=strides[i] , groups=__UpperCAmelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
__UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=1 , ) )
_A = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[str] ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ):
'''simple docstring'''
_A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_A = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
_A = self.conv_stem(__UpperCAmelCase )
_A = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_A = layer_module(__UpperCAmelCase )
if output_hidden_states:
_A = all_hidden_states + (hidden_states,)
_A = hidden_states
if self.pooler is not None:
_A = torch.flatten(self.pooler(__UpperCAmelCase ) , start_dim=1 )
else:
_A = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__UpperCAmelCase , pooler_output=__UpperCAmelCase , hidden_states=__UpperCAmelCase , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , snake_case_ , )
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : MobileNetVaConfig ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
_A = config.num_labels
_A = MobileNetVaModel(__UpperCAmelCase )
_A = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_A = nn.Dropout(config.classifier_dropout_prob , inplace=__UpperCAmelCase )
_A = nn.Linear(__UpperCAmelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , ):
'''simple docstring'''
_A = return_dict if return_dict is not None else self.config.use_return_dict
_A = self.mobilenet_va(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase )
_A = outputs.pooler_output if return_dict else outputs[1]
_A = self.classifier(self.dropout(__UpperCAmelCase ) )
_A = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_A = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_A = "single_label_classification"
else:
_A = "multi_label_classification"
if self.config.problem_type == "regression":
_A = MSELoss()
if self.num_labels == 1:
_A = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_A = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
_A = CrossEntropyLoss()
_A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_A = BCEWithLogitsLoss()
_A = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
if not return_dict:
_A = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states , )
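# A minimal standalone sketch of the loss dispatch used above: `problem_type`
# is inferred from `num_labels` and the label dtype, then routed to MSELoss,
# CrossEntropyLoss or BCEWithLogitsLoss. The helper name is hypothetical.
def _loss_dispatch_sketch(logits, labels, num_labels):
    import torch
    from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

    if num_labels == 1:
        # regression: one scalar target per example
        return MSELoss()(logits.squeeze(), labels.squeeze().float())
    if labels.dtype in (torch.long, torch.int):
        # single-label classification over `num_labels` classes
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    # multi-label classification: one independent sigmoid per class
    return BCEWithLogitsLoss()(logits, labels.float())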
| 79 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return choice(__a )
def UpperCAmelCase_ (__a : list[int] , __a : int ):
"""simple docstring"""
_a : Dict = random_pivot(__a )
# partition based on pivot
# linear time
_a : Optional[int] = [e for e in lst if e < pivot]
_a : List[str] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__a ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__a ) < k - 1:
return kth_number(__a , k - len(__a ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__a , __a )
if __name__ == "__main__":
import doctest
doctest.testmod()
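    # Hypothetical usage sketch: the partition above drops duplicates of the
    # pivot, so kth_number assumes the input values are distinct.
    print(kth_number([2, 1, 3, 4, 5], 3))  # 3rd smallest -> 3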
| 271 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any , _lowerCamelCase : int) -> Any:
'''simple docstring'''
__UpperCamelCase : Tuple = XCLIPTextConfig()
# derive patch size from model name
__UpperCamelCase : Any = model_name.find("patch")
__UpperCamelCase : Dict = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
__UpperCamelCase : List[str] = XCLIPVisionConfig(patch_size=_lowerCamelCase , num_frames=_lowerCamelCase)
if "large" in model_name:
__UpperCamelCase : Any = 768
__UpperCamelCase : List[str] = 3_072
__UpperCamelCase : Dict = 12
__UpperCamelCase : List[Any] = 1_024
__UpperCamelCase : int = 4_096
__UpperCamelCase : List[Any] = 16
__UpperCamelCase : List[Any] = 24
__UpperCamelCase : Tuple = 768
__UpperCamelCase : Union[str, Any] = 3_072
if model_name == "xclip-large-patch14-16-frames":
__UpperCamelCase : List[Any] = 336
__UpperCamelCase : List[Any] = XCLIPConfig.from_text_vision_configs(_lowerCamelCase , _lowerCamelCase)
if "large" in model_name:
__UpperCamelCase : str = 768
return config
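# Worked trace of the patch-size parsing above: for model_name =
# "xclip-base-patch32", find("patch") returns 11, so model_name[16:18] is "32"
# and the derived patch size is 32. This assumes the patch size is always two
# digits, which holds for the checkpoints handled here (patch14/16/32).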
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict) -> Tuple:
'''simple docstring'''
if name == "token_embedding.weight":
__UpperCamelCase : Tuple = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight")
if name == "positional_embedding":
__UpperCamelCase : Dict = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight")
if "ln_1" in name:
__UpperCamelCase : Union[str, Any] = name.replace("ln_1" , "layer_norm1")
if "ln_2" in name:
__UpperCamelCase : Tuple = name.replace("ln_2" , "layer_norm2")
if "c_fc" in name:
__UpperCamelCase : Dict = name.replace("c_fc" , "fc1")
if "c_proj" in name:
__UpperCamelCase : str = name.replace("c_proj" , "fc2")
if name.startswith("transformer.resblocks"):
__UpperCamelCase : str = name.replace("transformer.resblocks" , "text_model.encoder.layers")
if "attn.out_proj" in name and "message" not in name:
__UpperCamelCase : int = name.replace("attn.out_proj" , "self_attn.out_proj")
if "ln_final" in name:
__UpperCamelCase : Union[str, Any] = name.replace("ln_final" , "text_model.final_layer_norm")
# visual encoder
if name == "visual.class_embedding":
__UpperCamelCase : List[Any] = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding")
if name == "visual.positional_embedding":
__UpperCamelCase : List[str] = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight")
if name.startswith("visual.transformer.resblocks"):
__UpperCamelCase : int = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers")
if "visual.conv1" in name:
__UpperCamelCase : List[Any] = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding")
if "visual.ln_pre" in name:
__UpperCamelCase : Union[str, Any] = name.replace("visual.ln_pre" , "vision_model.pre_layernorm")
if "visual.ln_post" in name:
__UpperCamelCase : Tuple = name.replace("visual.ln_post" , "vision_model.post_layernorm")
if "visual.proj" in name:
__UpperCamelCase : Optional[Any] = name.replace("visual.proj" , "visual_projection.weight")
if "text_projection" in name:
__UpperCamelCase : List[Any] = name.replace("text_projection" , "text_projection.weight")
# things on top
if "prompts_visual_proj" in name:
__UpperCamelCase : int = name.replace("prompts_visual_proj" , "prompts_visual_projection")
if "prompts_visual_ln" in name:
__UpperCamelCase : str = name.replace("prompts_visual_ln" , "prompts_visual_layernorm")
# mit
if name == "mit.positional_embedding":
__UpperCamelCase : Dict = name.replace("positional" , "position")
if name.startswith("mit.resblocks"):
__UpperCamelCase : str = name.replace("mit.resblocks" , "mit.encoder.layers")
# prompts generator
if name.startswith("prompts_generator.norm"):
__UpperCamelCase : Any = name.replace("prompts_generator.norm" , "prompts_generator.layernorm")
return name
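# Worked trace of the renaming above: "visual.transformer.resblocks.0.ln_1.weight"
# first has "ln_1" rewritten, then the "visual.transformer.resblocks" prefix,
# yielding "vision_model.encoder.layers.0.layer_norm1.weight".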
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]) -> str:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__UpperCamelCase : Union[str, Any] = orig_state_dict.pop(_lowerCamelCase)
if "attn.in_proj" in key:
__UpperCamelCase : Optional[int] = key.split(".")
if key.startswith("visual"):
__UpperCamelCase : Optional[Any] = key_split[3]
__UpperCamelCase : List[str] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__UpperCamelCase : str = val[
:dim, :
]
__UpperCamelCase : Tuple = val[
dim : dim * 2, :
]
__UpperCamelCase : str = val[
-dim:, :
]
else:
__UpperCamelCase : str = val[
:dim
]
__UpperCamelCase : Optional[Any] = val[
dim : dim * 2
]
__UpperCamelCase : List[str] = val[
-dim:
]
else:
if "weight" in key:
__UpperCamelCase : Optional[Any] = val[
:dim, :
]
__UpperCamelCase : Optional[int] = val[
dim : dim * 2, :
]
__UpperCamelCase : Union[str, Any] = val[
-dim:, :
]
else:
__UpperCamelCase : Optional[Any] = val[:dim]
__UpperCamelCase : Optional[int] = val[
dim : dim * 2
]
__UpperCamelCase : List[str] = val[-dim:]
elif key.startswith("mit"):
__UpperCamelCase : List[Any] = key_split[2]
__UpperCamelCase : Optional[int] = config.vision_config.mit_hidden_size
if "weight" in key:
__UpperCamelCase : Any = val[:dim, :]
__UpperCamelCase : Optional[Any] = val[dim : dim * 2, :]
__UpperCamelCase : Dict = val[-dim:, :]
else:
__UpperCamelCase : Optional[int] = val[:dim]
__UpperCamelCase : Optional[int] = val[dim : dim * 2]
__UpperCamelCase : Dict = val[-dim:]
else:
__UpperCamelCase : List[str] = key_split[2]
__UpperCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
__UpperCamelCase : Optional[Any] = val[:dim, :]
__UpperCamelCase : Union[str, Any] = val[
dim : dim * 2, :
]
__UpperCamelCase : Optional[Any] = val[-dim:, :]
else:
__UpperCamelCase : Any = val[:dim]
__UpperCamelCase : Union[str, Any] = val[
dim : dim * 2
]
__UpperCamelCase : Optional[int] = val[-dim:]
else:
__UpperCamelCase : Dict = rename_key(_lowerCamelCase)
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__UpperCamelCase : List[Any] = val.T
__UpperCamelCase : Any = val
return orig_state_dict
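# The branches above all apply the same idea: a fused attention `in_proj`
# matrix of shape (3 * dim, dim) is cut into equal query/key/value blocks.
# A minimal standalone sketch (hypothetical helper name):
def _split_fused_qkv_sketch(in_proj_weight, dim):
    query = in_proj_weight[:dim, :]  # rows [0, dim)
    key = in_proj_weight[dim : dim * 2, :]  # rows [dim, 2*dim)
    value = in_proj_weight[-dim:, :]  # rows [2*dim, 3*dim)
    return query, key, value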
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any]) -> Tuple:
'''simple docstring'''
    if num_frames == 8:
        __UpperCamelCase : Any = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        __UpperCamelCase : Any = "eating_spaghetti.npy"
    elif num_frames == 32:
        __UpperCamelCase : Tuple = "eating_spaghetti_32_frames.npy"
    else:
        raise ValueError(f"Unsupported num_frames: {num_frames} (expected 8, 16 or 32).")
__UpperCamelCase : Optional[int] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename=_lowerCamelCase , repo_type="dataset" , )
__UpperCamelCase : Optional[Any] = np.load(_lowerCamelCase)
return list(_lowerCamelCase)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[Any]=False) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Any = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
__UpperCamelCase : Union[str, Any] = model_to_url[model_name]
__UpperCamelCase : Tuple = 8
if "16-frames" in model_name:
__UpperCamelCase : Optional[int] = 16
elif "shot" in model_name:
__UpperCamelCase : List[Any] = 32
__UpperCamelCase : Optional[int] = get_xclip_config(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : Optional[Any] = XCLIPModel(_lowerCamelCase)
model.eval()
if "drive" in checkpoint_url:
__UpperCamelCase : str = "pytorch_model.bin"
gdown.cached_download(_lowerCamelCase , _lowerCamelCase , quiet=_lowerCamelCase)
__UpperCamelCase : Optional[Any] = torch.load(_lowerCamelCase , map_location="cpu")["model"]
else:
__UpperCamelCase : Optional[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase)["model"]
__UpperCamelCase : Dict = convert_state_dict(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : Dict = XCLIPModel(_lowerCamelCase)
__UpperCamelCase , __UpperCamelCase : Dict = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase)
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__UpperCamelCase : str = 336 if model_name == "xclip-large-patch14-16-frames" else 224
__UpperCamelCase : str = VideoMAEImageProcessor(size=_lowerCamelCase)
__UpperCamelCase : Any = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
__UpperCamelCase : Optional[int] = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
__UpperCamelCase : Optional[Any] = XCLIPProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase)
__UpperCamelCase : str = prepare_video(_lowerCamelCase)
__UpperCamelCase : Tuple = processor(
text=["playing sports", "eating spaghetti", "go shopping"] , videos=_lowerCamelCase , return_tensors="pt" , padding=_lowerCamelCase)
print("Shape of pixel values:" , inputs.pixel_values.shape)
with torch.no_grad():
__UpperCamelCase : Optional[int] = model(**_lowerCamelCase)
# Verify outputs
__UpperCamelCase : Union[str, Any] = outputs.logits_per_video
__UpperCamelCase : List[str] = logits_per_video.softmax(dim=1)
print("Probs:" , _lowerCamelCase)
# kinetics-400
if model_name == "xclip-base-patch32":
__UpperCamelCase : int = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]])
elif model_name == "xclip-base-patch32-16-frames":
__UpperCamelCase : List[str] = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
elif model_name == "xclip-base-patch16":
__UpperCamelCase : Any = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]])
elif model_name == "xclip-base-patch16-16-frames":
__UpperCamelCase : int = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
elif model_name == "xclip-large-patch14":
__UpperCamelCase : Optional[Any] = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]])
elif model_name == "xclip-large-patch14-16-frames":
__UpperCamelCase : List[str] = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__UpperCamelCase : Dict = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]])
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__UpperCamelCase : int = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
elif model_name == "xclip-large-patch14-kinetics-600":
__UpperCamelCase : List[Any] = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]])
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__UpperCamelCase : Union[str, Any] = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__UpperCamelCase : Dict = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__UpperCamelCase : Any = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__UpperCamelCase : Optional[int] = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
elif model_name == "xclip-base-patch16-ucf-2-shot":
__UpperCamelCase : Tuple = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
elif model_name == "xclip-base-patch16-ucf-4-shot":
__UpperCamelCase : Union[str, Any] = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
elif model_name == "xclip-base-patch16-ucf-8-shot":
__UpperCamelCase : List[str] = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]])
elif model_name == "xclip-base-patch16-ucf-16-shot":
__UpperCamelCase : Any = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__UpperCamelCase : int = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
else:
raise ValueError(F'Model name {model_name} not supported')
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
model.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model, processor and slow tokenizer files to the hub...")
model.push_to_hub(_lowerCamelCase , organization="nielsr")
processor.push_to_hub(_lowerCamelCase , organization="nielsr")
slow_tokenizer.push_to_hub(_lowerCamelCase , organization="nielsr")
if __name__ == "__main__":
lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowercase : Any = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 151 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase : List[Any] = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
    lowercase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 151 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width( height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
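# Worked trace of the helper above: with scale_factor=8, height = width = 768
# gives 768 // 64 = 12 with no remainder, so it returns (96, 96), the latent
# resolution (1/8th of the requested size, rounded up to a whole multiple).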
class _A ( _a ):
"""simple docstring"""
def __init__( self : Tuple , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : DDPMScheduler , __UpperCAmelCase : VQModel , ):
super().__init__()
self.register_modules(
unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , movq=__UpperCAmelCase , )
a : int = 2 ** (len(self.movq.config.block_out_channels) - 1)
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str]):
if latents is None:
a : Any = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase)
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
a : Dict = latents.to(__UpperCAmelCase)
a : Dict = latents * scheduler.init_noise_sigma
return latents
def __snake_case ( self : Any , __UpperCAmelCase : List[Any]=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
a : Dict = torch.device(f'''cuda:{gpu_id}''')
a : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : List[Any] , __UpperCAmelCase : Union[str, Any]=0):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
a : List[Any] = torch.device(f'''cuda:{gpu_id}''')
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__UpperCAmelCase)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a : Any = None
for cpu_offloaded_model in [self.unet, self.movq]:
a , a : Tuple = cpu_offload_with_hook(__UpperCAmelCase , __UpperCAmelCase , prev_module_hook=__UpperCAmelCase)
# We'll offload the last model manually.
a : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case ( self : Optional[int]):
if not hasattr(self.unet , "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCAmelCase , "_hf_hook")
and hasattr(module._hf_hook , "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCAmelCase)
def __call__( self : Optional[Any] , __UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 4.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , ):
a : Optional[int] = self._execution_device
a : int = guidance_scale > 1.0
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : Union[str, Any] = torch.cat(__UpperCAmelCase , dim=0)
a : int = image_embeds.shape[0] * num_images_per_prompt
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : List[str] = torch.cat(__UpperCAmelCase , dim=0)
if do_classifier_free_guidance:
a : Union[str, Any] = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0)
a : Tuple = negative_image_embeds.repeat_interleave(__UpperCAmelCase , dim=0)
a : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=__UpperCAmelCase)
self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase)
a : Union[str, Any] = self.scheduler.timesteps
a : Any = self.unet.config.in_channels
a , a : str = downscale_height_and_width(__UpperCAmelCase , __UpperCAmelCase , self.movq_scale_factor)
# create initial latent
a : Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCAmelCase)):
# expand the latents if we are doing classifier free guidance
a : str = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
a : Dict = {"image_embeds": image_embeds}
a : Tuple = self.unet(
sample=__UpperCAmelCase , timestep=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , added_cond_kwargs=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0]
if do_classifier_free_guidance:
a , a : int = noise_pred.split(latents.shape[1] , dim=1)
a , a : Any = noise_pred.chunk(2)
a , a : int = variance_pred.chunk(2)
a : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a : str = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , "variance_type")
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a , a : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
a : Any = self.scheduler.step(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase , )[0]
# post-processing
a : Tuple = self.movq.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase)["sample"]
if output_type not in ["pt", "np", "pil"]:
        raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''')
if output_type in ["np", "pil"]:
a : Optional[Any] = image * 0.5 + 0.5
a : Union[str, Any] = image.clamp(0 , 1)
a : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
a : Optional[int] = self.numpy_to_pil(__UpperCAmelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase)
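# The guidance step above is standard classifier-free guidance: the unet runs
# on a doubled batch (unconditional + conditional) and the two predictions are
# recombined. A minimal standalone sketch (hypothetical helper name):
def _cfg_combine_sketch(noise_pred, guidance_scale):
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)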
| 40 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class _A ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase : int = 1_0_0_0_0
UpperCAmelCase : Optional[List[str]] = None
UpperCAmelCase : Optional[datasets.Features] = None
class _A ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase : str = ParquetConfig
def __snake_case ( self : Tuple):
return datasets.DatasetInfo(features=self.config.features)
def __snake_case ( self : List[Any] , __UpperCAmelCase : str):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
a : str = dl_manager.download_and_extract(self.config.data_files)
if isinstance(__UpperCAmelCase , (str, list, tuple)):
a : Dict = data_files
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
a : Dict = []
for split_name, files in data_files.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__UpperCAmelCase):
with open(__UpperCAmelCase , "rb") as f:
a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase))
break
splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files}))
return splits
def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema)
return pa_table
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int):
a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema) != sorted(self.config.columns):
raise ValueError(
f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)):
with open(__UpperCAmelCase , "rb") as f:
a : Tuple = pq.ParquetFile(__UpperCAmelCase)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
a : Optional[Any] = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase)
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''')
raise
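# A minimal standalone sketch of the batched parquet read above, using only
# pyarrow (hypothetical helper name; path is supplied by the caller):
def _iter_parquet_batches_sketch(path, batch_size=10_000, columns=None):
    import pyarrow as pa
    import pyarrow.parquet as pq

    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size, columns=columns):
        yield pa.Table.from_batches([record_batch])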
| 40 | 1 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_lowercase = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_lowercase = """\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
"""
_lowercase = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def A (__lowerCamelCase :Dict ):
def remove_articles(__lowerCamelCase :Optional[Any] ):
_lowerCAmelCase = re.compile(r"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(UpperCamelCase__ , """ """ , UpperCamelCase__ )
def white_space_fix(__lowerCamelCase :Any ):
return " ".join(text.split() )
def remove_punc(__lowerCamelCase :int ):
_lowerCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCamelCase :Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase__ ) ) ) )
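# Worked trace of the normalization above: "The Cat!" is lowercased to
# "the cat!", punctuation is stripped, the article "the" is removed, and the
# whitespace fix collapses the result to "cat".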
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Dict ):
return int(normalize_answer(UpperCamelCase__ ) == normalize_answer(UpperCamelCase__ ) )
def A (__lowerCamelCase :Any , __lowerCamelCase :Union[str, Any] ):
_lowerCAmelCase = [any(compute_exact(UpperCamelCase__ , UpperCamelCase__ ) for ref in refs ) for pred, refs in zip(UpperCamelCase__ , UpperCamelCase__ )]
return (sum(UpperCamelCase__ ) / len(UpperCamelCase__ )) * 100
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Any , __lowerCamelCase :str , __lowerCamelCase :str ):
_lowerCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
_lowerCAmelCase = Counter(UpperCamelCase__ )
_lowerCAmelCase = Counter(UpperCamelCase__ )
_lowerCAmelCase = Counter()
for sgram, scount in sgramcounter.items():
_lowerCAmelCase = scount * numref
_lowerCAmelCase = Counter(UpperCamelCase__ )
_lowerCAmelCase = Counter()
for cgram, ccount in cgramcounter.items():
_lowerCAmelCase = ccount * numref
# KEEP
_lowerCAmelCase = sgramcounter_rep & cgramcounter_rep
_lowerCAmelCase = keepgramcounter_rep & rgramcounter
_lowerCAmelCase = sgramcounter_rep & rgramcounter
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
_lowerCAmelCase = 1
if len(UpperCamelCase__ ) > 0:
_lowerCAmelCase = keeptmpscorea / len(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowerCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowerCAmelCase = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowerCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowerCAmelCase = sgramcounter_rep - cgramcounter_rep
_lowerCAmelCase = delgramcounter_rep - rgramcounter
_lowerCAmelCase = sgramcounter_rep - rgramcounter
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
if len(UpperCamelCase__ ) > 0:
_lowerCAmelCase = deltmpscorea / len(UpperCamelCase__ )
# ADDITION
_lowerCAmelCase = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
_lowerCAmelCase = set(UpperCamelCase__ ) & set(UpperCamelCase__ )
_lowerCAmelCase = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
_lowerCAmelCase = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
_lowerCAmelCase = 1
if len(UpperCamelCase__ ) > 0:
_lowerCAmelCase = addtmpscore / len(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
_lowerCAmelCase = addtmpscore / len(UpperCamelCase__ )
_lowerCAmelCase = 0
if addscore_precision > 0 or addscore_recall > 0:
_lowerCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def A (__lowerCamelCase :Any , __lowerCamelCase :List[Any] , __lowerCamelCase :Optional[int] ):
_lowerCAmelCase = len(UpperCamelCase__ )
_lowerCAmelCase = ssent.split(""" """ )
_lowerCAmelCase = csent.split(""" """ )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for rsent in rsents:
_lowerCAmelCase = rsent.split(""" """ )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
ragramslist.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
_lowerCAmelCase = ragrams[i] + """ """ + ragrams[i + 1]
ragrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
_lowerCAmelCase = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2]
ragrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
_lowerCAmelCase = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3]
ragrams.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
ragramslist.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
_lowerCAmelCase = sagrams[i] + """ """ + sagrams[i + 1]
sagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
_lowerCAmelCase = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2]
sagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
_lowerCAmelCase = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3]
sagrams.append(UpperCamelCase__ )
for i in range(0 , len(UpperCamelCase__ ) - 1 ):
if i < len(UpperCamelCase__ ) - 1:
_lowerCAmelCase = cagrams[i] + """ """ + cagrams[i + 1]
cagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 2:
_lowerCAmelCase = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2]
cagrams.append(UpperCamelCase__ )
if i < len(UpperCamelCase__ ) - 3:
_lowerCAmelCase = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3]
cagrams.append(UpperCamelCase__ )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_lowerCAmelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowerCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4
_lowerCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4
_lowerCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def A (__lowerCamelCase :List[str] , __lowerCamelCase :bool = True , __lowerCamelCase :str = "13a" , __lowerCamelCase :bool = True ):
if lowercase:
_lowerCAmelCase = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_lowerCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase__ )()(UpperCamelCase__ )
else:
_lowerCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase__ )
elif tokenizer == "moses":
_lowerCAmelCase = sacremoses.MosesTokenizer().tokenize(UpperCamelCase__ , return_str=UpperCamelCase__ , escape=UpperCamelCase__ )
elif tokenizer == "penn":
_lowerCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase__ , return_str=UpperCamelCase__ )
else:
_lowerCAmelCase = sentence
if not return_str:
_lowerCAmelCase = normalized_sent.split()
return normalized_sent
def A (__lowerCamelCase :int , __lowerCamelCase :Tuple , __lowerCamelCase :Union[str, Any] ):
if not (len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == len(UpperCamelCase__ )):
raise ValueError("""Sources length must match predictions and references lengths.""" )
_lowerCAmelCase = 0
for src, pred, refs in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
sari_score += SARIsent(normalize(UpperCamelCase__ ) , normalize(UpperCamelCase__ ) , [normalize(UpperCamelCase__ ) for sent in refs] )
_lowerCAmelCase = sari_score / len(UpperCamelCase__ )
return 100 * sari_score
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :int , __lowerCamelCase :Union[str, Any]="exp" , __lowerCamelCase :List[str]=None , __lowerCamelCase :List[str]=False , __lowerCamelCase :Dict=False , __lowerCamelCase :str=False , ):
_lowerCAmelCase = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
_lowerCAmelCase = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
_lowerCAmelCase = sacrebleu.corpus_bleu(
UpperCamelCase__ , UpperCamelCase__ , smooth_method=UpperCamelCase__ , smooth_value=UpperCamelCase__ , force=UpperCamelCase__ , lowercase=UpperCamelCase__ , use_effective_order=UpperCamelCase__ , )
return output.score
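# Worked trace of the reference transpose above: sacrebleu expects one list
# per reference position rather than one list per prediction, so
# [[r1a, r1b], [r2a, r2b]] becomes [[r1a, r2a], [r1b, r2b]].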
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = {}
result.update({"""sari""": compute_sari(sources=_a , predictions=_a , references=_a )} )
result.update({"""sacrebleu""": compute_sacrebleu(predictions=_a , references=_a )} )
result.update({"""exact""": compute_em(predictions=_a , references=_a )} )
return result
| 363 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[Any] = '''convbert'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=768 , _lowercase=2 , _lowercase=9 , _lowercase=1 , _lowercase=None , **_lowercase , ):
"""simple docstring"""
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = embedding_size
_lowerCAmelCase = head_ratio
_lowerCAmelCase = conv_kernel_size
_lowerCAmelCase = num_groups
_lowerCAmelCase = classifier_dropout
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
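# The dynamic_axis mapping above marks which ONNX input dimensions may vary at
# runtime: for example {0: "batch", 1: "sequence"} declares both the batch and
# sequence dimensions of input_ids, attention_mask and token_type_ids dynamic.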
| 229 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
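# A minimal standalone sketch of the same lazy-import idea using a module-level
# PEP 562 __getattr__ (names here are hypothetical; this assumes the module
# lives inside a package so the relative import resolves):
import importlib

_LAZY_ATTRS = {"BertModel": ".modeling_bert"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")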
| 268 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase_ :
def __init__( self : str ) -> Dict:
UpperCAmelCase_ : List[Any] = ""
UpperCAmelCase_ : int = ""
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : List[Any] = 256
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : List[str] = 0
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
UpperCAmelCase_ : Dict = cva.imread(lowerCAmelCase_ , 0 )
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.img )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
UpperCAmelCase_ : List[Any] = np.sum(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase_ : List[Any] = x[i] / self.k
self.sk += prk
UpperCAmelCase_ : Optional[Any] = (self.L - 1) * self.sk
if self.rem != 0:
            UpperCAmelCase_ : Any = last % 1  # fractional part, used for rounding below
UpperCAmelCase_ : List[str] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase_ : Dict = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase_ : Any = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase_ : Tuple = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    lowerCamelCase_ = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
lowerCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
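    # Hypothetical comparison, assuming the input loads as a single-channel
    # uint8 image: OpenCV's built-in equalization performs a similar mapping.
    equalized = cva.equalizeHist(cva.imread(file_path, 0))
    cva.imwrite("output_data/equalized.jpg", equalized)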
| 268 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( lowerCAmelCase = 4_00_00_00 ):
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase , _lowerCAmelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = b, a + b
return sum(lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 358 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = cva.getAffineTransform(lowerCAmelCase , lowerCAmelCase )
return cva.warpAffine(lowerCAmelCase , lowerCAmelCase , (rows, cols) )
if __name__ == "__main__":
# read original image
A__ : Any =cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
A__ : Any =cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A__ , A__ : Tuple =gray_img.shape
# set different points to rotate image
A__ : Optional[int] =np.array([[50, 50], [2_00, 50], [50, 2_00]], np.float32)
A__ : Tuple =np.array([[10, 1_00], [2_00, 50], [1_00, 2_50]], np.float32)
A__ : List[str] =np.array([[50, 50], [1_50, 50], [1_20, 2_00]], np.float32)
A__ : Dict =np.array([[10, 1_00], [80, 50], [1_80, 2_50]], np.float32)
# add all rotated images in a list
A__ : List[Any] =[
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A__ : Tuple =plt.figure(1)
A__ : int =['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 220 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (DDPMScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Dict = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def A_ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase )
def A_ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
self.check_over_configs(thresholding=lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , )
def A_ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=lowercase )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
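        # For the default linear beta schedule the posterior variance is ~0 at
        # t=0, grows with t, and approaches beta_end (0.02) near t=999, which
        # is what the three asserts above pin down.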
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : Optional[int] = scheduler_class(**lowercase )
_lowerCamelCase : Tuple = len(lowercase )
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : Tuple = self.dummy_sample_deter
_lowerCamelCase : Tuple = torch.manual_seed(0 )
for t in reversed(range(lowercase ) ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : str = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase : List[str] = pred_prev_sample
_lowerCamelCase : List[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2
assert abs(result_mean.item() - 0.33_72 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : str = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config(prediction_type='v_prediction' )
_lowerCamelCase : List[Any] = scheduler_class(**lowercase )
_lowerCamelCase : List[Any] = len(lowercase )
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : Any = torch.manual_seed(0 )
for t in reversed(range(lowercase ) ):
# 1. predict noise residual
_lowerCamelCase : Union[str, Any] = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Any = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : List[str] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2
assert abs(result_mean.item() - 0.26_31 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**lowercase )
_lowerCamelCase : Dict = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowercase )
_lowerCamelCase : Any = scheduler.timesteps
for i, timestep in enumerate(lowercase ):
if i == len(lowercase ) - 1:
_lowerCamelCase : Optional[int] = -1
else:
_lowerCamelCase : Optional[Any] = timesteps[i + 1]
_lowerCamelCase : Optional[int] = scheduler.previous_timestep(lowercase )
_lowerCamelCase : Tuple = prev_t.item()
self.assertEqual(lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : List[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(lowercase , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=lowercase )
def A_ ( self ):
_lowerCamelCase : str = self.scheduler_classes[0]
_lowerCamelCase : List[Any] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : int = [100, 87, 50, 1, 0]
_lowerCamelCase : Optional[int] = len(lowercase )
with self.assertRaises(lowercase , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=lowercase , timesteps=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**lowercase )
_lowerCamelCase : int = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowercase , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=lowercase ) | 96 |
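# Illustrative sketch (not part of the test file): the denoising loop the
# full-loop tests above exercise, written against the public scheduler API.
# The zero "model output" is a stand-in for a trained UNet's noise prediction.
#
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in reversed(range(len(scheduler))):
#         residual = torch.zeros_like(sample)
#         sample = scheduler.step(residual, t, sample).prev_sample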
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
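# With this lazy structure, `import transformers` stays cheap: the heavy
# torch-backed module loads only on first attribute access, e.g. (illustrative)
#   from transformers import ViTMSNConfig
#   config = ViTMSNConfig()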
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : int = ort.SessionOptions()
__lowerCAmelCase : Tuple = False
return options
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
__lowerCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
__lowerCAmelCase : Any = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = "A red cat sitting on a park bench"
__lowerCAmelCase : Optional[int] = np.random.RandomState(0)
__lowerCAmelCase : str = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type="np" , )
__lowerCAmelCase : Optional[int] = output.images
__lowerCAmelCase : List[Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__lowerCAmelCase : Dict = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
__lowerCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
__lowerCAmelCase : List[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx")
__lowerCAmelCase : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = "A red cat sitting on a park bench"
__lowerCAmelCase : Optional[int] = np.random.RandomState(0)
__lowerCAmelCase : Optional[int] = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=20 , generator=_SCREAMING_SNAKE_CASE , output_type="np" , )
__lowerCAmelCase : List[str] = output.images
__lowerCAmelCase : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__lowerCAmelCase : Tuple = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 | 58 |
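# These are nightly GPU integration tests. An illustrative invocation
# (flag/env names assumed from diffusers' test conventions):
#   RUN_NIGHTLY=1 python -m pytest tests/pipelines -k "onnx and inpaint"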
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _lowercase ( __snake_case ) -> Dict:
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] ,unknown_args[1::2] )}
def _lowercase ( ) -> Union[str, Any]:
__lowerCAmelCase : List[str] = ArgumentParser(
"HuggingFace Datasets CLI tool" ,usage="datasets-cli <command> [<args>]" ,allow_abbrev=__snake_case )
__lowerCAmelCase : str = parser.add_subparsers(help="datasets-cli command helpers" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(__snake_case )
EnvironmentCommand.register_subcommand(__snake_case )
TestCommand.register_subcommand(__snake_case )
RunBeamCommand.register_subcommand(__snake_case )
DummyDataCommand.register_subcommand(__snake_case )
# Parse args
__lowerCAmelCase , __lowerCAmelCase : Any = parser.parse_known_args()
if not hasattr(__snake_case ,"func" ):
parser.print_help()
exit(1 )
__lowerCAmelCase : List[Any] = parse_unknown_args(__snake_case )
# Run
__lowerCAmelCase : Union[str, Any] = args.func(__snake_case ,**__snake_case )
service.run()
if __name__ == "__main__":
main() | 58 | 1 |
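# Illustrative invocation: `datasets-cli env` dispatches to EnvironmentCommand
# and prints version information useful for bug reports.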
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Modular inverse of a mod m via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
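# Quick sanity check (illustrative): 3 * 9 = 27 ≡ 1 (mod 26),
# so find_mod_inverse(3, 26) == 9.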
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LayoutLMv2FeatureExtractor"]
__UpperCAmelCase = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )
    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
"""simple docstring"""
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : int = UperNetForSemanticSegmentation.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
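# Example invocation (illustrative): keep the first 128 lines of every file:
#   python minify.py ./wmt_en_ro ./wmt_en_ro_mini 128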
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "funnel"
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self, lowerCAmelCase__=3_0522, lowerCAmelCase__=[4, 4, 4], lowerCAmelCase__=None, lowerCAmelCase__=2, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=64, lowerCAmelCase__=3072, lowerCAmelCase__="gelu_new", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0, lowerCAmelCase__=0.1, lowerCAmelCase__=None, lowerCAmelCase__=1e-9, lowerCAmelCase__="mean", lowerCAmelCase__="relative_shift", lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]:
snake_case_ = vocab_size
snake_case_ = block_sizes
snake_case_ = [1] * len(lowerCAmelCase__) if block_repeats is None else block_repeats
assert len(lowerCAmelCase__) == len(
self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."
snake_case_ = num_decoder_layers
snake_case_ = d_model
snake_case_ = n_head
snake_case_ = d_head
snake_case_ = d_inner
snake_case_ = hidden_act
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = initializer_range
snake_case_ = initializer_std
snake_case_ = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
snake_case_ = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
snake_case_ = attention_type
snake_case_ = separate_cls
snake_case_ = truncate_seq
snake_case_ = pool_q_only
super().__init__(**lowerCAmelCase__)
@property
def a_ ( self) -> Optional[Any]:
return sum(self.block_sizes)
@num_hidden_layers.setter
def a_ ( self, lowerCAmelCase__) -> Tuple:
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.')
@property
def a_ ( self) -> Optional[int]:
return len(self.block_sizes)
@num_blocks.setter
def a_ ( self, lowerCAmelCase__) -> List[Any]:
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.')
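# Illustrative usage: `num_hidden_layers` is derived, not stored.
#   config = FunnelConfig(block_sizes=[2, 2, 2])
#   config.num_hidden_layers  # -> 6, the sum of block_sizes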
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class ProcessorGradientFlow:
    """CLIP preprocessing that keeps the autograd graph intact (no PIL round-trip)."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        """Instantiate with either a pre-loaded VQGAN or a config/checkpoint path pair."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z
    def _add_vector(self, transform_vector):
        """Add the transform vector to the base latent and decode."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image edit driven by positive/negative CLIP prompts."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
        self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
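# Illustrative usage (paths are hypothetical placeholders):
#   editor = VQGAN_CLIP(vqgan_config="configs/vqgan.yaml", vqgan_checkpoint="ckpts/vqgan.ckpt")
#   editor.generate("a smiling face", neg_prompts="a frowning face", image_path="face.png")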
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (p itself must be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
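# Expected output (illustrative): 2**7 - 1 = 127 is prime, so True;
# 2**11 - 1 = 2047 = 23 * 89 is composite, so False.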
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the prime partitions of n, each encoded as the product of its parts."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest n with more than number_unique_partitions prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
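# Illustration of the product encoding: the prime partitions of 5 are
# {5} and {2, 3}; as products these are 5 and 6, so partition(5) == {5, 6}.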
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
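# Illustrative use of the mixin (class/attribute names assumed): a concrete
# test sets the block under test and reuses the shared checks, e.g.
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D
#       block_type = "down"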
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
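    # Illustrative check (not in the original): show_min returns rather than
    # prints; after floyd_warshall(), graph.show_min(1, 4) == 11
    # (path 1 -> 3 -> 4: 5 + 6) and graph.show_min(0, 3) == 16
    # (path 0 -> 2 -> 3: 9 + 7).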
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Dict = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
with open(log, 'r') as f:
for line in f:
lowercase_ : List[str] = json.loads(line)
if line.get('nodeid', '') != "":
lowercase_ : List[str] = line['nodeid']
if line.get('duration', None) is not None:
lowercase_ : Tuple = f"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowercase_ : List[Any] = []
log.unlink()
lowercase_ : int = ''
lowercase_ : int = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
lowercase_ : Optional[Any] = []
lowercase_ : Any = {}
for test in failed_tests:
lowercase_ : List[str] = test[0].split('::')
lowercase_ : int = data[0].split('/')[-1]
if data[0] not in filesafailed:
lowercase_ : Dict = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowercase_ : Any = [test[0] for test in failed_table]
lowercase_ : Optional[Any] = list(set(files))
# Count number of instances in failed_tests
lowercase_ : Optional[Any] = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowercase_ : Optional[Any] = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
lowercase_ : List[Any] = 'Too many failed tests, please see the full report in the Action results.'
lowercase_ : Union[str, Any] = len(err) + 10
lowercase_ : Tuple = message[: 30_00 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
lowercase_ : int = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
lowercase_ : Union[str, Any] = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
lowercase_ : List[Any] = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
lowercase_ : List[Any] = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowercase_ : List[str] = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowercase_ : Any = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
lowercase_ : Any = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowercase_ : Optional[int] = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowercase_ : Tuple = row[0]
else:
lowercase_ : Tuple = ''
lowercase_ : int = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
snake_case_ = datasets.load_iris()
snake_case_ = np.array(data["""data"""])
snake_case_ = np.array(data["""target"""])
snake_case_ = data["""target_names"""]
snake_case_ = train_test_split(X, y)
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
return np.linalg.norm(np.array(lowercase_ ) - np.array(lowercase_ ) )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=5 ):
UpperCAmelCase = zip(lowercase_ , lowercase_ )
# List of distances of all points from the point to be classified
UpperCAmelCase = []
for data_point in data:
UpperCAmelCase = euclidean_distance(data_point[0] , lowercase_ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
UpperCAmelCase = [i[1] for i in sorted(lowercase_ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
UpperCAmelCase = Counter(lowercase_ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
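# Illustrative note: the query point [4.4, 3.1, 1.3, 1.4] sits in the
# setosa region of iris feature space, so this typically prints "setosa".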
"""simple docstring"""
from collections import deque
class A_ :
"""simple docstring"""
def __init__( self :Any , lowercase_ :str , lowercase_ :int , lowercase_ :int ) -> None:
UpperCAmelCase = process_name # process name
UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCAmelCase = arrival_time
UpperCAmelCase = burst_time # remaining burst time
UpperCAmelCase = 0 # total time of the process wait in ready queue
UpperCAmelCase = 0 # time from arrival time to completion time
class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence
    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times
    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times
    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times
    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]
    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # every queue except the last one uses the round-robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue uses the first-come, first-served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
snake_case_ = Process("""P1""", 0, 53)
snake_case_ = Process("""P2""", 0, 17)
snake_case_ = Process("""P3""", 0, 68)
snake_case_ = Process("""P4""", 0, 24)
snake_case_ = 3
snake_case_ = [17, 25]
snake_case_ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
snake_case_ = Process("""P1""", 0, 53)
snake_case_ = Process("""P2""", 0, 17)
snake_case_ = Process("""P3""", 0, 68)
snake_case_ = Process("""P4""", 0, 24)
snake_case_ = 3
snake_case_ = [17, 25]
snake_case_ = deque([Pa, Pa, Pa, Pa])
snake_case_ = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case_ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
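    # Hand-traced expected output for the inputs above (assuming the
    # arrival-time handling `self.current_time = cp.arrival_time` used in
    # first_come_first_served and round_robin):
    #   waiting time:    [83, 17, 94, 101]
    #   completion time: [136, 34, 204, 125]
    #   turnaround time: [136, 34, 204, 125]
    #   sequence:        ['P2', 'P4', 'P1', 'P3']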
| 181 | 0 |
def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
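# For example, count_divisors(28) == 6, since 28 = 2**2 * 7 gives
# (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.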
def solution() -> int:
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 329 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
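# A sketch of how this test module is usually run (the path is an assumption):
#   pytest tests/models/lxmert/test_tokenization_lxmert.py -q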
| 176 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 361 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
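# This integration test is gated behind the @slow marker; in the transformers
# test suite such tests are typically enabled via the RUN_SLOW environment
# variable, e.g.:
#   RUN_SLOW=1 pytest tests/models/camembert/ -k tf -q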
| 327 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory between tests
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 332 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
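# A sketch of a typical invocation (the script filename is an assumption):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny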
| 278 | 0 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__UpperCAmelCase = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__UpperCAmelCase = spec.loader.load_module()
__UpperCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__UpperCAmelCase = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__UpperCAmelCase = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 257 |
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort non-negative integers in place, one digit at a time, least-significant first."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per digit value
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets by the current digit
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
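# For example (a quick sanity check):
#   radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#   -> [2, 24, 45, 66, 75, 90, 170, 802]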
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __lowerCAmelCase (_UpperCamelCase ):
create_state_space_tree(_UpperCamelCase , [] , 0 )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if index == len(_UpperCamelCase ):
print(_UpperCamelCase )
return
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 )
current_subsequence.pop()
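# For sequence [1, 2] the print order is: [], [2], [1], [1, 2]
# (the skip branch is always explored before the include branch).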
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq) | 86 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """Run the TensorFlow benchmark, translating deprecated `--no_*` flags into errors."""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
| 32 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 367 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num
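# For example, solution(15) == 26, since 2**15 == 32768 and 3+2+7+6+8 == 26.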
if __name__ == "__main__":
snake_case_ = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
snake_case_ = solution(power)
print('Sum of the digits is: ', result)
| 238 | 0 |
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: maximum revenue from cutting a rod of length n."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic programming."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic programming."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]
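# A quick worked check with the classic CLRS prices: for prices = [1, 5, 8, 9],
# bottom_up_cut_rod(4, prices) == 10 (two pieces of length 2, earning 5 + 5).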
def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)
def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 103 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
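# fire exposes `minify` as a CLI; a typical call (the paths are placeholders):
#   python minify.py /data/wmt_en_ro /data/wmt_en_ro_tiny 32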
if __name__ == "__main__":
fire.Fire(minify)
| 103 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak a fairseq Hubert checkpoint into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
SCREAMING_SNAKE_CASE :str = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
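# Example invocation (a sketch only; the checkpoint and output paths below are hypothetical):
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base-converted \
#       --not_finetuned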
| 359 |
import copy
import re
class TrialShortNamer:
    """Builds short, reversible run names from non-default hyperparameter values."""

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # try the next suffix on collision (otherwise this would loop forever)
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
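# Usage sketch (hypothetical subclass and hyperparameter values, for illustration only):
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#   RunNamer.shortname({"learning_rate": 3e-4, "batch_size": 32})  # -> "run_lr0.0003"
#   RunNamer.parse_repr("run_lr0.0003")  # -> {"learning_rate": 0.0003, "batch_size": 32}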
| 124 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Speech2Text tokenizer backed by a SentencePiece model and a JSON vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
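# Usage sketch (the checkpoint id is the hub entry referenced above; output is illustrative):
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids  # SentencePiece pieces mapped through vocab.json, plus </s>
#   tokenizer.decode(ids)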
| 23 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """A drop-in `tqdm` wrapper that only renders on the main process by default."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
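# Usage sketch (assuming this module is exposed as `accelerate.utils.tqdm`, as upstream does):
#   from accelerate.utils import tqdm
#   for batch in tqdm(dataloader):  # progress bar only on the main process
#       ...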
| 205 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    """Sum all numbers below `n` that are palindromic in base 10 and in base 2."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
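# Worked example: 585 = 0b1001001001 is palindromic in both bases, so it is counted;
# is_palindrome(585) and is_palindrome(bin(585).split("b")[1]) are both True.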
| 369 |
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores query tokens against support-set entity spans."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
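# Shape sketch (hypothetical batch): W_query holds tokenized queries and W_supports the
# flattened support set plus the bookkeeping entries consumed above:
#   W_supports["sizes"]          -> supports per query, e.g. tensor([2, 3])
#   W_supports["start_token_id"] -> id of the entity start marker token
#   W_supports["end_token_id"]   -> id of the entity end marker token
# forward() returns one row of start/end probabilities per query, stacked with torch.vstack.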
| 310 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal() -> None:
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected) == sorted(result)
| 284 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the chosen validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                writer.write(f"{key}: {val:.6f}\n")

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 284 | 1 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 107 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Sum of all amicable numbers below `n` (Project Euler 21)."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
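# Worked example: 220 and 284 form the smallest amicable pair, since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so solution(10000)
# includes both of them in the total.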
| 107 | 1 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
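# Example: run this module with pytest (repo-relative path is an assumption); the @slow
# integration test is skipped unless RUN_SLOW=1 is set in the environment:
#   RUN_SLOW=1 pytest tests/models/regnet/test_modeling_flax_regnet.py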
| 195 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download real regularization images (plus captions/urls) via clip-retrieval."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validates the payload is an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
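# Example invocation (hypothetical prompt and output directory):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200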
| 195 | 1 |
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
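# Worked example for the tree 1(2(4, 5), 3(6, 7)):
#   pre-order   -> 1,2,4,5,3,6,7
#   in-order    -> 4,2,5,1,6,3,7
#   post-order  -> 4,5,2,6,7,3,1
#   level order -> 1,2,3,4,5,6,7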
| 213 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
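# Note (assumed setup): the fast tests above run on CPU via CPUExecutionProvider, while
# the @nightly integration tests require onnxruntime-gpu and a CUDA device.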
| 213 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
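# Usage sketch: with _LazyModule, `from transformers.models.yolos import YolosModel`
# defers the torch-backed import until the attribute is first accessed.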
| 49 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n    except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n    except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(in_sentvecs, en_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
| 9 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
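# Usage sketch (processor construction elided; call pattern shown for illustration):
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values from the image processor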
| 272 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map a YOSO fairseq-style parameter name onto the HF module hierarchy."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
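    # Example invocation (a minimal sketch; all paths below are hypothetical
    # placeholders):
    #   python convert_yoso_checkpoint.py --pytorch_model_path ./yoso.ckpt \
    #       --config_file ./yoso_config.json --pytorch_dump_path ./yoso-converted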
| 272 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a_ = logging.get_logger(__name__)
class FlavaFeatureExtractor ( FlavaImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
import os
def solution() -> int:
    '''simple docstring'''
    with open(os.path.dirname(__file__) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
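# Worked example (from the Project Euler 22 statement): "COLIN" has an
# alphabetical value of 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in
# the sorted list it contributes 938 * 53 = 49714 to the total score.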
if __name__ == "__main__":
print(solution())
| 176 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig( PretrainedConfig ):
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__(self , vocab_size=5_0257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.attention_layers)` == `config.num_layers` '''
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                '''`config.attention_layers` is prepared using `config.attention_types`. '''
                '''Please verify the value of `config.attention_types` argument.''' )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@staticmethod
    def expand_attention_types_params(attention_types ):
        '''simple docstring'''
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold( input , dimension , size , step ):
    """simple docstring"""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='''floor''' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
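# Minimal sanity check (a sketch, assuming PyTorch is installed): for a 1-D
# tensor this reproduces torch.Tensor.unfold, e.g.
#   x = torch.arange(6)
#   custom_unfold(x, dimension=0, size=2, step=2)  # same values as x.unfold(0, 2, 2)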
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    """simple docstring"""
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='''floor''' )
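# Example: custom_get_block_length_and_num_blocks(512, 256) yields tensors with
# values (128, 4), since 128 is the largest divisor of 512 below the window
# size and 512 / 128 = 4 blocks.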
class GPTNeoOnnxConfig( OnnxConfigWithPast ):
@property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
@property
    def num_attention_heads(self ) -> int:
        '''simple docstring'''
        return self._config.num_heads
    def generate_dummy_inputs(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset(self ) -> int:
        '''simple docstring'''
        return 13
| 368 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests( unittest.TestCase ):
@property
    def dummy_uncond_unet(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model
    def test_inference(self ):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , num_inference_steps=20 , output_type='''numpy''' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type='''numpy''' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests( unittest.TestCase ):
    def test_inference_cifar10(self ):
        '''simple docstring'''
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 279 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used( config_class , attributes , default_value , source_strings ):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"""config.{attribute}""" in modeling_source
                or f"""getattr(config, \"{attribute}\"""" in modeling_source
                or f"""getattr(self.config, \"{attribute}\"""" in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    Rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , modeling_source , )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        '''bos_index''',
        '''eos_index''',
        '''pad_index''',
        '''unk_index''',
        '''mask_index''',
        '''image_size''',
        '''use_cache''',
        '''out_features''',
        '''out_indices''',
    ]
    attributes_used_in_generation = ['''encoder_no_repeat_ngram_size''']
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith('''_token_id''' ):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used( config_class ):
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith('''modeling_''' )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes( ):
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x: inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
        for name, attributes in configs_with_unused_attributes.items():
            error += f"""{name}: {attributes}\n"""
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
| 24 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor( ProcessorMixin ):
    tokenizer_class = """AutoTokenizer"""
    attributes = ["""tokenizer"""]
    preset_shape = {
        """semantic_prompt""": 1,
        """coarse_prompt""": 2,
        """fine_prompt""": 2,
    }
    def __init__( self ,tokenizer ,speaker_embeddings=None ):
        '''simple docstring'''
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained( cls ,pretrained_processor_name_or_path ,speaker_embeddings_dict_path="speaker_embeddings_path.json" ,**kwargs ):
        '''simple docstring'''
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path ,speaker_embeddings_dict_path ,subfolder=kwargs.pop('subfolder' ,None ) ,cache_dir=kwargs.pop('cache_dir' ,None ) ,force_download=kwargs.pop('force_download' ,False ) ,proxies=kwargs.pop('proxies' ,None ) ,resume_download=kwargs.pop('resume_download' ,None ) ,local_files_only=kwargs.pop('local_files_only' ,False ) ,use_auth_token=kwargs.pop('use_auth_token' ,None ) ,revision=kwargs.pop('revision' ,None ) ,)
            if speaker_embeddings_path is None:
                logger.warning(
                    F'`{os.path.join(pretrained_processor_name_or_path ,speaker_embeddings_dict_path )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path ,**kwargs )
        return cls(tokenizer=tokenizer ,speaker_embeddings=speaker_embeddings )
    def save_pretrained( self ,save_directory ,speaker_embeddings_dict_path="speaker_embeddings_path.json" ,speaker_embeddings_directory="speaker_embeddings" ,push_to_hub = False ,**kwargs ,):
        '''simple docstring'''
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory ,speaker_embeddings_directory ,'v2' ) ,exist_ok=True )
            embeddings_dict = {}
            embeddings_dict['repo_or_path'] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'] ,speaker_embeddings_directory ,F'{prompt_key}_{key}' ) ,voice_preset[key] ,allow_pickle=False ,)
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory ,F'{prompt_key}_{key}.npy' )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory ,speaker_embeddings_dict_path ) ,'w' ) as fp:
                json.dump(embeddings_dict ,fp )
        super().save_pretrained(save_directory ,push_to_hub ,**kwargs )
    def _load_voice_preset( self ,voice_preset = None ,**kwargs ):
        '''simple docstring'''
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,None ) ,cache_dir=kwargs.pop('cache_dir' ,None ) ,force_download=kwargs.pop('force_download' ,False ) ,proxies=kwargs.pop('proxies' ,None ) ,resume_download=kwargs.pop('resume_download' ,None ) ,local_files_only=kwargs.pop('local_files_only' ,False ) ,use_auth_token=kwargs.pop('use_auth_token' ,None ) ,revision=kwargs.pop('revision' ,None ) ,)
            if path is None:
                raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.' )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self ,voice_preset : Optional[dict] = None ,**kwargs ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
    def __call__( self ,text=None ,voice_preset=None ,return_tensors="pt" ,max_length=256 ,add_special_tokens=False ,return_attention_mask=True ,return_token_type_ids=False ,**kwargs ,):
        '''simple docstring'''
        if voice_preset is not None and not isinstance(voice_preset ,dict ):
            if (
                isinstance(voice_preset ,str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset ,str ) and not voice_preset.endswith('.npz' ):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset ,**kwargs )
            voice_preset = BatchFeature(data=voice_preset ,tensor_type=return_tensors )
        encoded_text = self.tokenizer(
            text ,return_tensors=return_tensors ,padding='max_length' ,max_length=max_length ,return_attention_mask=return_attention_mask ,return_token_type_ids=return_token_type_ids ,add_special_tokens=add_special_tokens ,**kwargs ,)
        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset
        return encoded_text
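# Minimal usage sketch (the checkpoint name and voice preset are illustrative
# and require access to the Hugging Face Hub):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")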
| 83 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''roberta'''
    def __init__(self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 369 |
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main( ) -> None:
    """simple docstring"""
    message = input("""Enter message: """ )
    key = input("""Enter key [alphanumeric]: """ )
    mode = input("""Encrypt/Decrypt [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        mode = """encrypt"""
        translated = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        mode = """decrypt"""
        translated = decrypt_message(key , message )
    print(f'''\n{mode.title()}ed message:''' )
    print(translated )
def encrypt_message( key : str , message : str ) -> str:
    """simple docstring"""
    return translate_message(key , message , """encrypt""" )
def decrypt_message( key : str , message : str ) -> str:
    """simple docstring"""
    return translate_message(key , message , """decrypt""" )
def translate_message( key : str , message : str , mode : str ) -> str:
    """simple docstring"""
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
| 146 | 0 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
    def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        '''simple docstring'''
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        '''simple docstring'''
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
'''simple docstring'''
return True
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.intaa)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.intaa)
        return inputs_dict
    def setUp( self):
        '''simple docstring'''
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def test_config( self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_loss_computation( self):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model( self):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings( self):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification( self):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    def test_for_token_classification( self):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)
    def test_for_question_answering( self):
        '''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
@slow
    def test_model_from_pretrained( self):
        '''simple docstring'''
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class TFLayoutLMvaModelIntegrationTest( unittest.TestCase):
@cached_property
    def default_image_processor( self):
        '''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head( self):
'''simple docstring'''
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4))
| 36 |
"""simple docstring"""
def circle_sort( collection : list ) -> list:
    if len(collection ) < 2:
        return collection
    def circle_sort_util(collection : list , low : int , high : int ) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
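# Example: circle_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]; the sort is
# in-place, so the input list itself is reordered as well.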
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
| 202 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    model = WavaVecaForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def convert_saprl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ):
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    downstream_dict = checkpoint['''Downstream''']
    hf_config = WavaVecaConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification''' ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForAudioFrameClassification''' ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForXVector''' ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
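# Example invocation (a sketch; the model name and paths are hypothetical
# placeholders):
#   python convert_s3prl_checkpoint.py --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json --checkpoint_path ./s3prl.ckpt \
#       --model_dump_path ./converted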
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path) | 363 |
'''simple docstring'''
def solution( n : int = 10 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError('''Invalid input''' )
    modulus = 10**n
    number = 28433 * (pow(2 , 7830457 , modulus )) + 1
    return str(number % modulus )
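# Known result (Project Euler 97): solution(10) returns "8739992577", the last
# ten digits of 28433 * 2**7830457 + 1.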
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""") | 228 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
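# Illustrative example (token ids are hypothetical): with cls_token_id=0 and
# sep_token_id=2, build_inputs_with_special_tokens([5, 6]) returns [0, 5, 6, 2];
# the pair form appends the second sequence followed by another separator.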
| 214 |
import unittest
from transformers import DonutProcessor
CHECKPOINT = '''naver-clova-ix/donut-base'''
class DonutProcessorTest(unittest.TestCase ):
    def setUp( self):
        self.processor = DonutProcessor.from_pretrained(CHECKPOINT)
    def test_tokenajson( self):
        expected_json = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
        sequence = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
        actual_json = self.processor.tokenajson(sequence)
        self.assertDictEqual(actual_json , expected_json)
| 214 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester :
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
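# A quick, self-contained illustration of the masking convention used above:
# positions equal to pad_token_id become 0, everything else 1. Toy ids only,
# independent of any model or checkpoint; wrapped in a function so nothing
# runs on import.
def _demo_padding_mask():
    pad_token_id = 1
    ids = tf.constant([[5, 7, pad_token_id, pad_token_id]])
    mask = tf.cast(tf.math.not_equal(ids, pad_token_id), tf.int8)
    assert mask.numpy().tolist() == [[1, 1, 0, 0]]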
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 53 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging before TF is imported below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
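# The try/except ImportError probing used above generalizes into a small
# helper; this is a sketch, not part of the original script, and any module
# name passed to it is an arbitrary example:
import importlib


def report_version(module_name: str) -> None:
    try:
        module = importlib.import_module(module_name)
        print(f"{module_name} version:", getattr(module, "__version__", "unknown"))
    except ImportError:
        print(f"{module_name} version:", None)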
| 53 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a real AdamW unless the DeepSpeed config already defines one,
    # in which case a DummyOptim placeholder is handed to `accelerator.prepare`
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (same real-vs-dummy split as the optimizer)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
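# Gradient accumulation as used in training_function, shown in isolation: each
# micro-batch loss is scaled by 1/accum_steps so the accumulated gradient
# matches one large batch, and the optimizer only steps every accum_steps
# micro-batches. Toy tensors only; a sketch, not part of the script above.
def _grad_accumulation_demo():
    accum_steps = 4
    weight = torch.zeros(1, requires_grad=True)
    optimizer = torch.optim.SGD([weight], lr=1.0)
    for step in range(8):
        loss = (weight - 1.0).pow(2).mean() / accum_steps  # scaled micro-batch loss
        loss.backward()  # gradients accumulate in weight.grad across micro-batches
        if (step + 1) % accum_steps == 0:
            optimizer.step()
            optimizer.zero_grad()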
| 193 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
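# Why the tokenizer above defines __getstate__/__setstate__: a loaded
# SentencePieceProcessor is not picklable, so it is dropped on serialization
# and rebuilt from vocab_file afterwards. A minimal sketch of the same pattern
# with a stand-in resource (an open file handle instead of an sp_model):
class _HoldsUnpicklableResource:
    def __init__(self, path):
        self.path = path
        self.handle = open(path)  # stand-in for the unpicklable member

    def __getstate__(self):
        state = self.__dict__.copy()
        state["handle"] = None  # drop it, exactly like sp_model above
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self.handle = open(self.path)  # rebuild from the stored path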
| 193 | 1 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
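# Example invocation (the script name and output path are placeholders; the
# two flags are exactly the ones defined by the parser above):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./unclip-image-variation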
| 371 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 169 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
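# Sanity checks (cheap enough to run at import time): 49/98 "cancels" to 4/8,
# the two-digit search finds exactly the four known curious fractions, and
# their product reduces to 1/100, so the final answer is 100.
assert is_digit_cancelling(49, 98)
assert sorted(fraction_list(2)) == ["16/64", "19/95", "26/65", "49/98"]
assert solution() == 100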
if __name__ == "__main__":
print(solution())
| 188 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 233 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
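# Quick usage check of the open-addressing map above: inserts trigger
# _size_up() resizes, overwrites happen in place, and deletion leaves a
# _deleted tombstone so later probes keep walking the cluster.
_hm = HashMap()
for _i in range(20):
    _hm[f"key{_i}"] = _i
_hm["key3"] = 33
del _hm["key4"]
assert _hm["key3"] == 33 and len(_hm) == 19 and "key4" not in _hm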
| 22 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
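# Spot checks: primes below 10 sum to 2 + 3 + 5 + 7 = 17, and the 6k +/- 1
# trial division agrees with a few known primes and composites.
assert solution(10) == 17
assert [is_prime(i) for i in (2, 3, 4, 25, 29)] == [True, True, False, False, True]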
if __name__ == "__main__":
print(F'''{solution() = }''')
| 22 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
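# How align_with_features is meant to be used downstream: given concrete
# dataset features whose label column is a ClassLabel, the template swaps its
# generic ClassLabel schema for the dataset's concrete one. A sketch with
# hypothetical feature values, wrapped in a function so nothing runs on import:
def _align_example():
    features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
    task = TextClassification(text_column="text", label_column="labels")
    aligned = task.align_with_features(features)
    assert aligned.label_schema["labels"].names == ["neg", "pos"]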
| 302 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
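# What the _LazyModule indirection above buys: the submodules listed in
# _import_structure are only imported when one of their attributes is first
# accessed. A stripped-down, hypothetical sketch of the mechanism (the real
# _LazyModule also handles dir(), __file__, pickling, and error reporting):
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_submodule[attr]  # unknown attr -> KeyError; good enough for a sketch
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)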
| 302 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase__ = {"""input_ids""": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `UpperCamelCase__` is the expected-encoding literal captured above
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 178 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
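# Quick round trip of the two helpers above: fabricate a value for a declared
# input signature, then recover the type names from the concrete values.
# Wrapped in a function so nothing runs on import.
def _helpers_round_trip():
    fabricated = create_inputs(["text"])
    assert output_types(fabricated) == ["text"]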
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 178 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def __snake_case ( self : Union[str, Any]):
a : str = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__UpperCAmelCase)
a : Dict = 3
@property
def __snake_case ( self : Optional[Any]):
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : Optional[int]):
a : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__UpperCAmelCase , "do_resize"))
self.assertTrue(hasattr(__UpperCAmelCase , "size"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_center_crop"))
self.assertTrue(hasattr(__UpperCAmelCase , "center_crop"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize"))
self.assertTrue(hasattr(__UpperCAmelCase , "image_mean"))
self.assertTrue(hasattr(__UpperCAmelCase , "image_std"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_convert_rgb"))
def __snake_case ( self : Any):
pass
def __snake_case ( self : Union[str, Any]):
# Initialize image_processing
a : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a : str = self.image_processor_tester.prepare_inputs(equal_resolution=__UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image)
# Test not batched input
a : Tuple = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a : Optional[Any] = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
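    # Note: the tester above produces 4-channel (RGBA) inputs, but with
    # `do_convert_rgb` enabled the processor converts them to RGB, which is
    # why the encoded outputs are expected to carry only 3 channels
    # (`expected_encoded_image_num_channels`).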
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class __a (unittest.TestCase):
'''simple docstring'''
def __init__( self , _a , _a = True , _a = None , _a = 32 , _a = True , _a = 1 / 255 , _a = True , _a = True , _a = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _a = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _a = True , _a=7 , _a=30 , _a=400 , _a=3 , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : Tuple = do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 288}
SCREAMING_SNAKE_CASE__ : List[str] = size_divisor
SCREAMING_SNAKE_CASE__ : Tuple = do_rescale
SCREAMING_SNAKE_CASE__ : List[str] = rescale_factor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_mean
SCREAMING_SNAKE_CASE__ : List[str] = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Dict = min_resolution
SCREAMING_SNAKE_CASE__ : str = max_resolution
def _a ( self ) -> List[str]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def _a ( self , _a , _a=False ) -> int:
"""simple docstring"""
if not batched:
SCREAMING_SNAKE_CASE__ : List[Any] = self.size["""shortest_edge"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_inputs[0]
if isinstance(_a , Image.Image ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = image.size
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Tuple = size / min(_a , _a )
if h < w:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = size, scale * w
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((1_333 / 800) * size )
if max(_a , _a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[str] = max_size / max(_a , _a )
SCREAMING_SNAKE_CASE__ : Any = newh * scale
SCREAMING_SNAKE_CASE__ : Any = neww * scale
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : Dict = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Any = max(_a , key=lambda _a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Any = max(_a , key=lambda _a : item[1] )[1]
return expected_height, expected_width
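    # Illustrative trace of the math above (shortest_edge=288, size_divisor=32):
    # a 300x500 image gives scale = 288 / 300 = 0.96 -> (288, 480); since
    # max(288, 480) exceeds max_size = int(1333 / 800 * 288) = 479, both sides
    # are rescaled by 479 / 480 -> (287, 479) after rounding, then floored to
    # multiples of 32 -> expected (height, width) = (256, 448).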
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
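# Illustrative check of the lazy-import machinery above (assuming torch is
# installed): the import below resolves through the `_LazyModule` and only
# loads the heavy modeling code on first attribute access, e.g.
#
#   from transformers.models.clipseg import CLIPSegProcessor, CLIPSegForImageSegmentation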
"""simple docstring"""
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[Any] ) -> Union[str, Any]:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __lowerCamelCase ( a_ : Optional[int] , a_ : Any=0 ) -> Optional[Any]:
return sorted(a_ , key=lambda a_ : x[column] )
def __lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int] , a_ : str=float('''inf''' ) ) -> str:
for i in range(points_counts - 1 ):
for j in range(i + 1 , a_ ):
__SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE :Optional[Any] = current_dis
return min_dis
def __lowerCamelCase ( a_ : List[Any] , a_ : Any , a_ : Optional[int]=float('''inf''' ) ) -> Optional[Any]:
for i in range(min(6 , points_counts - 1 ) , a_ ):
for j in range(max(0 , i - 6 ) , a_ ):
__SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE :int = current_dis
return min_dis
def __lowerCamelCase ( a_ : str , a_ : List[Any] , a_ : int ) -> Optional[int]:
# base case
if points_counts <= 3:
return dis_between_closest_pair(a_ , a_ )
# recursion
__SCREAMING_SNAKE_CASE :int = points_counts // 2
__SCREAMING_SNAKE_CASE :Dict = closest_pair_of_points_sqr(
a_ , points_sorted_on_y[:mid] , a_ )
__SCREAMING_SNAKE_CASE :Any = closest_pair_of_points_sqr(
a_ , points_sorted_on_y[mid:] , points_counts - mid )
__SCREAMING_SNAKE_CASE :Union[str, Any] = min(a_ , a_ )
__SCREAMING_SNAKE_CASE :str = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(a_ )
__SCREAMING_SNAKE_CASE :Dict = dis_between_closest_in_strip(
a_ , len(a_ ) , a_ )
return min(a_ , a_ )
def __lowerCamelCase ( a_ : int , a_ : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = column_based_sort(a_ , column=0 )
__SCREAMING_SNAKE_CASE :int = column_based_sort(a_ , column=1 )
return (
closest_pair_of_points_sqr(
a_ , a_ , a_ )
) ** 0.5
if __name__ == "__main__":
lowerCamelCase_ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points))) | 239 | 1 |
def solution(limit: int = 1000000) -> int:
    """Count the values of n below `limit` with exactly ten solutions."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
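
# Worked derivation behind the loop above (assuming the usual Project Euler
# 135 formulation): write the arithmetic progression as x = a + d, y = a,
# z = a - d. Then
#
#     n = x**2 - y**2 - z**2 = a * (4*d - a),
#
# so for every divisor a of n there is a candidate d = (a + n / a) / 4, which
# must be a positive integer satisfying a > d (so that z > 0) and a < 4*d
# (so that n > 0) -- exactly the two checks performed per (first_term, n)
# pair in the solution.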
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        deprecated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in deprecated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
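
# Example invocation (illustrative; assumes this script is saved as
# run_benchmark_tf.py and TensorFlow is installed -- the flags are generated
# by HfArgumentParser from the benchmark-argument dataclass fields):
#
#   python run_benchmark_tf.py --models bert-base-uncased \
#       --batch_sizes 8 --sequence_lengths 128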
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
def __init__(self : str , _A : Tuple , _A : List[str]=1_3 , _A : Optional[Any]=6_4 , _A : Optional[Any]=2 , _A : Tuple=3 , _A : Dict="swish" , _A : str=3 , _A : Dict=3_2 , _A : Tuple=0.1 , _A : Dict=0.02 , _A : Optional[int]=True , _A : Dict=True , _A : Union[str, Any]=1_0 , _A : Optional[int]=None , _A : Any=0.25 , _A : int=0.0 , _A : str=0.0 , ) -> str:
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = make_divisible(5_1_2 * width_multiplier , divisor=8 )
snake_case = hidden_act
snake_case = conv_kernel_size
snake_case = output_stride
snake_case = classifier_dropout_prob
snake_case = use_labels
snake_case = is_training
snake_case = num_labels
snake_case = initializer_range
snake_case = scope
snake_case = width_multiplier
snake_case = ffn_dropout
snake_case = attn_dropout
def UpperCAmelCase(self : List[Any] ) -> Optional[int]:
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.num_labels )
snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase(self : int ) -> Dict:
return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
def UpperCAmelCase(self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : Optional[int] , _A : str ) -> Tuple:
snake_case = MobileViTVaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase(self : Tuple , _A : Optional[int] , _A : Any , _A : Any , _A : List[str] ) -> Optional[Any]:
snake_case = self.num_labels
snake_case = MobileViTVaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase(self : int , _A : Any , _A : List[str] , _A : str , _A : Optional[int] ) -> str:
snake_case = self.num_labels
snake_case = MobileViTVaForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase(self : Optional[int] ) -> int:
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case = config_and_inputs
snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[Any] = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
def UpperCAmelCase(self : List[Any] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def UpperCAmelCase(self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def UpperCAmelCase(self : Tuple ) -> Dict:
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def UpperCAmelCase(self : str ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def UpperCAmelCase(self : Tuple ) -> Any:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase(self : Tuple ) -> Optional[Any]:
pass
def UpperCAmelCase(self : List[Any] ) -> List[str]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(__SCREAMING_SNAKE_CASE )
snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase(self : List[str] ) -> Any:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase(self : int ) -> List[Any]:
def check_hidden_states_output(_A : str , _A : Any , _A : Tuple ):
snake_case = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case = outputs.hidden_states
snake_case = 5
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
snake_case = 2
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase(self : int ) -> Tuple:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase(self : List[str] ) -> Tuple:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase(self : Optional[Any] ) -> List[str]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = MobileViTVaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCAmelCase(self : int ) -> Dict:
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase(self : Optional[int] ) -> Union[str, Any]:
snake_case = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
__SCREAMING_SNAKE_CASE )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def UpperCAmelCase(self : Optional[Any] ) -> Optional[int]:
snake_case = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case = model.to(__SCREAMING_SNAKE_CASE )
snake_case = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case = prepare_img()
snake_case = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case = model(**__SCREAMING_SNAKE_CASE )
snake_case = outputs.logits
# verify the logits
snake_case = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
snake_case = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
] , device=__SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def UpperCAmelCase(self : str ) -> Dict:
snake_case = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case = model.to(__SCREAMING_SNAKE_CASE )
snake_case = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case = prepare_img()
snake_case = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case = model(**__SCREAMING_SNAKE_CASE )
snake_case = outputs.logits.detach().cpu()
snake_case = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(5_0, 6_0)] )
snake_case = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
snake_case = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
snake_case = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
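
# Minimal usage sketch (illustrative; assumes the public LayoutXLM checkpoint
# and a PIL `image` already loaded in RGB mode):
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(images=image, return_tensors="pt")
#   # with apply_ocr enabled, `encoding` carries input_ids, bbox,
#   # attention_mask and image tensors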
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()

        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )

                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]
def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
__snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case = tf.data.Dataset.from_generator(
a__ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def a (self : Union[str, Any] ):
"""simple docstring"""
return self.dataset
def __len__(self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Any , a__ : Dict ):
"""simple docstring"""
return self.features[i]
def a (self : str ):
"""simple docstring"""
return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
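
# Illustrative wiring of the pieces above (a sketch with hypothetical paths;
# requires the HANS data files, e.g. heuristics_train_set.txt, under
# `data_dir`):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = HansDataset("hans_data", tokenizer, task="hans", max_seq_length=128)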
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
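
# Minimal usage sketch (the values shown are simply the defaults above):
#
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   assert config.hidden_size == 768 and config.num_hidden_layers == 12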
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
def __init__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
A_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(UpperCamelCase__ , num_labels=UpperCamelCase__ , mode=self.mode , **UpperCamelCase__ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
A_ = Path(self.output_dir ) / """metrics.json"""
A_ = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
A_ = 0
A_ = defaultdict(UpperCamelCase__ )
A_ = self.config.model_type
A_ = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
A_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
A_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ = get_git_info()["""repo_sha"""]
A_ = hparams.num_workers
A_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCamelCase__ ):
A_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ = self.decoder_start_token_id
A_ = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
A_ = False
A_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ = self.hparams.eval_max_gen_length
else:
A_ = self.model.config.max_length
A_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def snake_case_ ( self , UpperCamelCase__ ) -> Dict[str, List[str]]:
'''simple docstring'''
A_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(UpperCamelCase__ , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
A_ = True
return readable_batch
def snake_case_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.model(UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = self.tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
return lmap(str.strip , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.tokenizer.pad_token_id
A_ , A_ = batch["""input_ids"""], batch["""attention_mask"""]
A_ = batch["""labels"""]
if isinstance(self.model , UpperCamelCase__ ):
A_ = self.model._shift_right(UpperCamelCase__ )
else:
A_ = shift_tokens_right(UpperCamelCase__ , UpperCamelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ = decoder_input_ids
self.save_readable_batch(UpperCamelCase__ )
A_ = self(UpperCamelCase__ , attention_mask=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , use_cache=UpperCamelCase__ )
A_ = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ = nn.CrossEntropyLoss(ignore_index=UpperCamelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
A_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
A_ , A_ = label_smoothed_nll_loss(
UpperCamelCase__ , UpperCamelCase__ , self.hparams.label_smoothing , ignore_index=UpperCamelCase__ )
return (loss,)
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self.tokenizer.pad_token_id
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = self._step(UpperCamelCase__ )
A_ = dict(zip(self.loss_names , UpperCamelCase__ ) )
# tokens per batch
A_ = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
A_ = batch["""input_ids"""].shape[0]
A_ = batch["""input_ids"""].eq(self.pad ).sum()
A_ = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__="val" ) -> Dict:
'''simple docstring'''
self.step_count += 1
A_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ = losses["""loss"""]
A_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
A_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ = torch.tensor(UpperCamelCase__ ).type_as(UpperCamelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCamelCase__ )
A_ = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ = self.step_count
self.metrics[prefix].append(UpperCamelCase__ ) # callback writes this to self.metrics_save_path
A_ = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return calculate_rouge(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> dict:
'''simple docstring'''
A_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=UpperCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ = (time.time() - ta) / batch["""input_ids"""].shape[0]
A_ = self.ids_to_clean_text(UpperCamelCase__ )
A_ = self.ids_to_clean_text(batch["""labels"""] )
A_ = self._step(UpperCamelCase__ )
A_ = dict(zip(self.loss_names , UpperCamelCase__ ) )
A_ = self.calc_generative_metrics(UpperCamelCase__ , UpperCamelCase__ )
A_ = np.mean(lmap(UpperCamelCase__ , UpperCamelCase__ ) )
base_metrics.update(gen_time=UpperCamelCase__ , gen_len=UpperCamelCase__ , preds=UpperCamelCase__ , target=UpperCamelCase__ , **UpperCamelCase__ )
return base_metrics
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return self.validation_epoch_end(UpperCamelCase__ , prefix="""test""" )
def snake_case_ ( self , UpperCamelCase__ ) -> SeqaSeqDataset:
'''simple docstring'''
A_ = self.n_obs[type_path]
A_ = self.target_lens[type_path]
A_ = self.dataset_class(
self.tokenizer , type_path=UpperCamelCase__ , n_obs=UpperCamelCase__ , max_target_length=UpperCamelCase__ , **self.dataset_kwargs , )
return dataset
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
'''simple docstring'''
A_ = self.get_dataset(UpperCamelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ = dataset.make_sortish_sampler(UpperCamelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
A_ = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
return dataloader
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
__lowerCamelCase = pl.Trainer.add_argparse_args(parser)
__lowerCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase = parser.parse_args()
main(args)
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
return lmap(str.strip , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.tokenizer.pad_token_id
A_ , A_ = batch["""input_ids"""], batch["""attention_mask"""]
A_ = batch["""labels"""]
if isinstance(self.model , UpperCamelCase__ ):
A_ = self.model._shift_right(UpperCamelCase__ )
else:
A_ = shift_tokens_right(UpperCamelCase__ , UpperCamelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ = decoder_input_ids
self.save_readable_batch(UpperCamelCase__ )
A_ = self(UpperCamelCase__ , attention_mask=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , use_cache=UpperCamelCase__ )
A_ = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ = nn.CrossEntropyLoss(ignore_index=UpperCamelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
A_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
A_ , A_ = label_smoothed_nll_loss(
UpperCamelCase__ , UpperCamelCase__ , self.hparams.label_smoothing , ignore_index=UpperCamelCase__ )
return (loss,)
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self.tokenizer.pad_token_id
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = self._step(UpperCamelCase__ )
A_ = dict(zip(self.loss_names , UpperCamelCase__ ) )
# tokens per batch
A_ = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
A_ = batch["""input_ids"""].shape[0]
A_ = batch["""input_ids"""].eq(self.pad ).sum()
A_ = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__="val" ) -> Dict:
'''simple docstring'''
self.step_count += 1
A_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ = losses["""loss"""]
A_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
A_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ = torch.tensor(UpperCamelCase__ ).type_as(UpperCamelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCamelCase__ )
A_ = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ = self.step_count
self.metrics[prefix].append(UpperCamelCase__ ) # callback writes this to self.metrics_save_path
A_ = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return calculate_rouge(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> dict:
'''simple docstring'''
A_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=UpperCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ = (time.time() - ta) / batch["""input_ids"""].shape[0]
A_ = self.ids_to_clean_text(UpperCamelCase__ )
A_ = self.ids_to_clean_text(batch["""labels"""] )
A_ = self._step(UpperCamelCase__ )
A_ = dict(zip(self.loss_names , UpperCamelCase__ ) )
A_ = self.calc_generative_metrics(UpperCamelCase__ , UpperCamelCase__ )
A_ = np.mean(lmap(UpperCamelCase__ , UpperCamelCase__ ) )
base_metrics.update(gen_time=UpperCamelCase__ , gen_len=UpperCamelCase__ , preds=UpperCamelCase__ , target=UpperCamelCase__ , **UpperCamelCase__ )
return base_metrics
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return self.validation_epoch_end(UpperCamelCase__ , prefix="""test""" )
def snake_case_ ( self , UpperCamelCase__ ) -> SeqaSeqDataset:
'''simple docstring'''
A_ = self.n_obs[type_path]
A_ = self.target_lens[type_path]
A_ = self.dataset_class(
self.tokenizer , type_path=UpperCamelCase__ , n_obs=UpperCamelCase__ , max_target_length=UpperCamelCase__ , **self.dataset_kwargs , )
return dataset
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
'''simple docstring'''
A_ = self.get_dataset(UpperCamelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ = dataset.make_sortish_sampler(UpperCamelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
A_ = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
return dataloader
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
add_generic_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
"""--max_source_length""" , default=1024 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=UpperCamelCase__ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=UpperCamelCase__ )
parser.add_argument("""--max_tokens_per_batch""" , type=UpperCamelCase__ , default=UpperCamelCase__ )
parser.add_argument("""--logger_name""" , type=UpperCamelCase__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=UpperCamelCase__ , default=500 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=UpperCamelCase__ , default="""summarization""" , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=UpperCamelCase__ , default=0.0 , required=UpperCamelCase__ )
parser.add_argument("""--src_lang""" , type=UpperCamelCase__ , default="""""" , required=UpperCamelCase__ )
parser.add_argument("""--tgt_lang""" , type=UpperCamelCase__ , default="""""" , required=UpperCamelCase__ )
parser.add_argument("""--eval_beams""" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ )
parser.add_argument(
"""--val_metric""" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=UpperCamelCase__ , default=1 , required=UpperCamelCase__ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class A__ ( _snake_case ):
lowercase = "translation"
lowercase = ["loss"]
lowercase = ["bleu"]
lowercase = "bleu"
def __init__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
super().__init__(UpperCamelCase__ , **UpperCamelCase__ )
A_ = hparams.src_lang
A_ = hparams.tgt_lang
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> dict:
'''simple docstring'''
return calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=UpperCAmelCase__ )
check_output_dir(UpperCAmelCase__, expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ = SummarizationModule(UpperCAmelCase__ )
else:
A_ = TranslationModule(UpperCAmelCase__ )
A_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
A_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ = os.environ.get("""WANDB_PROJECT""", UpperCAmelCase__ )
A_ = WandbLogger(name=model.output_dir.name, project=UpperCAmelCase__ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
else:
A_ = False
A_ = args.val_metric == """loss"""
A_ = generic_train(
UpperCAmelCase__, UpperCAmelCase__, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
args.output_dir, model.val_metric, args.save_top_k, UpperCAmelCase__ ), early_stopping_callback=UpperCAmelCase__, logger=UpperCAmelCase__, )
pickle_save(model.hparams, model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
A_ = """"""
A_ = sorted(glob.glob(os.path.join(args.output_dir, """*.ckpt""" ), recursive=UpperCAmelCase__ ) )
if checkpoints:
A_ = checkpoints[-1]
A_ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
__lowerCamelCase = pl.Trainer.add_argparse_args(parser)
__lowerCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase = parser.parse_args()
main(args)
| 101 | 1 |
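The training step above imports label_smoothed_nll_loss from a utils module that is not included in this sample. A common fairseq-style formulation, written here as an assumption about what that helper computes rather than a verbatim copy; it takes log-probabilities plus integer targets and returns the smoothed loss together with the plain NLL:

import torch


def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (batch * seq_len, vocab) log-probabilities; target: matching ids.
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss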
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
__lowerCAmelCase: Union[str, Any] = math.sqrt(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[str] = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.ndarray:
__lowerCAmelCase: Any = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
__lowerCAmelCase: List[str] = np.zeros((kernel_size, kernel_size) )
for i in range(0 , __SCREAMING_SNAKE_CASE ):
for j in range(0 , __SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: str = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> np.ndarray:
__lowerCAmelCase: Dict = np.zeros(img.shape )
__lowerCAmelCase: Union[str, Any] = get_gauss_kernel(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase: Any = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__lowerCAmelCase: Tuple = get_slice(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Dict = img_s - img_s[kernel_size // 2, kernel_size // 2]
__lowerCAmelCase: List[Any] = vec_gaussian(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = np.multiply(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = np.multiply(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Tuple = np.sum(__SCREAMING_SNAKE_CASE ) / np.sum(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = val
return imga
def a__ ( __SCREAMING_SNAKE_CASE ) -> tuple:
__lowerCAmelCase: List[Any] = args[1] if args[1:] else "../image_data/lena.jpg"
__lowerCAmelCase: List[Any] = float(args[2] ) if args[2:] else 1.0
__lowerCAmelCase: List[Any] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__lowerCAmelCase: int = int(args[4] )
__lowerCAmelCase: str = kernel_size + abs(kernel_size % 2 - 1 )
else:
__lowerCAmelCase: str = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
__A , __A , __A , __A = parse_args(sys.argv)
__A = cva.imread(filename, 0)
cva.imshow("input image", img)
__A = img / 255
__A = out.astype("float32")
__A = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
__A = out * 255
__A = np.uinta(out)
cva.imshow("output image", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 217 |
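A small self-contained driver for the bilateral filter above (using the function names referenced in its __main__ block), run on a synthetic step-edge image so no image file or OpenCV window is needed; the variance and kernel values are illustrative, not tuned:

import numpy as np

img = np.tile(np.linspace(0.0, 1.0, 32), (32, 1)).astype("float32")
img[:, 16:] += 0.5  # an intensity edge the filter should largely preserve
filtered = bilateral_filter(img, 1.0, 1.0, 5)  # spatial var, intensity var, kernel size
print(filtered.shape)  # (32, 32); only the interior is filtered, borders stay zero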
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class snake_case :
def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=0.2 , UpperCamelCase__ : Any=0.2)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = bp_numa
__lowerCAmelCase: Optional[int] = bp_numa
__lowerCAmelCase: Tuple = bp_numa
__lowerCAmelCase: Optional[int] = conva_get[:2]
__lowerCAmelCase: int = conva_get[2]
__lowerCAmelCase: List[str] = size_pa
__lowerCAmelCase: Tuple = rate_w
__lowerCAmelCase: Dict = rate_t
__lowerCAmelCase: List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
__lowerCAmelCase: Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
__lowerCAmelCase: int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
__lowerCAmelCase: Optional[Any] = -2 * np.random.rand(self.conva[1]) + 1
__lowerCAmelCase: int = -2 * np.random.rand(self.num_bpa) + 1
__lowerCAmelCase: str = -2 * np.random.rand(self.num_bpa) + 1
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : int)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Any = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(UpperCamelCase__ , "wb") as f:
pickle.dump(UpperCamelCase__ , UpperCamelCase__)
print(f"Model saved: {save_path}")
@classmethod
def lowercase_ ( cls : Dict , UpperCamelCase__ : Union[str, Any])-> List[Any]:
'''simple docstring'''
with open(UpperCamelCase__ , "rb") as f:
__lowerCAmelCase: Dict = pickle.load(UpperCamelCase__) # noqa: S301
__lowerCAmelCase: Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
__lowerCAmelCase: List[str] = model_dic.get("size_pooling1")
__lowerCAmelCase: Union[str, Any] = model_dic.get("num_bp1")
__lowerCAmelCase: Any = model_dic.get("num_bp2")
__lowerCAmelCase: Union[str, Any] = model_dic.get("num_bp3")
__lowerCAmelCase: Optional[int] = model_dic.get("rate_weight")
__lowerCAmelCase: int = model_dic.get("rate_thre")
# create model instance
__lowerCAmelCase: Tuple = CNN(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
# modify model parameter
__lowerCAmelCase: Any = model_dic.get("w_conv1")
__lowerCAmelCase: Optional[Any] = model_dic.get("wkj")
__lowerCAmelCase: Any = model_dic.get("vji")
__lowerCAmelCase: Dict = model_dic.get("thre_conv1")
__lowerCAmelCase: int = model_dic.get("thre_bp2")
__lowerCAmelCase: Optional[int] = model_dic.get("thre_bp3")
return conv_ins
def lowercase_ ( self : Dict , UpperCamelCase__ : List[Any])-> List[Any]:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x))
def lowercase_ ( self : Dict , UpperCamelCase__ : List[Any])-> Optional[Any]:
'''simple docstring'''
return round(UpperCamelCase__ , 3)
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int)-> Dict:
'''simple docstring'''
__lowerCAmelCase: List[Any] = convs[0]
__lowerCAmelCase: int = convs[1]
__lowerCAmelCase: Union[str, Any] = np.shape(UpperCamelCase__)[0]
# get the data slice of original image data, data_focus
__lowerCAmelCase: Optional[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__):
__lowerCAmelCase: Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCamelCase__)
        # calculate the feature map of every kernel and save them as a list of matrices
__lowerCAmelCase: int = []
__lowerCAmelCase: Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(UpperCamelCase__):
__lowerCAmelCase: List[str] = []
for i_focus in range(len(UpperCamelCase__)):
__lowerCAmelCase: Union[str, Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCamelCase__))
__lowerCAmelCase: str = np.asmatrix(UpperCamelCase__).reshape(
UpperCamelCase__ , UpperCamelCase__)
data_featuremap.append(UpperCamelCase__)
        # expand the data slice to one dimension
__lowerCAmelCase: Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCamelCase__))
__lowerCAmelCase: List[Any] = np.asarray(UpperCamelCase__)
return focus_list, data_featuremap
def lowercase_ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]="average_pool")-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = len(featuremaps[0])
__lowerCAmelCase: List[Any] = int(size_map / size_pooling)
__lowerCAmelCase: int = []
for i_map in range(len(UpperCamelCase__)):
__lowerCAmelCase: str = featuremaps[i_map]
__lowerCAmelCase: List[Any] = []
for i_focus in range(0 , UpperCamelCase__ , UpperCamelCase__):
for j_focus in range(0 , UpperCamelCase__ , UpperCamelCase__):
__lowerCAmelCase: Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCamelCase__))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCamelCase__))
__lowerCAmelCase: Optional[int] = np.asmatrix(UpperCamelCase__).reshape(UpperCamelCase__ , UpperCamelCase__)
featuremap_pooled.append(UpperCamelCase__)
return featuremap_pooled
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str)-> int:
'''simple docstring'''
__lowerCAmelCase: List[Any] = []
for i in range(len(UpperCamelCase__)):
__lowerCAmelCase: Union[str, Any] = np.shape(data[i])
__lowerCAmelCase: int = data[i].reshape(1 , shapes[0] * shapes[1])
__lowerCAmelCase: Dict = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCamelCase__)
__lowerCAmelCase: Any = np.asarray(UpperCamelCase__)
return data_expanded
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = np.asarray(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = np.shape(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def lowercase_ ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: Any = 0
for i_map in range(UpperCamelCase__):
__lowerCAmelCase: Optional[Any] = np.ones((size_map, size_map))
for i in range(0 , UpperCamelCase__ , UpperCamelCase__):
for j in range(0 , UpperCamelCase__ , UpperCamelCase__):
__lowerCAmelCase: Optional[Any] = pd_pool[
i_pool
]
__lowerCAmelCase: str = i_pool + 1
__lowerCAmelCase: Dict = np.multiply(
UpperCamelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(UpperCamelCase__)
return pd_all
def lowercase_ ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str=bool)-> List[str]:
'''simple docstring'''
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(UpperCamelCase__)))
print((" - - Shape: Teach_Data ", np.shape(UpperCamelCase__)))
__lowerCAmelCase: str = 0
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: List[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
__lowerCAmelCase: Optional[Any] = 0
print(f"-------------Learning Time {rp}--------------")
for p in range(len(UpperCamelCase__)):
# print('------------Learning Image: %d--------------'%p)
__lowerCAmelCase: Dict = np.asmatrix(datas_train[p])
__lowerCAmelCase: Dict = np.asarray(datas_teach[p])
__lowerCAmelCase , __lowerCAmelCase: int = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase: Any = self.pooling(UpperCamelCase__ , self.size_poolinga)
__lowerCAmelCase: Optional[Any] = np.shape(UpperCamelCase__)
__lowerCAmelCase: str = self._expand(UpperCamelCase__)
__lowerCAmelCase: str = data_bp_input
__lowerCAmelCase: int = np.dot(UpperCamelCase__ , self.vji.T) - self.thre_bpa
__lowerCAmelCase: int = self.sig(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = np.dot(UpperCamelCase__ , self.wkj.T) - self.thre_bpa
__lowerCAmelCase: str = self.sig(UpperCamelCase__)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
__lowerCAmelCase: Union[str, Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCamelCase__ , (1 - bp_outa)))
__lowerCAmelCase: Any = np.multiply(
np.dot(UpperCamelCase__ , self.wkj) , np.multiply(UpperCamelCase__ , (1 - bp_outa)))
__lowerCAmelCase: str = np.dot(UpperCamelCase__ , self.vji)
__lowerCAmelCase: Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
__lowerCAmelCase: str = pd_conva_pooled.T.getA().tolist()
__lowerCAmelCase: str = self._calculate_gradient_from_pool(
UpperCamelCase__ , UpperCamelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
__lowerCAmelCase: List[Any] = self._expand_mat(pd_conva_all[k_conv])
__lowerCAmelCase: int = self.rate_weight * np.dot(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
__lowerCAmelCase: Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
                # fully connected layer
__lowerCAmelCase: List[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__lowerCAmelCase: Union[str, Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__lowerCAmelCase: Tuple = self.thre_bpa - pd_k_all * self.rate_thre
__lowerCAmelCase: Optional[int] = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the total error over all images
__lowerCAmelCase: List[str] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__lowerCAmelCase: Tuple = rp + 1
__lowerCAmelCase: Optional[Any] = error_count / patterns
all_mse.append(UpperCamelCase__)
def draw_error():
__lowerCAmelCase: Dict = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(UpperCamelCase__ , "+-")
plt.plot(UpperCamelCase__ , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(UpperCamelCase__ , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Tuple)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: int = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(UpperCamelCase__)))
for p in range(len(UpperCamelCase__)):
__lowerCAmelCase: Dict = np.asmatrix(datas_test[p])
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase: Tuple = self.pooling(UpperCamelCase__ , self.size_poolinga)
__lowerCAmelCase: List[str] = self._expand(UpperCamelCase__)
__lowerCAmelCase: int = data_bp_input
__lowerCAmelCase: List[Any] = bp_outa * self.vji.T - self.thre_bpa
__lowerCAmelCase: Any = self.sig(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = bp_outa * self.wkj.T - self.thre_bpa
__lowerCAmelCase: List[str] = self.sig(UpperCamelCase__)
produce_out.extend(bp_outa.getA().tolist())
__lowerCAmelCase: Tuple = [list(map(self.do_round , UpperCamelCase__)) for each in produce_out]
return np.asarray(UpperCamelCase__)
def lowercase_ ( self : int , UpperCamelCase__ : Any)-> Any:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = np.asmatrix(UpperCamelCase__)
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase: Any = self.pooling(UpperCamelCase__ , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 217 | 1 |
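The pooling method in the CNN above walks each feature map with nested loops. For intuition, a vectorized average pooling that agrees with it whenever the side length divides evenly by the pool size; this is a sketch, not a drop-in replacement for the class method:

import numpy as np


def average_pool(feature_map: np.ndarray, size: int) -> np.ndarray:
    # Group pixels into size x size tiles via reshape, then average each tile.
    h, w = feature_map.shape
    return feature_map.reshape(h // size, size, w // size, size).mean(axis=(1, 3))


fm = np.arange(16, dtype=float).reshape(4, 4)
print(average_pool(fm, 2))  # [[ 2.5  4.5] [10.5 12.5]]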
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase__ = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
UpperCAmelCase__ = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
UpperCAmelCase__ = """
Calculates how good the predictions are given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds each candidate program is allowed to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
UpperCAmelCase__ = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
UpperCAmelCase__ = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCAmelCase_ ( self : Tuple ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=[1, 10, 100] , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Union[str, Any]=3.0 ):
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__lowerCAmelCase ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(__lowerCAmelCase )
for task_id, (candidates, test_case) in enumerate(zip(__lowerCAmelCase , __lowerCAmelCase ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + """\n""" + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(__lowerCAmelCase , *__lowerCAmelCase )
futures.append(__lowerCAmelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__lowerCAmelCase ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
_UpperCAmelCase , _UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(__lowerCAmelCase ) )
correct.append(sum(__lowerCAmelCase ) )
_UpperCAmelCase = np.array(__lowerCAmelCase )
_UpperCAmelCase = np.array(__lowerCAmelCase )
_UpperCAmelCase = k
_UpperCAmelCase = {f'''pass@{k}''': estimate_pass_at_k(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
def estimator(lowercase ,lowercase ,lowercase ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1 ) )
if isinstance(lowercase ,lowercase ):
_UpperCAmelCase = itertools.repeat(lowercase ,len(lowercase ) )
else:
assert len(lowercase ) == len(lowercase )
_UpperCAmelCase = iter(lowercase )
return np.array([estimator(int(lowercase ) ,int(lowercase ) ,lowercase ) for n, c in zip(lowercase ,lowercase )] )
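A standalone check of the unbiased pass@k estimator above, which evaluates pass@k = 1 - C(n-c, k) / C(n, k) as a numerically stable product; the numbers reproduce the docstring example of two candidates with one passing:

import numpy as np


def pass_at_k(n: int, c: int, k: int) -> float:
    # n generated samples, c of which pass; probability that at least one of
    # k randomly drawn samples passes.
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))


print(pass_at_k(n=2, c=1, k=1))  # 0.5
print(pass_at_k(n=2, c=1, k=2))  # 1.0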
| 30 | """simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_UpperCAmelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
#
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30 | 1 |
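An independent numpy cross-check of FFT-based polynomial multiplication, with coefficients ordered low degree first; on small inputs it should agree with the class above, e.g. (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2:

import numpy as np


def poly_mul_fft(a, b):
    n = len(a) + len(b) - 1
    size = 1 << (n - 1).bit_length()  # pad to the next power of two
    fa = np.fft.rfft(a, size)
    fb = np.fft.rfft(b, size)
    return np.round(np.fft.irfft(fa * fb, size)[:n]).astype(int)


print(poly_mul_fft([1, 2], [3, 4]))  # [ 3 10  8]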
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
A__ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
A__ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
A__ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
A__ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
A__ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
A__ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
A__ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
A__ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
A__ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
A__ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
A__ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
A__ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
A__ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
A__ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
A__ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
A__ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
A__ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
A__ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
A__ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
A__ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
A__ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
A__ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
A__ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A__ = key.split('.' )
A__ , A__ = int(key_split[2] ), int(key_split[4] )
A__ = config.vision_config.hidden_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A__ = key.split('.' )
A__ = int(key_split[3] )
A__ = config.text_config.hidden_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[
dim : dim * 2, :
]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = rename_key(SCREAMING_SNAKE_CASE__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
A__ = val.squeeze_()
else:
A__ = val
return orig_state_dict
def _snake_case( ) -> Optional[int]:
'''simple docstring'''
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]="groupvit-gcc-yfcc" , SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> Dict:
'''simple docstring'''
A__ = GroupViTConfig()
A__ = GroupViTModel(SCREAMING_SNAKE_CASE__ ).eval()
A__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )['model']
A__ = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ , A__ = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(SCREAMING_SNAKE_CASE__ ) == 0)
# verify result
A__ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
A__ = prepare_img()
A__ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
with torch.no_grad():
A__ = model(**SCREAMING_SNAKE_CASE__ )
if model_name == "groupvit-gcc-yfcc":
A__ = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
A__ = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Successfully saved processor and model to' , SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(SCREAMING_SNAKE_CASE__ , organization='nielsr' )
model.push_to_hub(SCREAMING_SNAKE_CASE__ , organization='nielsr' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
lowercase_ = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 7 |
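The conversion above splits each fused qkv projection of shape (3 * dim, dim) into separate query, key, and value matrices with plain slicing. A toy-dimension illustration of that slicing:

import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q = qkv_weight[:dim, :]           # rows 0 .. dim-1
k = qkv_weight[dim : dim * 2, :]  # rows dim .. 2*dim-1
v = qkv_weight[-dim:, :]          # last dim rows
assert q.shape == k.shape == v.shape == (dim, dim)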
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase ( lowerCAmelCase : int = 200_0000 ):
"""simple docstring"""
__magic_name__ : list[int] = [0]
__magic_name__ : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__magic_name__ : int = 0
# the area corresponding to the grid that gives the product closest to target
__magic_name__ : int = 0
# an estimate of b, using the quadratic formula
__magic_name__ : float
# the largest integer less than b_estimate
__magic_name__ : int
    # the smallest integer greater than b_estimate
__magic_name__ : int
# the triangle number corresponding to b_floor
__magic_name__ : int
# the triangle number corresponding to b_ceil
__magic_name__ : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__magic_name__ : Dict = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__magic_name__ : List[Any] = floor(lowerCAmelCase )
__magic_name__ : Dict = ceil(lowerCAmelCase )
__magic_name__ : Any = triangle_numbers[b_floor]
__magic_name__ : Optional[int] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : Any = triangle_b_first_guess * triangle_a
__magic_name__ : Any = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : List[str] = triangle_b_second_guess * triangle_a
__magic_name__ : Optional[int] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F'{solution() = }') | 331 | 0 |
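The search above exploits the identity that an a x b grid contains T(a) * T(b) axis-aligned rectangles, where T(n) = n(n + 1) / 2. A slow but exhaustive cross-check of solution(); the published answer for the 2,000,000 target is an area of 2772 (a 77 x 36 grid with 1,999,998 rectangles):

def brute_force(target: int) -> int:
    def tri(n: int) -> int:
        return n * (n + 1) // 2

    best_diff, best_area = float("inf"), 0
    a = 1
    while tri(a) <= 2 * target:  # generous bound so the b = 1 column is covered
        b = 1
        while True:
            rects = tri(a) * tri(b)
            if abs(target - rects) < best_diff:
                best_diff, best_area = abs(target - rects), a * b
            if rects > target:
                break
            b += 1
        a += 1
    return best_area


print(brute_force(2_000_000))  # 2772, matching solution()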
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def snake_case (UpperCAmelCase__ ) -> None:
UpperCamelCase_ ,UpperCamelCase_: Dict = analyze_text(UpperCAmelCase__ )
UpperCamelCase_: List[str] = list(' ' + ascii_lowercase )
    # total character count, used to normalize counts into probabilities.
UpperCamelCase_: Dict = sum(single_char_strings.values() )
    # first-order entropy over single characters
UpperCamelCase_: List[str] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase_: List[Any] = single_char_strings[ch]
UpperCamelCase_: int = my_str / all_sum
my_fir_sum += prob * math.loga(UpperCAmelCase__ ) # entropy formula.
# print entropy
print(F'''{round(-1 * my_fir_sum ):.1f}''' )
    # second-order entropy over character pairs
UpperCamelCase_: int = sum(two_char_strings.values() )
UpperCamelCase_: Optional[int] = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            UpperCamelCase_: Tuple = cha + chb
if sequence in two_char_strings:
UpperCamelCase_: str = two_char_strings[sequence]
UpperCamelCase_: str = int(UpperCAmelCase__ ) / all_sum
my_sec_sum += prob * math.loga(UpperCAmelCase__ )
# print second entropy
print(F'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def snake_case (UpperCAmelCase__ ) -> tuple[dict, dict]:
UpperCamelCase_: Any = Counter() # type: ignore
UpperCamelCase_: Union[str, Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(UpperCAmelCase__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def snake_case () -> Tuple:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 292 |
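A hand-sized check of the first-order entropy the script above prints: for the string "aab", P(a) = 2/3 and P(b) = 1/3, so H = -(2/3) log2(2/3) - (1/3) log2(1/3), roughly 0.918 bits per character:

import math
from collections import Counter

counts = Counter("aab")
total = sum(counts.values())
h = -sum((c / total) * math.log2(c / total) for c in counts.values())
print(round(h, 3))  # 0.918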
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE') for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''')
    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME', None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
                'your training function. Restart your notebook and make sure no cells initializes an '
                '`Accelerator`.')
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type='TPU')
        print(F'''Launching a training on {num_processes} TPU cores.''')
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method='fork')
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print('Launching training on one GPU.')
        else:
            print('Launching training on one CPU.')
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.')
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
                    'inside your training function. Restart your notebook and make sure no cells initializes an '
                    '`Accelerator`.')
            if torch.cuda.is_initialized():
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
                    'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
                    'function.')
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr='127.0.0.1', master_port=use_port, mixed_precision=mixed_precision):
                launcher = PrepareForLaunch(function, distributed_type='MULTI_GPU')
                print(F'''Launching training on {num_processes} GPUs.''')
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
                            'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
                            'Please review your imports and test them when running the `notebook_launcher()` to identify '
                            'which one is problematic.') from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = '1'
                print('Launching training on MPS.')
            elif torch.cuda.is_available():
                print('Launching training on one GPU.')
            else:
                print('Launching training on CPU.')
            function(*args)
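# Hedged usage sketch (not part of the original file): in a notebook, a training
# function and its arguments are handed to the launcher, e.g.
#
#   def training_function(config):
#       ...  # build the model, dataloaders and Accelerator() inside this function
#
#   notebook_launcher(training_function, args=({'lr': 1e-4},), num_processes=2)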
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes, master_addr='127.0.0.1', master_port='29500', accelerate_mixed_precision='no', accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu='yes', ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method='fork') | 292 | 1 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('KEY')
VAL = TypeVar('VAL')


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ' ,'.join(
            f'{item.key}: {item.val}' for item in self._buckets if item)
        return f'HashMap({val_string})'
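# Hedged usage sketch (not part of the original file): the map behaves like any
# MutableMapping, with open addressing and automatic resizing underneath.
#
#   hm = HashMap()
#   hm['key'] = 1
#   hm['other'] = 2
#   del hm['key']
#   print(len(hm), hm['other'])  # 1 2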
| 63 |
from __future__ import annotations
def all_unique(nums: list[int]) -> bool:
    '''Return True when every element of the list is distinct.'''
    return len(set(nums)) == len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
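    # Quick illustrative check (not part of the original file):
    print(all_unique([1, 2, 3]))  # True
    print(all_unique([1, 2, 2]))  # False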
| 339 | 0 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table):
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f'{rows}x{columns} array:\n{table}'
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists')
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
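    # Small worked example (illustrative; this matrix is an assumption, not part
    # of the original tests):
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    print(lower)
    print(upper)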
| 364 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_img2img_frog.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
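# Hedged standalone usage sketch (not part of the original tests): the prior
# turns a prompt into image embeddings, which the img2img decoder combines with
# an init image; `strength` controls how far generation departs from it.
#
#   prior = KandinskyV22PriorPipeline.from_pretrained('kandinsky-community/kandinsky-2-2-prior')
#   decoder = KandinskyV22Img2ImgPipeline.from_pretrained('kandinsky-community/kandinsky-2-2-decoder')
#   emb, neg_emb = prior('A red cartoon frog, 4k').to_tuple()
#   out = decoder(image=init_image, image_embeds=emb, negative_image_embeds=neg_emb, strength=0.2)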
| 131 | 0 |
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat
        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f'{self.value}: {self.prior:.5}': (self.left, self.right)}, indent=1)

    def __str__(self):
        value = str(self.value) + ' '
        left = str(self.left or '')
        right = str(self.right or '')
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=',')
        inorder(root.right)
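# Hedged usage sketch (not part of the original file):
#
#   root = None
#   for v in [5, 3, 8]:
#       root = insert(root, v)
#   inorder(root)         # prints 3,5,8,
#   root = erase(root, 5)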
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('Unknown command')
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('goodbye!')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 336 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    parser = ArgumentParser()
    parser.add_argument('--streaming', type=bool)
    parser.add_argument('--local_rank', type=int)
    parser.add_argument('--num_workers', type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'shards': [f"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""")
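# Illustrative launch command (an assumption; the script's real file name is not
# shown here):
#
#   torchrun --nproc_per_node=2 <this_script>.py --streaming True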
if __name__ == "__main__":
main()
| 16 | 0 |
"""simple docstring"""
def gray_code(bit_count: int) -> list:
    if bit_count < 0:
        raise ValueError('The given input must be positive')
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = '0' + smaller_sequence[i]
        sequence.append(generated_no)
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = '1' + smaller_sequence[i]
        sequence.append(generated_no)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
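    # Illustrative check (not part of the original file): the 2-bit Gray code
    # visits every value while changing one bit at a time.
    print(gray_code(2))  # [0, 1, 3, 2]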
| 326 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.')
        main_process_only = kwargs.pop('main_process_only', True)
        in_order = kwargs.pop('in_order', False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
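# Hedged usage sketch (not part of the original file):
#
#   logger = get_logger(__name__, log_level='INFO')
#   logger.info('Printed once, on the main process', main_process_only=True)
#   logger.info('Printed by every process, in rank order', main_process_only=False, in_order=True)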
| 326 | 1 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    return fibonacci_digits_index(n)
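# Illustrative values (worked out from the definitions above, not part of the
# original file): the first Fibonacci number with 3 digits is fibonacci(12) == 144,
# so fibonacci_digits_index(3) == 12 and solution(3) == 12.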
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 31 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        image_processor_map = {
            'do_resize': True,
            'size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.5, 0.5, 0.5],
            'image_std': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 0 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ['image_processor', 'feature_extractor']
    image_processor_class = 'TvltImageProcessor'
    feature_extractor_class = 'TvltFeatureExtractor'

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.')
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
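# Hedged usage sketch (not part of the original file); the checkpoint name is an
# assumption:
#
#   processor = TvltProcessor.from_pretrained('ZinengTang/tvlt-base')
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)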
| 370 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
| 271 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='RegNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
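# Hedged standalone usage sketch (not part of the original tests); the
# checkpoint is the first entry of REGNET_PRETRAINED_MODEL_ARCHIVE_LIST:
#
#   processor = AutoImageProcessor.from_pretrained('facebook/regnet-y-040')
#   model = RegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
#   logits = model(**processor(images=image, return_tensors='pt')).logits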
| 186 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello world! cécé herlolip"""
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our RoBERTa config:''', config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads['''mnli'''].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads['''mnli'''].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads['''mnli'''].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads['''mnli'''](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''')  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print('''Do both models output the same tensors?''', '''🔥''' if success else '''💩''')
    if not success:
        raise Exception('''Something went wRoNg''')
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 186 | 1 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained('''camembert-base''')
model = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
masked_input = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3)) | 228 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(''',''')
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(''':''')
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
__UpperCAmelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
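# Hedged usage sketch (not part of the original file):
#
#   from torch.optim import AdamW
#   optimizer = AdamW(model.parameters(), lr=1e-4)
#   scheduler = get_linear_schedule_with_warmup(
#       optimizer, num_warmup_steps=500, num_training_steps=10_000)
#   # each training step: optimizer.step(); scheduler.step()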
def _snake_case ( A , A , A = None , A = None , A = None , A = 1 , A = 1.0 , A = -1 , ) -> int:
lowerCAmelCase__ = SchedulerType(A )
lowerCAmelCase__ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(A , last_epoch=A )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(A , step_rules=A , last_epoch=A )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(A , num_warmup_steps=A , last_epoch=A )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
A , num_warmup_steps=A , num_training_steps=A , num_cycles=A , last_epoch=A , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
A , num_warmup_steps=A , num_training_steps=A , power=A , last_epoch=A , )
return schedule_func(
A , num_warmup_steps=A , num_training_steps=A , last_epoch=A ) | 228 | 1 |
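# A minimal, self-contained usage sketch (assumes only `torch`) of the
# warmup-then-linear-decay schedule dispatched above: the LR multiplier
# ramps from 0 to 1 over num_warmup_steps, then decays linearly to 0 at
# num_training_steps.
import torch
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
num_warmup_steps, num_training_steps = 10, 100

def lr_lambda(current_step):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

scheduler = LambdaLR(optimizer, lr_lambda)
for _ in range(3):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # ~[0.0003] after three warmup steps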
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:str = int(__snake_case )
SCREAMING_SNAKE_CASE:Any = t // 3600, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def A_ ( snake_case , snake_case , snake_case , snake_case , snake_case=300 ):
# docstyle-ignore
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Optional[Any] = "<table border=\"1\" class=\"dataframe\">\n"
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
SCREAMING_SNAKE_CASE:int = F'''{elt:.6f}''' if isinstance(__snake_case , __snake_case ) else str(__snake_case )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
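# The table helper above renders float cells with six decimal places; a
# standalone illustration of that formatting rule:
elt = 0.123456789
print(f"{elt:.6f}" if isinstance(elt, float) else str(elt))  # 0.123457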
class _snake_case :
_A : List[Any] = 5
_A : Union[str, Any] = 0.2
def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int = None ,SCREAMING_SNAKE_CASE__ : str = True ,SCREAMING_SNAKE_CASE__ : Tuple = None ,SCREAMING_SNAKE_CASE__ : List[Any] = 300 ,):
SCREAMING_SNAKE_CASE:int = total
SCREAMING_SNAKE_CASE:int = "" if prefix is None else prefix
SCREAMING_SNAKE_CASE:Union[str, Any] = leave
SCREAMING_SNAKE_CASE:Optional[int] = parent
SCREAMING_SNAKE_CASE:Optional[int] = width
SCREAMING_SNAKE_CASE:Union[str, Any] = None
SCREAMING_SNAKE_CASE:str = None
SCREAMING_SNAKE_CASE:List[str] = None
def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = False ,SCREAMING_SNAKE_CASE__ : List[str] = None ):
SCREAMING_SNAKE_CASE:str = value
if comment is not None:
SCREAMING_SNAKE_CASE:Optional[Any] = comment
if self.last_value is None:
SCREAMING_SNAKE_CASE:List[Any] = time.time()
SCREAMING_SNAKE_CASE:str = value
SCREAMING_SNAKE_CASE:Tuple = None
SCREAMING_SNAKE_CASE:Any = self.warmup
SCREAMING_SNAKE_CASE:Optional[int] = 1
self.update_bar(_a )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for ,self.total ):
if self.first_calls > 0:
self.first_calls -= 1
SCREAMING_SNAKE_CASE:Union[str, Any] = time.time()
SCREAMING_SNAKE_CASE:Optional[int] = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
SCREAMING_SNAKE_CASE:List[str] = self.elapsed_time / (value - self.start_value)
else:
SCREAMING_SNAKE_CASE:List[str] = None
if value >= self.total:
SCREAMING_SNAKE_CASE:int = self.total
SCREAMING_SNAKE_CASE:List[str] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
SCREAMING_SNAKE_CASE:List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(_a )
SCREAMING_SNAKE_CASE:Any = value
SCREAMING_SNAKE_CASE:List[str] = current_time
if self.average_time_per_item is None:
SCREAMING_SNAKE_CASE:int = 1
else:
SCREAMING_SNAKE_CASE:Union[str, Any] = max(int(self.update_every / self.average_time_per_item ) ,1 )
def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Tuple=None ):
SCREAMING_SNAKE_CASE:List[str] = " " * (len(str(self.total ) ) - len(str(_a ) )) + str(_a )
if self.elapsed_time is None:
SCREAMING_SNAKE_CASE:Optional[Any] = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
SCREAMING_SNAKE_CASE:Optional[int] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
SCREAMING_SNAKE_CASE:str = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:Dict = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
SCREAMING_SNAKE_CASE:Any = disp.display(disp.HTML(self.html_code ) ,display_id=_a )
else:
self.output.update(disp.HTML(self.html_code ) )
def __UpperCamelCase ( self : Union[str, Any] ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("" ) )
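# Worked number for the refresh-throttling rule in update() above: with
# the class default update_every = 0.2 s and an observed 0.05 s per item,
# the bar waits max(int(0.2 / 0.05), 1) = 4 items between redraws.
print(max(int(0.2 / 0.05), 1))  # 4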
class _snake_case ( a_ ):
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ):
super().__init__(_a )
SCREAMING_SNAKE_CASE:Optional[Any] = None if column_names is None else [column_names]
SCREAMING_SNAKE_CASE:str = None
def __UpperCamelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE:List[str] = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
SCREAMING_SNAKE_CASE:str = disp.display(disp.HTML(self.html_code ) ,display_id=_a )
else:
self.output.update(disp.HTML(self.html_code ) )
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Dict ):
if self.inner_table is None:
SCREAMING_SNAKE_CASE:Any = [list(values.keys() ), list(values.values() )]
else:
SCREAMING_SNAKE_CASE:Optional[int] = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(_a )
SCREAMING_SNAKE_CASE:Optional[int] = columns
self.inner_table.append([values[c] for c in columns] )
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=300 ):
SCREAMING_SNAKE_CASE:Optional[Any] = NotebookProgressBar(_a ,prefix=_a ,parent=self ,width=_a )
return self.child_bar
def __UpperCamelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = None
self.display()
class _snake_case ( a_ ):
def __init__( self : str ):
SCREAMING_SNAKE_CASE:Union[str, Any] = None
SCREAMING_SNAKE_CASE:List[Any] = None
SCREAMING_SNAKE_CASE:Tuple = False
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE:int = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
SCREAMING_SNAKE_CASE:int = 0
SCREAMING_SNAKE_CASE:Union[str, Any] = 0
SCREAMING_SNAKE_CASE:str = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss" )
SCREAMING_SNAKE_CASE:List[Any] = NotebookTrainingTracker(state.max_steps ,_a )
def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE:int = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 ,comment=F'''Epoch {epoch}/{state.num_train_epochs}''' ,force_update=self._force_next_update ,)
SCREAMING_SNAKE_CASE:Union[str, Any] = False
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict=None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if not has_length(_a ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
SCREAMING_SNAKE_CASE:Dict = self.training_tracker.add_child(len(_a ) )
else:
SCREAMING_SNAKE_CASE:Union[str, Any] = NotebookProgressBar(len(_a ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if self.prediction_bar is not None:
self.prediction_bar.close()
SCREAMING_SNAKE_CASE:Any = None
def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : str=None ,**SCREAMING_SNAKE_CASE__ : int ):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
SCREAMING_SNAKE_CASE:Any = {"Training Loss": logs["loss"]}
# First column is necessarily Step since we're not in epoch eval strategy
SCREAMING_SNAKE_CASE:Dict = state.global_step
self.training_tracker.write_line(_a )
def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int=None ,**SCREAMING_SNAKE_CASE__ : int ):
if self.training_tracker is not None:
SCREAMING_SNAKE_CASE:Dict = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history ):
if "loss" in log:
SCREAMING_SNAKE_CASE:Dict = log["loss"]
break
if self.first_column == "Epoch":
SCREAMING_SNAKE_CASE:Tuple = int(state.epoch )
else:
SCREAMING_SNAKE_CASE:Optional[int] = state.global_step
SCREAMING_SNAKE_CASE:Any = "eval"
for k in metrics:
if k.endswith("_loss" ):
SCREAMING_SNAKE_CASE:Tuple = re.sub(R"\_loss$" ,"" ,_a )
SCREAMING_SNAKE_CASE:int = metrics.pop("total_flos" ,_a )
SCREAMING_SNAKE_CASE:Any = metrics.pop("epoch" ,_a )
SCREAMING_SNAKE_CASE:Any = metrics.pop(F'''{metric_key_prefix}_runtime''' ,_a )
SCREAMING_SNAKE_CASE:Any = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' ,_a )
SCREAMING_SNAKE_CASE:List[Any] = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' ,_a )
SCREAMING_SNAKE_CASE:int = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' ,_a )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
SCREAMING_SNAKE_CASE:Optional[int] = v
else:
SCREAMING_SNAKE_CASE:List[str] = k.split("_" )
SCREAMING_SNAKE_CASE:Tuple = " ".join([part.capitalize() for part in splits[1:]] )
SCREAMING_SNAKE_CASE:Any = v
self.training_tracker.write_line(_a )
self.training_tracker.remove_child()
SCREAMING_SNAKE_CASE:List[Any] = None
# Evaluation takes a long time so we should force the next update.
SCREAMING_SNAKE_CASE:Union[str, Any] = True
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : int ):
self.training_tracker.update(
state.global_step ,comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' ,force_update=_a )
SCREAMING_SNAKE_CASE:Union[str, Any] = None
| 139 |
"""simple docstring"""
def __magic_name__ ( __snake_case : int , __snake_case : int , __snake_case : int ) -> float:
lowercase : List[Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# arithmetic series sum: S_n = n/2 * (2a + (n - 1)d)
return total
def __magic_name__ ( ) -> int:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
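# Quick sanity check of the closed form above, S_n = n/2 * (2a + (n - 1)d),
# against a brute-force sum (standalone, no dependencies):
assert (10 / 2) * (2 * 1 + (10 - 1) * 1) == sum(range(1, 11)) == 55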
| 202 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
lowerCamelCase = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCamelCase = 1024
lowerCamelCase = 4096
lowerCamelCase = 24
lowerCamelCase = 16
lowerCamelCase = [5, 11, 17, 23]
lowerCamelCase = [256, 512, 1024, 1024]
lowerCamelCase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCamelCase = 768
lowerCamelCase = [1, 1, 1, 0.5]
lowerCamelCase = [256, 512, 768, 768]
lowerCamelCase = 150
lowerCamelCase = 16
lowerCamelCase = (1, 384, 384)
lowerCamelCase = False
lowerCamelCase = """project"""
if "ade" in checkpoint_url:
lowerCamelCase = True
lowerCamelCase = 768
lowerCamelCase = [1, 1, 1, 0.5]
lowerCamelCase = 150
lowerCamelCase = 16
lowerCamelCase = """huggingface/label-files"""
lowerCamelCase = """ade20k-id2label.json"""
lowerCamelCase = json.load(open(cached_download(hf_hub_url(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) ) , """r""" ) )
lowerCamelCase = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase = idalabel
lowerCamelCase = {v: k for k, v in idalabel.items()}
lowerCamelCase = [1, 150, 480, 480]
return config, expected_shape
def __lowerCamelCase ( lowerCamelCase__ : str ):
'''simple docstring'''
lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ : str ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCamelCase = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCamelCase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCamelCase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCamelCase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
lowerCamelCase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCamelCase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCamelCase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCamelCase = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCamelCase = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCamelCase = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCamelCase = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCamelCase = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCamelCase = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCamelCase = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
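# Standalone illustration of the refinenet index remap above: refinenet4
# maps to fusion_stage.layers.0, ..., refinenet1 to fusion_stage.layers.3.
for layer_idx in (1, 2, 3, 4):
    print(f"refinenet{layer_idx} -> fusion_stage.layers.{abs(layer_idx - 4)}")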
def __lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase = in_proj_weight[: config.hidden_size, :]
lowerCamelCase = in_proj_bias[: config.hidden_size]
lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase = in_proj_bias[-config.hidden_size :]
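# Standalone sketch (assumes torch) of the fused-QKV split performed above:
# a (3*H, H) in-projection matrix is sliced row-wise into three (H, H)
# blocks for query, key and value.
import torch
hidden = 8
fused = torch.randn(3 * hidden, hidden)
q, k, v = fused[:hidden], fused[hidden : 2 * hidden], fused[-hidden:]
assert q.shape == k.shape == v.shape == (hidden, hidden)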
def __lowerCamelCase ( ):
'''simple docstring'''
lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase = get_dpt_config(lowerCamelCase__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase = torch.load(lowerCamelCase__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(lowerCamelCase__ )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase = state_dict.pop(lowerCamelCase__ )
lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# load HuggingFace model
lowerCamelCase = DPTForSemanticSegmentation(lowerCamelCase__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
# Check outputs on an image
lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
lowerCamelCase = DPTImageProcessor(size=lowerCamelCase__ )
lowerCamelCase = prepare_img()
lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors="""pt""" )
# forward pass
lowerCamelCase = model(**lowerCamelCase__ ).logits if """ade""" in checkpoint_url else model(**lowerCamelCase__ ).predicted_depth
if show_prediction:
lowerCamelCase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=lowerCamelCase__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 66 |
UpperCAmelCase : Tuple = "Tobias Carryer"
from time import time
class __lowercase :
"""simple docstring"""
def __init__( self , A , A , A , A=int(time() ) ) -> Optional[int]: # noqa: B008
'''simple docstring'''
lowerCamelCase = multiplier
lowerCamelCase = increment
lowerCamelCase = modulo
lowerCamelCase = seed
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
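# Determinism sketch mirroring the recurrence above,
# seed = (multiplier * seed + increment) % modulo: two runs with the same
# parameters and seed produce identical streams (standalone, no deps).
def lcg_stream(multiplier, increment, modulo, seed, n):
    out = []
    for _ in range(n):
        seed = (multiplier * seed + increment) % modulo
        out.append(seed)
    return out

assert lcg_stream(1664525, 1013904223, 2 << 31, 42, 5) == lcg_stream(1664525, 1013904223, 2 << 31, 42, 5)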
if __name__ == "__main__":
# Show the LCG in action.
UpperCAmelCase : List[Any] = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
| 66 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ : str = get_tests_dir('fixtures')
lowerCAmelCase__ : List[str] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase__ : List[Any] = get_tests_dir('fixtures/dummy-config.json')
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = 0
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase__ = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ ,'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('bert-base' )
def __lowerCAmelCase ( self : List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ,revision='aaaaaa' )
def __lowerCAmelCase ( self : List[str] ):
with self.assertRaisesRegex(
lowerCamelCase__ ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def __lowerCAmelCase ( self : int ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ )
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
def __lowerCAmelCase ( self : str ):
try:
AutoConfig.register('custom' ,lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ ,lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ ,lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase__ = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : Optional[int] ):
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = True
try:
AutoConfig.register('custom' ,lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ ,lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowerCamelCase__ ,'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 98 | """simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = (PNDMScheduler,)
snake_case__ = (("num_inference_steps", 50),)
def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
UpperCAmelCase__ = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase__ )
return config
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
return sample
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCAmelCase ( self : List[Any] ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def __lowerCAmelCase ( self : Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
# earlier versions of set_timesteps() caused an indexing error on alphas when num_inference_steps was a power of 3
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before the power-of-3 fix, this would error on the first step, so we only need to run two steps
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
def __lowerCAmelCase ( self : int ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _snake_case , )
if isinstance(_snake_case , torch.Tensor ):
return image
elif isinstance(_snake_case , PIL.Image.Image ):
lowerCAmelCase : Optional[int] = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowerCAmelCase, lowerCAmelCase : int = image[0].size
lowerCAmelCase, lowerCAmelCase : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
lowerCAmelCase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
lowerCAmelCase : int = np.concatenate(_snake_case , axis=0 )
lowerCAmelCase : Optional[Any] = np.array(_snake_case ).astype(np.floataa ) / 255.0
lowerCAmelCase : List[Any] = image.transpose(0 , 3 , 1 , 2 )
lowerCAmelCase : List[str] = 2.0 * image - 1.0
lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case )
elif isinstance(image[0] , torch.Tensor ):
lowerCAmelCase : Any = torch.cat(_snake_case , dim=0 )
return image
def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ):
if isinstance(_snake_case , torch.Tensor ):
return mask
elif isinstance(_snake_case , PIL.Image.Image ):
lowerCAmelCase : str = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
lowerCAmelCase, lowerCAmelCase : int = mask[0].size
lowerCAmelCase, lowerCAmelCase : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
lowerCAmelCase : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
lowerCAmelCase : Optional[int] = np.concatenate(_snake_case , axis=0 )
lowerCAmelCase : Dict = mask.astype(np.floataa ) / 255.0
lowerCAmelCase : List[str] = 0
lowerCAmelCase : Optional[int] = 1
lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case )
elif isinstance(mask[0] , torch.Tensor ):
lowerCAmelCase : Optional[int] = torch.cat(_snake_case , dim=0 )
return mask
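# Standalone numpy sketch of the two normalizations above: images go from
# [0, 255] to [-1, 1] via 2*x/255 - 1, and masks are binarized at a 0.5
# threshold (the thresholding step is an assumption based on what the two
# constant assignments above appear to implement).
import numpy as np
img = np.array([0.0, 127.5, 255.0], dtype=np.float32) / 255.0
print(2.0 * img - 1.0)  # [-1.  0.  1.]
mask = np.array([0.2, 0.5, 0.9], dtype=np.float32)
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
print(mask)  # [0. 1. 1.]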
class snake_case_( a__ ):
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ):
super().__init__()
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : int = 2_5_0 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ):
lowerCAmelCase : Optional[Any] = image
lowerCAmelCase : Tuple = _preprocess_image(UpperCamelCase_ )
lowerCAmelCase : int = original_image.to(device=self.device , dtype=self.unet.dtype )
lowerCAmelCase : Optional[Any] = _preprocess_mask(UpperCamelCase_ )
lowerCAmelCase : str = mask_image.to(device=self.device , dtype=self.unet.dtype )
lowerCAmelCase : Union[str, Any] = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase : Union[str, Any] = original_image.shape
lowerCAmelCase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.device )
lowerCAmelCase : Optional[int] = eta
lowerCAmelCase : List[str] = self.scheduler.timesteps[0] + 1
lowerCAmelCase : List[str] = generator[0] if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
lowerCAmelCase : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# compute previous image: x_t -> x_t-1
lowerCAmelCase : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
lowerCAmelCase : Optional[Any] = self.scheduler.undo_step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : List[Any] = t
lowerCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase : Tuple = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 314 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_( a__ ):
__UpperCamelCase = (DDPMScheduler,)
def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCamelCase_ )
return config
def lowerCamelCase__ ( self : Optional[int] ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
self.check_over_configs(thresholding=UpperCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )
def lowerCamelCase__ ( self : Tuple ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = self.scheduler_classes[0]
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ )
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : Union[str, Any] = pred_prev_sample
lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Dict = len(UpperCamelCase_ )
lowerCAmelCase : Any = self.dummy_model()
lowerCAmelCase : Any = self.dummy_sample_deter
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase : List[Any] = pred_prev_sample
lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) )
lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Dict = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
lowerCAmelCase : Dict = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase_ ):
if i == len(UpperCamelCase_ ) - 1:
lowerCAmelCase : List[Any] = -1
else:
lowerCAmelCase : Union[str, Any] = timesteps[i + 1]
lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ )
lowerCAmelCase : Dict = prev_t.item()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config()
lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0]
lowerCAmelCase : int = len(UpperCamelCase_ )
with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = self.scheduler_classes[0]
lowerCAmelCase : Tuple = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCamelCase_ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
| 314 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''deberta-v2'''
def __init__( self : Any , _A : Tuple=12_8100 , _A : int=1536 , _A : int=24 , _A : List[Any]=24 , _A : Any=6144 , _A : List[Any]="gelu" , _A : List[str]=0.1 , _A : Any=0.1 , _A : Tuple=512 , _A : Optional[int]=0 , _A : List[Any]=0.02 , _A : Optional[int]=1e-7 , _A : Tuple=False , _A : Optional[Any]=-1 , _A : Union[str, Any]=0 , _A : Union[str, Any]=True , _A : List[Any]=None , _A : Optional[int]=0 , _A : Optional[int]="gelu" , **_A : List[str] , ):
"""simple docstring"""
super().__init__(**_A )
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Any = type_vocab_size
__SCREAMING_SNAKE_CASE : Any = initializer_range
__SCREAMING_SNAKE_CASE : str = relative_attention
__SCREAMING_SNAKE_CASE : Tuple = max_relative_positions
__SCREAMING_SNAKE_CASE : List[str] = pad_token_id
__SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(_A ) == str:
__SCREAMING_SNAKE_CASE : List[str] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
__SCREAMING_SNAKE_CASE : Optional[Any] = pos_att_type
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
__SCREAMING_SNAKE_CASE : Any = kwargs.get('''pooler_hidden_size''' , _A )
__SCREAMING_SNAKE_CASE : Tuple = pooler_dropout
__SCREAMING_SNAKE_CASE : int = pooler_hidden_act
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return 12
def UpperCAmelCase__ ( self : List[Any] , _A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _A : int = -1 , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional["TensorType"] = None , _A : int = 3 , _A : int = 40 , _A : int = 40 , _A : "PreTrainedTokenizerBase" = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = super().generate_dummy_inputs(preprocessor=_A , framework=_A )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
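# A minimal sketch of how the dynamic-axes mapping built above is typically
# consumed. This assumes the standard `transformers.onnx` export convention
# (input name -> {axis index: axis name}); the names below are illustrative.
if __name__ == "__main__":
    from collections import OrderedDict

    dynamic_axis = {0: "batch", 1: "sequence"}
    onnx_inputs = OrderedDict(
        [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)]
    )
    print(onnx_inputs)  # the axes that may vary at export time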
| 303 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowercase_ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( snake_case ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : int = False
elif args.student_type == "gpt2":
__SCREAMING_SNAKE_CASE : Optional[int] = False
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : Dict = False
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case , required=snake_case , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case , required=snake_case , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
        '''--student_type''' , type=snake_case , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''The student type (DistilBERT, RoBERTa, GPT-2).''' , )
parser.add_argument('''--student_config''' , type=snake_case , required=snake_case , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case , type=snake_case , help='''Load student initialization checkpoint.''' )
parser.add_argument(
        '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''Teacher type (BERT, RoBERTa, GPT-2).''' )
parser.add_argument('''--teacher_name''' , type=snake_case , required=snake_case , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=snake_case , help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
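    # NOTE: with `store_false`, passing --group_by_size on the command line
    # *disables* length-based grouping; omitting the flag keeps the default of
    # True that the help text describes.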
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case , default=4_000 , help='''Checkpoint interval.''' )
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
sanity_checks(snake_case )
# ARGS #
init_gpu_params(snake_case )
set_seed(snake_case )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case ) , snake_case , indent=4 )
git_log(args.dump_path )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = MODEL_CLASSES[args.student_type]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__SCREAMING_SNAKE_CASE : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__SCREAMING_SNAKE_CASE : Any = tokenizer.all_special_tokens.index(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__SCREAMING_SNAKE_CASE : Any = special_tok_ids
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : List[str] = pickle.load(snake_case )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = np.maximum(snake_case , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__SCREAMING_SNAKE_CASE : Any = 0.0 # do not predict special tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(snake_case )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = LmSeqsDataset(params=snake_case , data=snake_case )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_config_class.from_pretrained(args.student_config )
__SCREAMING_SNAKE_CASE : Dict = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case )
else:
__SCREAMING_SNAKE_CASE : str = student_model_class(snake_case )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__SCREAMING_SNAKE_CASE : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case , snake_case )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case , snake_case )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__SCREAMING_SNAKE_CASE : int = Distiller(
params=snake_case , dataset=snake_case , token_probs=snake_case , student=snake_case , teacher=snake_case )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
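# Example invocation (a sketch; the script name, paths and model id are
# illustrative, and the flag combination satisfies the sanity_checks()
# constraints above: `--mlm` with alpha_mlm > 0 and alpha_clm == 0, and a
# distilbert student paired with a bert teacher):
# python train.py \
#     --student_type distilbert --student_config distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#     --dump_path serialization_dir/my_first_distillation \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle --force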
| 303 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Dict = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = ["PoolFormerFeatureExtractor"]
_SCREAMING_SNAKE_CASE : str = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
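# A short sketch of the lazy-import pattern above (assuming the `_LazyModule`
# helper from `transformers.utils`): `_import_structure` only names the
# submodules, and the heavy import runs the first time an attribute is touched.
# import transformers                       # cheap, no heavy submodule imported yet
# config = transformers.PoolFormerConfig()  # first access triggers the real import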
| 92 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class _snake_case ( logging.LoggerAdapter ):
@staticmethod
def lowerCAmelCase__ ( a__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowerCAmelCase__ ( self , a__ , a__ , *a__ , **a__ ) -> List[Any]:
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
snake_case_ = kwargs.pop("main_process_only" , a__ )
snake_case_ = kwargs.pop("in_order" , a__ )
if self.isEnabledFor(a__ ):
if self._should_log(a__ ):
snake_case_ , snake_case_ = self.process(a__ , a__ )
self.logger.log(a__ , a__ , *a__ , **a__ )
elif in_order:
snake_case_ = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
snake_case_ , snake_case_ = self.process(a__ , a__ )
self.logger.log(a__ , a__ , *a__ , **a__ )
state.wait_for_everyone()
def UpperCamelCase_( snake_case : str , snake_case : str = None ):
'''simple docstring'''
if log_level is None:
snake_case_ = os.environ.get("ACCELERATE_LOG_LEVEL" , snake_case )
snake_case_ = logging.getLogger(snake_case )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case , {} )
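# A usage sketch for the adapter above (assuming the public `accelerate` API;
# the logger name and messages are illustrative). The distributed state must be
# initialized first, exactly as the RuntimeError above enforces.
# from accelerate import Accelerator
# accelerator = Accelerator()
# logger = get_logger(__name__, log_level="INFO")
# logger.info("printed on the main process only")  # main_process_only defaults to True
# logger.info("printed once per rank, in order", main_process_only=False, in_order=True)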
| 92 | 1 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
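# A usage sketch (assuming the public diffusers API; the model id is
# illustrative): schedulers are interchangeable through their shared config,
# which is why they are collected under one namespace here.
# from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
# pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)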
| 283 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
_snake_case = 2_99_79_24_58
# Symbols
_snake_case , _snake_case , _snake_case , _snake_case = symbols('''ct x y z''')
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
        # Speeds below 1 m/s are rejected; physically meaningful inputs are of the order of c
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return 1 / sqrt(1 - beta(SCREAMING_SNAKE_CASE_ ) ** 2 )
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return np.array(
[
[gamma(SCREAMING_SNAKE_CASE_ ), -gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), 0, 0],
[-gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), gamma(SCREAMING_SNAKE_CASE_ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
if event is None:
lowerCamelCase : Tuple = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(SCREAMING_SNAKE_CASE_ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
_snake_case = transform(29_97_92_45)
print('''Example of four vector: ''')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
_snake_case = {ct: c, x: 1, y: 1, z: 1}
_snake_case = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
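# A quick numeric sanity check (not part of the original demo): at v = 0.5c,
# beta = 0.5, so the Lorentz factor is 1 / sqrt(1 - 0.25), about 1.1547.
# print(gamma(0.5 * c))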
| 283 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: str = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[Any] = 'vit_msn'
def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-06 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> Dict:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase : List[str] = attention_probs_dropout_prob
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : List[Any] = layer_norm_eps
UpperCAmelCase : List[str] = image_size
UpperCAmelCase : Optional[int] = patch_size
UpperCAmelCase : str = num_channels
UpperCAmelCase : Union[str, Any] = qkv_bias
| 76 |
"""simple docstring"""
import math
import sys
def _snake_case ( UpperCamelCase : str ):
UpperCAmelCase : Dict = """"""
try:
with open(UpperCamelCase , """rb""" ) as binary_file:
UpperCAmelCase : str = binary_file.read()
for dat in data:
UpperCAmelCase : List[Any] = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def _snake_case ( UpperCamelCase : str ):
UpperCAmelCase : Optional[int] = {"""0""": """0""", """1""": """1"""}
UpperCAmelCase , UpperCAmelCase : Optional[int] = """""", """"""
UpperCAmelCase : int = len(UpperCamelCase )
for i in range(len(UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCAmelCase : Any = lexicon[curr_string]
result += last_match_id
UpperCAmelCase : Any = last_match_id + """0"""
if math.loga(UpperCamelCase ).is_integer():
UpperCAmelCase : Optional[Any] = {}
for curr_key in list(UpperCamelCase ):
UpperCAmelCase : Dict = lexicon.pop(UpperCamelCase )
UpperCAmelCase : int = new_lex
UpperCAmelCase : int = last_match_id + """1"""
index += 1
UpperCAmelCase : List[str] = """"""
return result
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
UpperCAmelCase : Dict = 8
try:
with open(UpperCamelCase , """wb""" ) as opened_file:
UpperCAmelCase : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCamelCase ) , UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def _snake_case ( UpperCamelCase : str ):
UpperCAmelCase : Any = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCAmelCase : List[str] = data_bits[counter:]
UpperCAmelCase : Tuple = data_bits[counter + 1 :]
return data_bits
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
UpperCAmelCase : int = read_file_binary(UpperCamelCase )
UpperCAmelCase : str = remove_prefix(UpperCamelCase )
UpperCAmelCase : Any = decompress_data(UpperCamelCase )
write_file_binary(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
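# Usage sketch (the script and file names are illustrative): despite the
# driver being called `compress`, the pipeline above decompresses: it reads the
# packed bits, strips the prefix added at compression time, expands them
# through the growing lexicon, and writes the result back out as bytes.
# python lempel_ziv_decompress.py compressed.bin restored.bin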
| 76 | 1 |
"""simple docstring"""
class lowerCAmelCase__ : # Public class to implement a graph
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = row
SCREAMING_SNAKE_CASE_ : Tuple = col
SCREAMING_SNAKE_CASE_ : Union[str, Any] = graph
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]]):
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
SCREAMING_SNAKE_CASE_ : Tuple = [-1, 0, 1, -1, 1, -1, 0, 1]
SCREAMING_SNAKE_CASE_ : Tuple = True # Make those cells visited
for k in range(8):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , lowercase_):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int): # And finally, count all islands.
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = [[False for j in range(self.COL)] for i in range(self.ROW)]
SCREAMING_SNAKE_CASE_ : Dict = 0
for i in range(self.ROW):
for j in range(self.COL):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(lowercase_ , lowercase_ , lowercase_)
count += 1
return count
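# A usage sketch (class and method names follow the original "number of
# islands" DFS; the grid is illustrative). With 8-connectivity the classic
# example below contains exactly 5 islands:
# graph = [
#     [1, 1, 0, 0, 0],
#     [0, 1, 0, 0, 1],
#     [1, 0, 0, 1, 1],
#     [0, 0, 0, 0, 0],
#     [1, 0, 1, 0, 1],
# ]
# print(Graph(row=5, col=5, graph=graph).count_islands())  # -> 5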
| 91 |
"""simple docstring"""
from __future__ import annotations
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : int = 0):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = key
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : str , lowercase_ : int):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(lowercase_) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(lowercase_) ^ key) for ch in content]
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : int = 0):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
SCREAMING_SNAKE_CASE_ : List[str] = ''''''
for ch in content:
ans += chr(ord(lowercase_) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : int = 0):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
SCREAMING_SNAKE_CASE_ : List[Any] = ''''''
for ch in content:
ans += chr(ord(lowercase_) ^ key)
return ans
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int = 0):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
try:
with open(lowercase_) as fin, open('''encrypt.out''' , '''w+''') as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowercase_ , lowercase_))
except OSError:
return False
return True
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : int):
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_)
try:
with open(lowercase_) as fin, open('''decrypt.out''' , '''w+''') as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowercase_ , lowercase_))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 91 | 1 |
import numpy as np
from PIL import Image
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = np.array(__lowerCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__a = 0
__a = 0
__a = 0
__a = 0
# compute the shape of the output matrix
__a = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__a = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__a = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a = 0
__a = 0
return updated_arr
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = np.array(__lowerCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__a = 0
__a = 0
__a = 0
__a = 0
# compute the shape of the output matrix
__a = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__a = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__a = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a = 0
__a = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
lowerCamelCase_ : Any = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
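# A quick numeric check (assuming the pooling loops track the distinct
# i / j / mat_i / mat_j counters of the original algorithm): max-pooling a
# 4x4 ramp with size=2 and stride=2 keeps the largest entry of each window.
# >>> maxpooling(np.arange(16).reshape(4, 4), size=2, stride=2)
# array([[ 5.,  7.],
#        [13., 15.]])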
| 197 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[int] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 197 | 1 |
lowercase = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
lowercase = ["a", "b", "c", "d", "e"]
def __UpperCAmelCase ( a_ , a_ , a_):
snake_case_ = start
# add current to visited
visited.append(a_)
snake_case_ = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case_ = topological_sort(a_ , a_ , a_)
# if all neighbors visited add current to sort
sort.append(a_)
# if all vertices haven't been visited select a new one to visit
if len(a_) != len(a_):
for vertice in vertices:
if vertice not in visited:
snake_case_ = topological_sort(a_ , a_ , a_)
# return sort
return sort
if __name__ == "__main__":
lowercase = topological_sort("a", [], [])
print(sort)
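# Tracing the call above: starting from "a", the DFS appends each vertex only
# after all of its out-neighbors, so `sort` ends up as ['c', 'd', 'e', 'b', 'a'];
# read right-to-left (a, b, e, d, c) this is a valid topological order.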
| 178 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=32 , a=5 , a=4 , a=4 , a="gelu" , a=0.0 , a=0.1 , a=True , a=5_12 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Optional[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_multiple_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = weight_tying
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self ) -> Dict:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs()
snake_case_ = True
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self , a , a , a ) -> Any:
snake_case_ = GPTNeoXJapaneseModel(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
snake_case_ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a ) -> Union[str, Any]:
snake_case_ = True
snake_case_ = GPTNeoXJapaneseModel(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a , a ) -> int:
snake_case_ = GPTNeoXJapaneseForCausalLM(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , a , a , a ) -> Tuple:
snake_case_ = True
snake_case_ = GPTNeoXJapaneseForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
snake_case_ = model(a , attention_mask=a , use_cache=a )
snake_case_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ = model(a , attention_mask=a , output_hidden_states=a )
snake_case_ = output_from_no_past['hidden_states'][0]
snake_case_ = model(
a , attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCAmelCase = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = GPTNeoXJapaneseModelTester(self )
snake_case_ = ConfigTester(self , config_class=a , hidden_size=37 )
def _UpperCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Optional[Any]:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a , a , a )
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> Optional[int]:
# This regression test was failing with PyTorch < 1.3
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case_ = None
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> Dict:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(a , a , a )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*a )
@slow
def _UpperCamelCase ( self ) -> Any:
snake_case_ = 'abeja/gpt-neox-japanese-2.7b'
snake_case_ = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
snake_case_ = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
snake_case_ = GPTNeoXJapaneseTokenizer.from_pretrained(a )
snake_case_ = GPTNeoXJapaneseForCausalLM.from_pretrained(a )
snake_case_ = []
for prompt in prompts:
snake_case_ = tokenizer(a , return_tensors='pt' ).input_ids
snake_case_ = model.generate(a , max_length=50 )
snake_case_ = tokenizer.batch_decode(a , skip_special_tokens=a )
predicted_outputs += generated_string
self.assertListEqual(a , a )
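# The past-key-values test above exercises the standard KV-cache contract:
# decoding [input_ids; next_tokens] in a single pass must match decoding
# next_tokens alone against the cached keys/values, to within atol=1e-3.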
| 178 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _a ( unittest.TestCase):
def __init__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=7 , _SCREAMING_SNAKE_CASE : Tuple=3 , _SCREAMING_SNAKE_CASE : Any=18 , _SCREAMING_SNAKE_CASE : str=30 , _SCREAMING_SNAKE_CASE : List[str]=400 , _SCREAMING_SNAKE_CASE : List[Any]=True , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : List[str]=True , )-> Any:
lowerCAmelCase__ : int = size if size is not None else {'height': 18, 'width': 18}
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : Optional[Any] = batch_size
lowerCAmelCase__ : Optional[int] = num_channels
lowerCAmelCase__ : Dict = image_size
lowerCAmelCase__ : int = min_resolution
lowerCAmelCase__ : Tuple = max_resolution
lowerCAmelCase__ : List[str] = do_resize
lowerCAmelCase__ : Dict = size
lowerCAmelCase__ : Dict = do_normalize
def UpperCAmelCase__( self : List[str] )-> List[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _a ( _lowercase , unittest.TestCase):
_a : Optional[int] = ImageGPTImageProcessor if is_vision_available() else None
def UpperCAmelCase__( self : Dict )-> Optional[Any]:
lowerCAmelCase__ : Dict = ImageGPTImageProcessingTester(self )
@property
def UpperCAmelCase__( self : str )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__( self : Any )-> Optional[int]:
lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''clusters''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
def UpperCAmelCase__( self : List[Any] )-> Any:
lowerCAmelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
lowerCAmelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def UpperCAmelCase__( self : Optional[Any] )-> int:
lowerCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
lowerCAmelCase__ : Optional[int] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , obj[key] ) )
else:
self.assertEqual(obj[key] , _A )
def UpperCAmelCase__( self : Any )-> Optional[int]:
lowerCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : List[Any] = os.path.join(_A , '''image_processor.json''' )
image_processor_first.to_json_file(_A )
lowerCAmelCase__ : Optional[Any] = self.image_processing_class.from_json_file(_A ).to_dict()
lowerCAmelCase__ : Optional[int] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _A )
def UpperCAmelCase__( self : int )-> Tuple:
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_A )
lowerCAmelCase__ : Optional[int] = self.image_processing_class.from_pretrained(_A ).to_dict()
lowerCAmelCase__ : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _A )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def UpperCAmelCase__( self : Dict )-> int:
pass
def lowerCamelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
lowerCAmelCase__ : List[str] = Image.open(dataset[4]['''file'''] )
lowerCAmelCase__ : List[Any] = Image.open(dataset[5]['''file'''] )
lowerCAmelCase__ : Any = [imagea, imagea]
return images
@require_vision
@require_torch
class _a ( unittest.TestCase):
@slow
def UpperCAmelCase__( self : Optional[Any] )-> Any:
lowerCAmelCase__ : Optional[int] = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
lowerCAmelCase__ : Union[str, Any] = prepare_images()
# test non-batched
lowerCAmelCase__ : List[Any] = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
lowerCAmelCase__ : int = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , _A )
# test batched
lowerCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
lowerCAmelCase__ : Any = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , _A )
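# Note: unlike most vision processors, ImageGPT maps every normalized pixel to
# the id of its nearest color cluster, so the output is a LongTensor of
# 32 * 32 = 1024 token ids per image, which is exactly what the shape and
# dtype assertions above verify.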
| 357 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ['''BeitFeatureExtractor''']
lowerCamelCase = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 211 | 0 |
def A ( ) -> Union[str, Any]:
'''simple docstring'''
for n in range(1 , 1_000_000 ):
yield n * (n + 1) // 2
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = 1
_UpperCAmelCase = 2
while i * i <= n:
_UpperCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def A ( ) -> Tuple:
'''simple docstring'''
return next(i for i in triangle_number_generator() if count_divisors(_UpperCAmelCase ) > 500 )
if __name__ == "__main__":
print(solution())
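# Worked example for the divisor count (illustrative): 28 = 2**2 * 7, so
# count_divisors returns (2 + 1) * (1 + 1) = 6, matching the divisors
# {1, 2, 4, 7, 14, 28}. The loop multiplies (multiplicity + 1) over each prime
# factor, which is the standard divisor-function formula.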
| 339 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "conditional_detr"
__lowerCAmelCase = ["past_key_values"]
__lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=300 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.02 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=2 , __A=5 , __A=2 , __A=1 , __A=1 , __A=2 , __A=5 , __A=2 , __A=0.25 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
a =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__A , __A ):
a =backbone_config.get('''model_type''' )
a =CONFIG_MAPPING[backbone_model_type]
a =config_class.from_dict(__A )
a =use_timm_backbone
a =backbone_config
a =num_channels
a =num_queries
a =d_model
a =encoder_ffn_dim
a =encoder_layers
a =encoder_attention_heads
a =decoder_ffn_dim
a =decoder_layers
a =decoder_attention_heads
a =dropout
a =attention_dropout
a =activation_dropout
a =activation_function
a =init_std
a =init_xavier_std
a =encoder_layerdrop
a =decoder_layerdrop
a =encoder_layers
a =auxiliary_loss
a =position_embedding_type
a =backbone
a =use_pretrained_backbone
a =dilation
# Hungarian matcher
a =class_cost
a =bbox_cost
a =giou_cost
# Loss coefficients
a =mask_loss_coefficient
a =dice_loss_coefficient
a =cls_loss_coefficient
a =bbox_loss_coefficient
a =giou_loss_coefficient
a =focal_alpha
super().__init__(is_encoder_decoder=__A , **__A )
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
return self.d_model
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
a =self.backbone_config.to_dict()
a =self.__class__.model_type
return output
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
        return 12
| 81 | 0 |
"""simple docstring"""
from ... import PretrainedConfig
__A : Dict = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __UpperCamelCase ( _A ):
SCREAMING_SNAKE_CASE = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
SCREAMING_SNAKE_CASE = "nezha"
def __init__(self : Any , __SCREAMING_SNAKE_CASE : Optional[int]=2_1_1_2_8 , __SCREAMING_SNAKE_CASE : Dict=7_6_8 , __SCREAMING_SNAKE_CASE : Optional[int]=1_2 , __SCREAMING_SNAKE_CASE : Dict=1_2 , __SCREAMING_SNAKE_CASE : List[str]=3_0_7_2 , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=5_1_2 , __SCREAMING_SNAKE_CASE : Optional[Any]=6_4 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Tuple=0.0_2 , __SCREAMING_SNAKE_CASE : Dict=1E-12 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Any=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : str=True , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = max_relative_position
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = classifier_dropout
A = use_cache
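# Note: the hyperparameter that sets NEZHA apart from BERT here is
# max_relative_position (default 64); NEZHA uses functional relative position
# encodings rather than BERT's learned absolute position embeddings.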
| 57 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : int = logging.get_logger(__name__)
__A : Optional[int] = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    """simple docstring"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """simple docstring"""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)

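# Example invocation (added for illustration; the script file name is an
# assumption, and the checkpoint must be one of ACCEPTABLE_CHECKPOINTS):
#
#     python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#         vqa_pre_trained.th ./visualbert-vqa-pretrained
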
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        """simple docstring"""
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        """simple docstring"""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path)
        # Build dataset for splits
        dataset = self.builder.as_dataset(split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        """simple docstring"""
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """simple docstring"""
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices)
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """simple docstring"""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written

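# Minimal usage sketch (added for illustration; `ds` is assumed to be an
# in-memory `datasets.Dataset`). `write()` streams the Arrow table to SQL in
# `batch_size` chunks, switching to `if_exists="append"` after the first chunk:
#
#     SqlDatasetWriter(ds, "my_table", "sqlite:///data.db").write()
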
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
        '''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
        ),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
        '''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
        ),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
        '''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
        ),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """simple docstring"""
        # Always use a fixed sequence length so candidates can be stacked into a batch.
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair', None)
        return_tensors = kwargs.pop('return_tensors', None)
        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get('input_ids')
            encoded_attention_mask = encoded_candidates.get('attention_mask')
            encoded_token_type_ids = encoded_candidates.get('token_type_ids')
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    """simple docstring"""
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f'Unable to determine file format from file extension {path}. '
        f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}'
    )


def run_command_factory(args):
    """simple docstring"""
    nlp = pipeline(task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device)
    format = try_infer_format_from_ext(args.input) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(format=format, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite)
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    """simple docstring"""

    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        '''simple docstring'''
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        run_parser = parser.add_parser('run', help='Run a pipeline through the CLI')
        run_parser.add_argument('--task', choices=get_supported_tasks(), help='Task to run')
        run_parser.add_argument('--input', type=str, help='Path to the file to use for inference')
        run_parser.add_argument('--output', type=str, help='Path to the file that will be used post to write results.')
        run_parser.add_argument('--model', type=str, help='Name or path to the model to instantiate.')
        run_parser.add_argument('--config', type=str, help='Name or path to the model\'s config to instantiate.')
        run_parser.add_argument(
            '--tokenizer', type=str, help='Name of the tokenizer to use. (default: same as the model name)')
        run_parser.add_argument(
            '--column', type=str, help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)')
        run_parser.add_argument(
            '--format', type=str, default='infer', choices=PipelineDataFormat.SUPPORTED_FORMATS, help='Input format to read from')
        run_parser.add_argument(
            '--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)')
        run_parser.add_argument('--overwrite', action='store_true', help='Allow overwriting the output file.')
        run_parser.set_defaults(func=run_command_factory)
    def run(self):
        '''simple docstring'''
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}')
        else:
            self._reader.save(outputs)

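# Example invocation (added for illustration; assumes the `transformers-cli`
# entry point registers this subcommand):
#
#     transformers-cli run --task sentiment-analysis --input data.csv \
#         --output predictions.json --format infer
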
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}


class MvpConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.')

import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """simple docstring"""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    """simple docstring"""
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f'''part_id = {partition_id}''').drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """simple docstring"""

    def __init__(self, df, partition_order=None):
        """simple docstring"""
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        """simple docstring"""
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        """simple docstring"""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        """simple docstring"""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        """simple docstring"""
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        """simple docstring"""
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        """simple docstring"""

        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir")
    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """simple docstring"""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        """simple docstring"""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        """simple docstring"""
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"])
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", F'''{shard_id:05d}''').replace("TTTTT", F'''{task_id:05d}'''), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files)
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"])
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", F'''{shard_id:05d}''').replace("TTTTT", F'''{task_id:05d}'''), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files)
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"])
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"))
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        """simple docstring"""
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        fpath = path_join(self._output_dir, fname)
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(F'''Renaming {total_shards} shards.''')
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs, fpath.replace("SSSSS", F'''{shard_id:05d}''').replace("TTTTT", F'''{task_id:05d}'''), fpath.replace("TTTTT-SSSSS", F'''{global_shard_id:05d}''').replace("NNNNN", F'''{total_shards:05d}'''))

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", F'''{shard_id:05d}''').replace("TTTTT", F'''{task_id:05d}'''), fpath.replace(SUFFIX, ""))

    def _get_examples_iterable_for_split(self, split_generator):
        """simple docstring"""
        return SparkExamplesIterable(self.df)

'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, f'''`{optional_component}` did not stay set to None after loading.''')
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator)
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy").images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator)
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy").images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        '''simple docstring'''
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, '''pad_token_id''', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs

    @property
    def num_layers(self) -> int:
        '''simple docstring'''
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch

                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13

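# Illustrative note (added): with `use_past=True`, `generate_dummy_inputs`
# adds one (key, value) pair of zero tensors per decoder layer, so with the
# defaults above that is 28 pairs shaped
# (batch, 16 heads, seq_len + 2, 4096 // 16 = 256 per head).
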
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """simple docstring"""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """simple docstring"""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections


if __name__ == "__main__":
    print(F'''{solution() = }''')

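# Sanity check (added for illustration): with the default entry point the beam
# bounces 354 times before escaping through the -0.01 <= x <= 0.01 gap, which
# is Project Euler 144's published answer, so `solution()` should return 354.
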
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("""Both points must be in the same n-dimensional space""")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        """Expected a list of numbers as input, found """
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("""Missing an input""")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("""Both points must be in the same n-dimensional space""")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
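
# Minimal usage sketch (added for illustration), using the helpers above:
if __name__ == "__main__":
    print(manhattan_distance([1, 1], [2, 2]))            # 2.0
    print(manhattan_distance_one_liner([1, 3], [4, 7]))  # 7.0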
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 1_1
    last_digit = int('''1''' + '''0''' * digit_len)
    for num in range(den, last_digit):
        while den <= 9_9:
            if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(F"""{num}/{den}""")
            den += 1
        num += 1
        den = 1_0
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())

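# Worked example (added for illustration): for two-digit numbers the four
# non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98;
# their product reduces to 1/100, so `solution()` returns 100.
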
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = '''MCTCTFeatureExtractor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def pad(self, *args, **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        """simple docstring"""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        """simple docstring"""
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 2_56
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image):
        """simple docstring"""
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 2_56, [0, 2_56], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)
    def plot_histogram(self):
        """simple docstring"""
        plt.hist(self.img.ravel(), 2_56, [0, 2_56])

    def show_image(self):
        """simple docstring"""
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(50_00)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()

from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs)

def solution(n=4_000_000):
    """Sum the even-valued Fibonacci numbers that do not exceed ``n``."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
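
# Hand-checkable case: the even Fibonacci numbers not exceeding 10 are 2 and 8.
assert solution(10) == 10
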
if __name__ == "__main__":
print(f'''{solution() = }''')
| 127 | 1 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # Tests the input string: it must
    # a) be of type string
    # b) have an even length (so pairs can be made)
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"'{i}' not in list of symbols")
        elif i in tmppbl:
            raise Exception(f"Duplicate symbol ({i})")
        else:
            tmppbl.add(i)
    del tmppbl

    # Create the dictionary, mapping both directions of each pair
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors, traversed in reverse order
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions (odometer-style stepping)
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised:
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 347 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2_540_529) < 10
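
# A contract worth naming here: everything that affects step() must either
# live in the saved config or be restorable by hand (the `ets` residual
# history above is re-seeded explicitly), so that from_pretrained() can
# reproduce bit-identical sampling trajectories after a save/load round trip.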
| 347 | 1 |
"""Length of a circular arc from its central angle (in degrees) and radius."""
from math import pi


def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
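
# Worked example: a 90 degree arc of a radius-10 circle is a quarter of the
# circumference, 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.7079, which is what
# the call above prints.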
| 83 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False,
        bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>",
        pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
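
# Minimal usage sketch (hypothetical session; requires `jieba` and the
# checkpoint referenced above to be reachable):
#
#     tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tokenizer.encode("Hello world")
#     print(tokenizer.decode(ids))
#
# Note how `_decode` undoes the `str.maketrans(" \n", "\u2582\u2583")` mapping
# set up in `__init__`, restoring the spaces and newlines that SentencePiece
# would otherwise drop.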
| 294 | 0 |
_A = "Alexander Joslin"
import operator as op
from .stack import Stack
def lowercase_ ( A__ ) -> int:
"""simple docstring"""
snake_case = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
snake_case = Stack()
snake_case = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(A__ ) )
elif i in operators:
# RULE 2
operator_stack.push(A__ )
elif i == ")":
# RULE 4
snake_case = operator_stack.peek()
operator_stack.pop()
snake_case = operand_stack.peek()
operand_stack.pop()
snake_case = operand_stack.peek()
operand_stack.pop()
snake_case = operators[opr](A__ , A__ )
operand_stack.push(A__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_A = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 137 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
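
# Invariant these tests pin down: `out_features` (stage names) and
# `out_indices` (positions into `stage_names`) must always describe the same
# stages, so setting one property recomputes the other; e.g. out_features
# ["a", "b"] on stages ["a", "b", "c"] implies out_indices [0, 1].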
| 137 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        # NOTE: the values below are hardcoded, mirroring the defaults above
        # rather than the constructor arguments themselves.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)

        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50_000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # shape (2, 12, 16, 64): batch, heads, sequence, head size
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
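
# The rotary check above exercises RoFormer's core identity: each feature pair
# (x1, x2) at position m is rotated by the angle m * theta_i,
#
#     (x1', x2') = (x1 * cos(m * theta_i) - x2 * sin(m * theta_i),
#                   x2 * cos(m * theta_i) + x1 * sin(m * theta_i)),
#
# so the dot product of a query at position m with a key at position n
# depends only on the relative offset m - n.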
| 148 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = {"input_ids": [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
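
# The recurring `+ tokenizer.fairseq_offset` above reflects how the slow
# tokenizer aligns SentencePiece ids with fairseq's vocabulary layout:
# fairseq reserves ids 0-3 for "<s>", "<pad>", "</s>", "<unk>", so every
# SentencePiece piece id is shifted by an offset of 1 and unknown pieces
# map to id 3.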
| 148 | 1 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256_047
RO_CODE = 256_145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def a ( self ):
pass
def a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = [AddedToken('<special>' , lstrip=lowerCamelCase_ )]
snake_case_ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ )
snake_case_ = tokenizer_r.encode('Hey this is a <special> token' )
snake_case_ = tokenizer_r.encode('<special>' , add_special_tokens=lowerCamelCase_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
snake_case_ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
snake_case_ = self.tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ )
snake_case_ = tokenizer_p.encode('Hey this is a <special> token' )
snake_case_ = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = "facebook/nllb-200-distilled-600M"
__SCREAMING_SNAKE_CASE : Tuple = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__SCREAMING_SNAKE_CASE : List[Any] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__SCREAMING_SNAKE_CASE : List[str] = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
@classmethod
def a ( cls ):
snake_case_ = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
snake_case_ = 1
return cls
def a ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 )
def a ( self ):
snake_case_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def a ( self ):
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
snake_case_ = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
snake_case_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def a ( self ):
snake_case_ = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
snake_case_ = 10
snake_case_ = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def a ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] )
def a ( self ):
snake_case_ = tempfile.mkdtemp()
snake_case_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
snake_case_ = NllbTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def a ( self ):
snake_case_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
snake_case_ = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
snake_case_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
snake_case_ = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='pt' )
snake_case_ = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='pt' )
snake_case_ = targets["""input_ids"""]
snake_case_ = shift_tokens_right(
lowerCamelCase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
snake_case_ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# A, test, EOS, en_XX
'input_ids': [[25_6047, 70, 7356, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_6057,
} , )
@require_torch
def a ( self ):
snake_case_ = True
snake_case_ = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
snake_case_ = False
snake_case_ = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
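# A minimal usage sketch of the tokenizer exercised above (a sketch, assuming the
# public `transformers.NllbTokenizer` API and the checkpoint named in the tests;
# `legacy_behaviour` is assumed to be the flag the last test above toggles):
#
#   from transformers import NllbTokenizer
#   tok = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
#   )
#   batch = tok(["UN Chief says ..."], text_target=["Şeful ONU declară ..."], return_tensors="pt")
#   # default behaviour:      input_ids == [src_lang_code, tokens..., eos]
#   # legacy_behaviour=True:  input_ids == [tokens..., eos, src_lang_code]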
| 365 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase : Dict = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
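# Sketch of what the lazy pattern above buys (assumed context: this file is the
# `transformers.models.blip` package __init__, as the relative imports suggest):
#
#   from transformers.models.blip import BlipConfig   # cheap: no torch/TF import yet
#   from transformers.models.blip import BlipModel    # torch is imported only here
#
# _LazyModule resolves each attribute on first access via `_import_structure`, so
# a missing backend only fails when the corresponding symbol is actually touched.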
| 200 | 0 |
class A__ :
"""simple docstring"""
    def __init__( self , array ) -> None:
        '''simple docstring'''
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum( self , start , end ) -> int:
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum_of_subarray( self , target_sum ) -> bool:
        '''simple docstring'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
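# Worked example (sketch) of the prefix-sum trick above: prefix_sum[i] holds
# array[0] + ... + array[i], so any range sum is one subtraction, and a subarray
# summing to `target_sum` exists iff two prefix sums differ by exactly that value.
#
#   ps = A__([1, 2, 3, 4])            # prefix_sum == [1, 3, 6, 10]
#   ps.get_sum(1, 3)                  # 9  (2 + 3 + 4)
#   ps.contains_sum_of_subarray(5)    # True  (2 + 3)
#   ps.contains_sum_of_subarray(11)   # False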
| 99 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'The estimated value of pi is {pi_estimate}' )
    print(F'The numpy value of pi is {pi}' )
    print(F'The total error is {abs(pi - pi_estimate )}' )


def area_under_curve_estimator(
    iterations: int ,
    function_to_integrate: Callable[[float], float] ,
    min_value: float = 0.0 ,
    max_value: float = 1.0 ,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int , min_value: float = 0.0 , max_value: float = 1.0 ) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************' )
    print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
    print(F'Estimated value is {estimated_value}' )
    print(F'Expected value is {expected_value}' )
    print(F'Total error is {abs(estimated_value - expected_value )}' )
    print('******************' )


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x )

    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print('******************' )
    print('Estimating pi using area_under_curve_estimator' )
    print(F'Estimated value is {estimated_value}' )
    print(F'Expected value is {pi}' )
    print(F'Total error is {abs(estimated_value - pi )}' )
    print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
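# Example run (a sketch; outputs vary from run to run because sampling is random):
#
#   pi_estimator(100_000)                         # prints an estimate near 3.14
#   area_under_line_estimator_check(100_000)      # area under y = x on [0, 1] ~ 0.5
#   pi_estimator_using_area_under_curve(100_000)  # integral of sqrt(4 - x^2) on [0, 2] = pi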
| 99 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase : int = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = ["""LayoutLMv2FeatureExtractor"""]
_UpperCAmelCase : Any = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
_UpperCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 365 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]
    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process("""P1""", 0, 53)
    P2 = Process("""P2""", 0, 17)
    P3 = Process("""P3""", 0, 68)
    P4 = Process("""P4""", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"""queue""": deque([P1, P2, P3, P4])})

    P1 = Process("""P1""", 0, 53)
    P2 = Process("""P2""", 0, 17)
    P3 = Process("""P3""", 0, 68)
    P4 = Process("""P4""", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
    )
)
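# Hand-computed trace (a sketch) for the demo above: all arrivals at t=0,
# queue order P1(53), P2(17), P3(68), P4(24), slices [17, 25], FCFS last:
#
#   RR slice 17: P1 -> 36 left (t=17), P2 done (t=34), P3 -> 51 (t=51), P4 -> 7 (t=68)
#   RR slice 25: P1 -> 11 left (t=93), P3 -> 26 (t=118), P4 done (t=125)
#   FCFS:        P1 done (t=136), P3 done (t=162)
#
# Finish order is therefore [P2, P4, P1, P3], with completion times
# P1=136, P2=34, P3=162, P4=125.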
| 45 | 0 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
    if _snake_case < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(_snake_case , float ):
        raise TypeError("""Input value must be a 'int' type""" )
return bin(_snake_case ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
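# Worked example (sketch): the function counts set bits via bin().count("1"):
#
#   lowercase_(25)   # 3, since 25 == 0b11001
#   lowercase_(64)   # 1, since 64 == 0b1000000
#   lowercase_(-1)   # raises ValueError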
| 25 | """simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__(self , image_processor=None , tokenizer=None , **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor , tokenizer)

    def __call__(self , text=None , images=None , return_tensors=None , **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)

    def batch_decode(self , *args , **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs)

    def decode(self , *args , **kwargs):
        return self.tokenizer.decode(*args , **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
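# A minimal usage sketch (assumes a checkpoint pairing a CLIP image processor
# with an XLM-Roberta tokenizer, e.g. an AltCLIP-style model; the checkpoint
# name below is illustrative, not confirmed by this file):
#
#   processor = UpperCamelCase.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # inputs carries input_ids / attention_mask from the tokenizer plus pixel_values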
| 172 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F'''{num}/{den}''' )
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2 ) -> int:
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
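# Worked check (sketch): the four non-trivial "digit-cancelling" fractions are
# 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so the answer (the
# denominator of the product in lowest terms) is 100:
#
#   fraction_list(2)   # ['16/64', '19/95', '26/65', '49/98']
#   solution()         # 100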
| 352 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class PoolFormerFeatureExtractor( PoolFormerImageProcessor ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 14 | 0 |