#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for summarization.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
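#
# A minimal illustrative invocation (not part of the original script; the dataset choice, hyperparameters and
# the assumption that this file is saved as `run_summarization.py` are only examples):
#
#   python run_summarization.py \
#       --model_name_or_path t5-small \
#       --dataset_name cnn_dailymail \
#       --dataset_config_name "3.0.0" \
#       --source_prefix "summarize: " \
#       --output_dir /tmp/tf-summarization \
#       --per_device_train_batch_size 8 \
#       --per_device_eval_batch_size 8 \
#       --num_train_epochs 3 \
#       --do_train \
#       --do_eval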

import json
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from typing import Optional

import datasets
import evaluate
import nltk  # Here to have a nice missing dependency error message early on
import numpy as np
import tensorflow as tf
from datasets import load_dataset
from filelock import FileLock

import transformers
from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForSeq2Seq,
    HfArgumentParser,
    KerasMetricCallback,
    PushToHubCallback,
    TFAutoModelForSeq2SeqLM,
    TFTrainingArguments,
    create_optimizer,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, is_offline_mode, send_example_telemetry
from transformers.utils.versions import require_version

# region Checking dependencies
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.33.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")

logger = logging.getLogger(__name__)
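
# The "punkt" tokenizer data is required by nltk.sent_tokenize, which is used below when splitting
# predictions and references into sentences for the ROUGE-Lsum metric.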
try:
    nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
    if is_offline_mode():
        raise LookupError(
            "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
        )
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
# endregion


# region Arguments
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    token: str = field(
        default=None,
        metadata={
            "help": (
                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
            )
        },
    )
    use_auth_token: bool = field(
        default=None,
        metadata={
            "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
        },
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
                "should only be set to `True` for repositories you trust and in which you have read the code, as it "
                "will execute code present on the Hub on your local machine."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    text_column: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
    )
    summary_column: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."},
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
            )
        },
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={
            "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the model's maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
                "which is used during ``evaluate`` and ``predict``."
            )
        },
    )
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={
            "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
        },
    )
    source_prefix: Optional[str] = field(
        default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
    )

    def __post_init__(self):
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
        if self.val_max_target_length is None:
            self.val_max_target_length = self.max_target_length


# endregion

# region Dataset name mappings
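# Each entry maps a dataset name to its (text_column, summary_column) pair, so --text_column and
# --summary_column do not need to be passed explicitly for these datasets.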
summarization_name_mapping = {
    "amazon_reviews_multi": ("review_body", "review_title"),
    "big_patent": ("description", "abstract"),
    "cnn_dailymail": ("article", "highlights"),
    "orange_sum": ("text", "summary"),
    "pn_summary": ("article", "summary"),
    "psc": ("extract_text", "summary_text"),
    "samsum": ("dialogue", "summary"),
    "thaisum": ("body", "summary"),
    "xglue": ("news_body", "news_title"),
    "xsum": ("document", "summary"),
    "wiki_summary": ("article", "highlights"),
    "multi_news": ("document", "summary"),
}
# endregion


def main():
    # region Argument parsing
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if model_args.use_auth_token is not None:
        warnings.warn("The `use_auth_token` argument is deprecated and will be removed in v4.34.", FutureWarning)
        if model_args.token is not None:
            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
        model_args.token = model_args.use_auth_token

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/TensorFlow versions.
    send_example_telemetry("run_summarization", model_args, data_args, framework="tensorflow")
    # endregion

    # region Logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO)
    datasets.utils.logging.set_verbosity(logging.INFO)
    transformers.utils.logging.set_verbosity(logging.INFO)

    # Log on each process the small summary:
    logger.info(f"Training/evaluation parameters {training_args}")
    # endregion

    # region T5 special-casing
    if data_args.source_prefix is None and model_args.model_name_or_path in [
        "t5-small",
        "t5-base",
        "t5-large",
        "t5-3b",
        "t5-11b",
    ]:
        logger.warning(
            "You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with "
            "`--source_prefix 'summarize: '`"
        )
    # endregion

    # region Detecting last checkpoint
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # endregion

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # region Load datasets
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files this script will use the first column for the full texts and the second column for the
    # summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
        )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        if data_args.test_file is not None:
            data_files["test"] = data_args.test_file
            extension = data_args.test_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # endregion

    # region Load model config and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )

    prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
    # endregion

    # region Dataset preprocessing
    # We need to tokenize inputs and targets.
    if training_args.do_train:
        column_names = raw_datasets["train"].column_names
    elif training_args.do_eval:
        column_names = raw_datasets["validation"].column_names
    else:
        logger.info("There is nothing to do. Please pass `do_train` and/or `do_eval`.")
        return

    # Get the column names for input/target.
    dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
    if data_args.text_column is None:
        text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
    else:
        text_column = data_args.text_column
        if text_column not in column_names:
            raise ValueError(
                f"`--text_column` value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}"
            )
    if data_args.summary_column is None:
        summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
    else:
        summary_column = data_args.summary_column
        if summary_column not in column_names:
            raise ValueError(
                f"`--summary_column` value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}"
            )

    # Temporarily set max_target_length for training.
    max_target_length = data_args.max_target_length
    padding = "max_length" if data_args.pad_to_max_length else False

    def preprocess_function(examples):
        inputs = examples[text_column]
        targets = examples[summary_column]
        inputs = [prefix + inp for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)

        # Tokenize targets with the `text_target` keyword argument
        labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)

        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
        # padding in the loss.
        if padding == "max_length" and data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
    else:
        train_dataset = None

    if training_args.do_eval:
        max_target_length = data_args.val_max_target_length
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )
    else:
        eval_dataset = None
    # endregion

    # region Text preprocessing
    def postprocess_text(preds, labels):
        preds = [pred.strip() for pred in preds]
        labels = [label.strip() for label in labels]

        # rougeLSum expects newline after each sentence
        preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
        labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]

        return preds, labels

    # endregion

    with training_args.strategy.scope():
        # region Prepare model
        model = TFAutoModelForSeq2SeqLM.from_pretrained(
            model_args.model_name_or_path,
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )

        # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
        # on a small vocab and want a smaller embedding size, remove this test.
        embeddings = model.get_input_embeddings()

        # Matt: This is a temporary workaround as we transition our models to exclusively using Keras embeddings.
        # As soon as the transition is complete, all embeddings should be keras.Embeddings layers, and
        # the weights will always be in embeddings.embeddings.
        if hasattr(embeddings, "embeddings"):
            embedding_size = embeddings.embeddings.shape[0]
        else:
            embedding_size = embeddings.weight.shape[0]
        if len(tokenizer) > embedding_size:
            model.resize_token_embeddings(len(tokenizer))
        # endregion

        # region Prepare TF Dataset objects
        if model.config.decoder_start_token_id is None:
            raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
        label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
        data_collator = DataCollatorForSeq2Seq(
            tokenizer,
            model=model,
            label_pad_token_id=label_pad_token_id,
            pad_to_multiple_of=128,  # Reduce the number of unique shapes for XLA, especially for generation
            return_tensors="np",
        )
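        # Auto-sharding is disabled because these tf.data pipelines are built from in-memory datasets rather
        # than files, so the default file-based sharding would otherwise trigger warnings or errors under a
        # distribution strategy.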
        dataset_options = tf.data.Options()
        dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF

        num_replicas = training_args.strategy.num_replicas_in_sync
        total_train_batch_size = training_args.per_device_train_batch_size * num_replicas
        total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas

        # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in
        # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also
        # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names
        # yourself if you use this method, whereas they are automatically inferred from the model input names when
        # using model.prepare_tf_dataset().
        # For more info see the docs:
        # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset
        # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset

        tf_train_dataset = model.prepare_tf_dataset(
            train_dataset,
            collate_fn=data_collator,
            batch_size=total_train_batch_size,
            shuffle=True,
        ).with_options(dataset_options)
        tf_eval_dataset = model.prepare_tf_dataset(
            eval_dataset,
            collate_fn=data_collator,
            batch_size=total_eval_batch_size,
            shuffle=False,
        ).with_options(dataset_options)
        # endregion

        # region Optimizer, loss and LR scheduling
        num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs)
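        # An explicit --warmup_steps value takes precedence over --warmup_ratio; if neither is set, no warmup is used.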
        if training_args.warmup_steps > 0:
            num_warmup_steps = training_args.warmup_steps
        elif training_args.warmup_ratio > 0:
            num_warmup_steps = int(num_train_steps * training_args.warmup_ratio)
        else:
            num_warmup_steps = 0
        if training_args.do_train:
            optimizer, lr_schedule = create_optimizer(
                init_lr=training_args.learning_rate,
                num_train_steps=num_train_steps,
                num_warmup_steps=num_warmup_steps,
                adam_beta1=training_args.adam_beta1,
                adam_beta2=training_args.adam_beta2,
                adam_epsilon=training_args.adam_epsilon,
                weight_decay_rate=training_args.weight_decay,
                adam_global_clipnorm=training_args.max_grad_norm,
            )
        else:
            optimizer = None
        # endregion

        # region Metric and KerasMetricCallback
        if training_args.do_eval:
            metric = evaluate.load("rouge")

            if data_args.val_max_target_length is None:
                data_args.val_max_target_length = data_args.max_target_length

            gen_kwargs = {
                "max_length": data_args.val_max_target_length if data_args is not None else config.max_length,
                "num_beams": data_args.num_beams,
                "no_repeat_ngram_size": 0,  # Not supported under XLA right now, and some models set it by default
            }

            def compute_metrics(preds):
                predictions, labels = preds
                if isinstance(predictions, tuple):
                    predictions = predictions[0]
                decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
                labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
                decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
                decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
                metrics = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
                # `evaluate`'s rouge metric returns aggregated F-measures as plain floats by default;
                # report them as percentages.
                metrics = {key: round(val * 100, 4) for key, val in metrics.items()}
                return metrics

            # The KerasMetricCallback allows metrics that are too complex to write as standard Keras metrics
            # to be computed each epoch. Any Python code can be included in the metric_fn. This is especially
            # useful for metrics like BLEU and ROUGE that perform string comparisons on decoded model outputs.
            # For more information, see the docs at
            # https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.KerasMetricCallback

            metric_callback = KerasMetricCallback(
                metric_fn=compute_metrics,
                eval_dataset=tf_eval_dataset,
                predict_with_generate=True,
                use_xla_generation=True,
                generate_kwargs=gen_kwargs,
            )

            callbacks = [metric_callback]
        else:
            callbacks = []
        # endregion

        # region Preparing push_to_hub and model card
        push_to_hub_model_id = training_args.push_to_hub_model_id
        model_name = model_args.model_name_or_path.split("/")[-1]
        if not push_to_hub_model_id:
            if data_args.dataset_name is not None:
                push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}"
            else:
                push_to_hub_model_id = f"{model_name}-finetuned-summarization"

        model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"}
        if data_args.dataset_name is not None:
            model_card_kwargs["dataset_tags"] = data_args.dataset_name
            if data_args.dataset_config_name is not None:
                model_card_kwargs["dataset_args"] = data_args.dataset_config_name
                model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
            else:
                model_card_kwargs["dataset"] = data_args.dataset_name

        if training_args.push_to_hub:
            # Because this training can be quite long, we save once per epoch.
            callbacks.append(
                PushToHubCallback(
                    output_dir=training_args.output_dir,
                    hub_model_id=push_to_hub_model_id,
                    hub_token=training_args.push_to_hub_token,
                    tokenizer=tokenizer,
                    **model_card_kwargs,
                )
            )
        # endregion

        # region Training
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, jit_compile=training_args.xla)
        eval_metrics = None
        if training_args.do_train:
            logger.info("***** Running training *****")
            logger.info(f" Num examples = {len(train_dataset)}")
            logger.info(f" Num Epochs = {training_args.num_train_epochs}")
            logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
            logger.info(f" Total train batch size = {total_train_batch_size}")
            logger.info(f" Total optimization steps = {num_train_steps}")

            if training_args.xla and not data_args.pad_to_max_length:
                logger.warning(
                    "XLA training may be slow at first when --pad_to_max_length is not set "
                    "until all possible shapes have been compiled."
                )

            history = model.fit(tf_train_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks)
            eval_metrics = {key: val[-1] for key, val in history.history.items()}
        # endregion

        # region Validation
        if training_args.do_eval and not training_args.do_train:
            # Do a standalone evaluation run
            logger.info("Evaluation...")

            # Compiling generation with XLA yields enormous speedups, see https://huggingface.co/blog/tf-xla-generate
            @tf.function(jit_compile=True)
            def generate(**kwargs):
                return model.generate(**kwargs)

            for batch, labels in tf_eval_dataset:
                batch.update(gen_kwargs)
                generated_tokens = generate(**batch)
                if isinstance(generated_tokens, tuple):
                    generated_tokens = generated_tokens[0]
                decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
                labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
                decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
                decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
                metric.add_batch(predictions=decoded_preds, references=decoded_labels)

            eval_metrics = metric.compute(use_stemmer=True)
            # `evaluate`'s rouge metric returns aggregated F-measures as plain floats; report them as percentages.
            result = {key: round(val * 100, 4) for key, val in eval_metrics.items()}
            logger.info(result)
        # endregion

        if training_args.output_dir is not None and eval_metrics is not None:
            output_eval_file = os.path.join(training_args.output_dir, "all_results.json")
            with open(output_eval_file, "w") as writer:
                writer.write(json.dumps(eval_metrics))

        if training_args.output_dir is not None and not training_args.push_to_hub:
            # If we're not pushing to hub, at least save a local copy when we're done
            model.save_pretrained(training_args.output_dir)


if __name__ == "__main__":
    main()