# Generates positive movie reviews by tuning a pretrained model on the IMDB dataset
# with a sentiment score as the reward function.
import json
import os
import sys
from typing import List

import torch
from datasets import load_dataset
from peft import LoraConfig, TaskType
from transformers import pipeline

import trlx
from trlx.data.default_configs import TRLConfig, default_ppo_config


def get_positive_score(scores):
    """Extract the score assigned to the POSITIVE label from the pipeline's output."""
    return {s["label"]: s["score"] for s in scores}["POSITIVE"]


def main(hparams=None):
    # Merge sweep config with the default PPO config if given
    config = TRLConfig.update(default_ppo_config().to_dict(), hparams or {})

    # Run the sentiment pipeline on the process-local GPU if available; -1 means CPU
    if torch.cuda.is_available():
        device = int(os.environ.get("LOCAL_RANK", 0))
    else:
        device = -1

    # Sentiment classifier used as the reward model; top_k=2 returns scores
    # for both the POSITIVE and NEGATIVE labels
    sentiment_fn = pipeline(
        "sentiment-analysis",
        "lvwerra/distilbert-imdb",
        top_k=2,
        truncation=True,
        batch_size=256,
        device=device,
    )

    # Insert your peft config here (the type must be an instance of peft.PeftConfig or a dict)
    config.model.peft_config = LoraConfig(
        r=8,
        task_type=TaskType.CAUSAL_LM,
        lora_alpha=32,
        lora_dropout=0.1,
    )

    # The reward for each sample is the classifier's score for the POSITIVE label
    def reward_fn(samples: List[str], **kwargs) -> List[float]:
        return list(map(get_positive_score, sentiment_fn(samples)))

    # Take the first few words of each movie review as a prompt
    imdb = load_dataset("imdb", split="train+test")
    prompts = [" ".join(review.split()[:4]) for review in imdb["text"]]

    trlx.train(
        reward_fn=reward_fn,
        prompts=prompts,
        # A fixed evaluation prompt keeps eval metrics comparable across checkpoints
        eval_prompts=["I don't know much about Hungarian underground"] * 256,
        config=config,
    )


if __name__ == "__main__":
    # Optional JSON dict of hyperparameter overrides passed as the first CLI argument
    hparams = {} if len(sys.argv) == 1 else json.loads(sys.argv[1])
    main(hparams)
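
# Example invocations (the script filename and the override keys are illustrative;
# the exact override schema is whatever TRLConfig.update accepts for your trlx version):
#
#   python ppo_sentiments_peft.py
#   python ppo_sentiments_peft.py '{"train": {"seq_length": 128}}'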