|
{ |
|
"id": "2211.00053", |
|
"annotator": "jiangshu", |
|
"input": [ |
|
"\\documentclass{article} \n", |
|
"\\usepackage{iclr2023_conference,times}\n", |
|
"\\usepackage{amsmath,amsfonts,bm}\n", |
|
"\\newcommand{\\figleft}{{\\em (Left)}}\n", |
|
"\\newcommand{\\figcenter}{{\\em (Center)}}\n", |
|
"\\newcommand{\\figright}{{\\em (Right)}}\n", |
|
"\\newcommand{\\figtop}{{\\em (Top)}}\n", |
|
"\\newcommand{\\figbottom}{{\\em (Bottom)}}\n", |
|
"\\newcommand{\\captiona}{{\\em (a)}}\n", |
|
"\\newcommand{\\captionb}{{\\em (b)}}\n", |
|
"\\newcommand{\\captionc}{{\\em (c)}}\n", |
|
"\\newcommand{\\captiond}{{\\em (d)}}\n", |
|
"\\newcommand{\\newterm}[1]{{\\bf #1}}\n", |
|
"\\def\\figref#1{figure~\\ref{#1}}\n", |
|
"\\def\\Figref#1{Figure~\\ref{#1}}\n", |
|
"\\def\\twofigref#1#2{figures \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\quadfigref#1#2#3#4{figures \\ref{#1}, \\ref{#2}, \\ref{#3} and \\ref{#4}}\n", |
|
"\\def\\secref#1{section~\\ref{#1}}\n", |
|
"\\def\\Secref#1{Section~\\ref{#1}}\n", |
|
"\\def\\twosecrefs#1#2{sections \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\secrefs#1#2#3{sections \\ref{#1}, \\ref{#2} and \\ref{#3}}\n", |
|
"\\def\\eqref#1{equation~\\ref{#1}}\n", |
|
"\\def\\Eqref#1{Equation~\\ref{#1}}\n", |
|
"\\def\\plaineqref#1{\\ref{#1}}\n", |
|
"\\def\\chapref#1{chapter~\\ref{#1}}\n", |
|
"\\def\\Chapref#1{Chapter~\\ref{#1}}\n", |
|
"\\def\\rangechapref#1#2{chapters\\ref{#1}--\\ref{#2}}\n", |
|
"\\def\\algref#1{algorithm~\\ref{#1}}\n", |
|
"\\def\\Algref#1{Algorithm~\\ref{#1}}\n", |
|
"\\def\\twoalgref#1#2{algorithms \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\Twoalgref#1#2{Algorithms \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\partref#1{part~\\ref{#1}}\n", |
|
"\\def\\Partref#1{Part~\\ref{#1}}\n", |
|
"\\def\\twopartref#1#2{parts \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\ceil#1{\\lceil #1 \\rceil}\n", |
|
"\\def\\floor#1{\\lfloor #1 \\rfloor}\n", |
|
"\\def\\1{\\bm{1}}\n", |
|
"\\newcommand{\\train}{\\mathcal{D}}\n", |
|
"\\newcommand{\\valid}{\\mathcal{D_{\\mathrm{valid}}}}\n", |
|
"\\newcommand{\\test}{\\mathcal{D_{\\mathrm{test}}}}\n", |
|
"\\def\\eps{{\\epsilon}}\n", |
|
"\\def\\reta{{\\textnormal{$\\eta$}}}\n", |
|
"\\def\\ra{{\\textnormal{a}}}\n", |
|
"\\def\\rb{{\\textnormal{b}}}\n", |
|
"\\def\\rc{{\\textnormal{c}}}\n", |
|
"\\def\\rd{{\\textnormal{d}}}\n", |
|
"\\def\\re{{\\textnormal{e}}}\n", |
|
"\\def\\rf{{\\textnormal{f}}}\n", |
|
"\\def\\rg{{\\textnormal{g}}}\n", |
|
"\\def\\rh{{\\textnormal{h}}}\n", |
|
"\\def\\ri{{\\textnormal{i}}}\n", |
|
"\\def\\rj{{\\textnormal{j}}}\n", |
|
"\\def\\rk{{\\textnormal{k}}}\n", |
|
"\\def\\rl{{\\textnormal{l}}}\n", |
|
"\\def\\rn{{\\textnormal{n}}}\n", |
|
"\\def\\ro{{\\textnormal{o}}}\n", |
|
"\\def\\rp{{\\textnormal{p}}}\n", |
|
"\\def\\rq{{\\textnormal{q}}}\n", |
|
"\\def\\rr{{\\textnormal{r}}}\n", |
|
"\\def\\rs{{\\textnormal{s}}}\n", |
|
"\\def\\rt{{\\textnormal{t}}}\n", |
|
"\\def\\ru{{\\textnormal{u}}}\n", |
|
"\\def\\rv{{\\textnormal{v}}}\n", |
|
"\\def\\rw{{\\textnormal{w}}}\n", |
|
"\\def\\rx{{\\textnormal{x}}}\n", |
|
"\\def\\ry{{\\textnormal{y}}}\n", |
|
"\\def\\rz{{\\textnormal{z}}}\n", |
|
"\\def\\rvepsilon{{\\mathbf{\\epsilon}}}\n", |
|
"\\def\\rvtheta{{\\mathbf{\\theta}}}\n", |
|
"\\def\\rva{{\\mathbf{a}}}\n", |
|
"\\def\\rvb{{\\mathbf{b}}}\n", |
|
"\\def\\rvc{{\\mathbf{c}}}\n", |
|
"\\def\\rvd{{\\mathbf{d}}}\n", |
|
"\\def\\rve{{\\mathbf{e}}}\n", |
|
"\\def\\rvf{{\\mathbf{f}}}\n", |
|
"\\def\\rvg{{\\mathbf{g}}}\n", |
|
"\\def\\rvh{{\\mathbf{h}}}\n", |
|
"\\def\\rvi{{\\mathbf{i}}}\n",
|
"\\def\\rvj{{\\mathbf{j}}}\n", |
|
"\\def\\rvk{{\\mathbf{k}}}\n", |
|
"\\def\\rvl{{\\mathbf{l}}}\n", |
|
"\\def\\rvm{{\\mathbf{m}}}\n", |
|
"\\def\\rvn{{\\mathbf{n}}}\n", |
|
"\\def\\rvo{{\\mathbf{o}}}\n", |
|
"\\def\\rvp{{\\mathbf{p}}}\n", |
|
"\\def\\rvq{{\\mathbf{q}}}\n", |
|
"\\def\\rvr{{\\mathbf{r}}}\n", |
|
"\\def\\rvs{{\\mathbf{s}}}\n", |
|
"\\def\\rvt{{\\mathbf{t}}}\n", |
|
"\\def\\rvu{{\\mathbf{u}}}\n", |
|
"\\def\\rvv{{\\mathbf{v}}}\n", |
|
"\\def\\rvw{{\\mathbf{w}}}\n", |
|
"\\def\\rvx{{\\mathbf{x}}}\n", |
|
"\\def\\rvy{{\\mathbf{y}}}\n", |
|
"\\def\\rvz{{\\mathbf{z}}}\n", |
|
"\\def\\erva{{\\textnormal{a}}}\n", |
|
"\\def\\ervb{{\\textnormal{b}}}\n", |
|
"\\def\\ervc{{\\textnormal{c}}}\n", |
|
"\\def\\ervd{{\\textnormal{d}}}\n", |
|
"\\def\\erve{{\\textnormal{e}}}\n", |
|
"\\def\\ervf{{\\textnormal{f}}}\n", |
|
"\\def\\ervg{{\\textnormal{g}}}\n", |
|
"\\def\\ervh{{\\textnormal{h}}}\n", |
|
"\\def\\ervi{{\\textnormal{i}}}\n", |
|
"\\def\\ervj{{\\textnormal{j}}}\n", |
|
"\\def\\ervk{{\\textnormal{k}}}\n", |
|
"\\def\\ervl{{\\textnormal{l}}}\n", |
|
"\\def\\ervm{{\\textnormal{m}}}\n", |
|
"\\def\\ervn{{\\textnormal{n}}}\n", |
|
"\\def\\ervo{{\\textnormal{o}}}\n", |
|
"\\def\\ervp{{\\textnormal{p}}}\n", |
|
"\\def\\ervq{{\\textnormal{q}}}\n", |
|
"\\def\\ervr{{\\textnormal{r}}}\n", |
|
"\\def\\ervs{{\\textnormal{s}}}\n", |
|
"\\def\\ervt{{\\textnormal{t}}}\n", |
|
"\\def\\ervu{{\\textnormal{u}}}\n", |
|
"\\def\\ervv{{\\textnormal{v}}}\n", |
|
"\\def\\ervw{{\\textnormal{w}}}\n", |
|
"\\def\\ervx{{\\textnormal{x}}}\n", |
|
"\\def\\ervy{{\\textnormal{y}}}\n", |
|
"\\def\\ervz{{\\textnormal{z}}}\n", |
|
"\\def\\rmA{{\\mathbf{A}}}\n", |
|
"\\def\\rmB{{\\mathbf{B}}}\n", |
|
"\\def\\rmC{{\\mathbf{C}}}\n", |
|
"\\def\\rmD{{\\mathbf{D}}}\n", |
|
"\\def\\rmE{{\\mathbf{E}}}\n", |
|
"\\def\\rmF{{\\mathbf{F}}}\n", |
|
"\\def\\rmG{{\\mathbf{G}}}\n", |
|
"\\def\\rmH{{\\mathbf{H}}}\n", |
|
"\\def\\rmI{{\\mathbf{I}}}\n", |
|
"\\def\\rmJ{{\\mathbf{J}}}\n", |
|
"\\def\\rmK{{\\mathbf{K}}}\n", |
|
"\\def\\rmL{{\\mathbf{L}}}\n", |
|
"\\def\\rmM{{\\mathbf{M}}}\n", |
|
"\\def\\rmN{{\\mathbf{N}}}\n", |
|
"\\def\\rmO{{\\mathbf{O}}}\n", |
|
"\\def\\rmP{{\\mathbf{P}}}\n", |
|
"\\def\\rmQ{{\\mathbf{Q}}}\n", |
|
"\\def\\rmR{{\\mathbf{R}}}\n", |
|
"\\def\\rmS{{\\mathbf{S}}}\n", |
|
"\\def\\rmT{{\\mathbf{T}}}\n", |
|
"\\def\\rmU{{\\mathbf{U}}}\n", |
|
"\\def\\rmV{{\\mathbf{V}}}\n", |
|
"\\def\\rmW{{\\mathbf{W}}}\n", |
|
"\\def\\rmX{{\\mathbf{X}}}\n", |
|
"\\def\\rmY{{\\mathbf{Y}}}\n", |
|
"\\def\\rmZ{{\\mathbf{Z}}}\n", |
|
"\\def\\ermA{{\\textnormal{A}}}\n", |
|
"\\def\\ermB{{\\textnormal{B}}}\n", |
|
"\\def\\ermC{{\\textnormal{C}}}\n", |
|
"\\def\\ermD{{\\textnormal{D}}}\n", |
|
"\\def\\ermE{{\\textnormal{E}}}\n", |
|
"\\def\\ermF{{\\textnormal{F}}}\n", |
|
"\\def\\ermG{{\\textnormal{G}}}\n", |
|
"\\def\\ermH{{\\textnormal{H}}}\n", |
|
"\\def\\ermI{{\\textnormal{I}}}\n", |
|
"\\def\\ermJ{{\\textnormal{J}}}\n", |
|
"\\def\\ermK{{\\textnormal{K}}}\n", |
|
"\\def\\ermL{{\\textnormal{L}}}\n", |
|
"\\def\\ermM{{\\textnormal{M}}}\n", |
|
"\\def\\ermN{{\\textnormal{N}}}\n", |
|
"\\def\\ermO{{\\textnormal{O}}}\n", |
|
"\\def\\ermP{{\\textnormal{P}}}\n", |
|
"\\def\\ermQ{{\\textnormal{Q}}}\n", |
|
"\\def\\ermR{{\\textnormal{R}}}\n", |
|
"\\def\\ermS{{\\textnormal{S}}}\n", |
|
"\\def\\ermT{{\\textnormal{T}}}\n", |
|
"\\def\\ermU{{\\textnormal{U}}}\n", |
|
"\\def\\ermV{{\\textnormal{V}}}\n", |
|
"\\def\\ermW{{\\textnormal{W}}}\n", |
|
"\\def\\ermX{{\\textnormal{X}}}\n", |
|
"\\def\\ermY{{\\textnormal{Y}}}\n", |
|
"\\def\\ermZ{{\\textnormal{Z}}}\n", |
|
"\\def\\vzero{{\\bm{0}}}\n", |
|
"\\def\\vone{{\\bm{1}}}\n", |
|
"\\def\\vmu{{\\bm{\\mu}}}\n", |
|
"\\def\\vtheta{{\\bm{\\theta}}}\n", |
|
"\\def\\va{{\\bm{a}}}\n", |
|
"\\def\\vb{{\\bm{b}}}\n", |
|
"\\def\\vc{{\\bm{c}}}\n", |
|
"\\def\\vd{{\\bm{d}}}\n", |
|
"\\def\\ve{{\\bm{e}}}\n", |
|
"\\def\\vf{{\\bm{f}}}\n", |
|
"\\def\\vg{{\\bm{g}}}\n", |
|
"\\def\\vh{{\\bm{h}}}\n", |
|
"\\def\\vi{{\\bm{i}}}\n", |
|
"\\def\\vj{{\\bm{j}}}\n", |
|
"\\def\\vk{{\\bm{k}}}\n", |
|
"\\def\\vl{{\\bm{l}}}\n", |
|
"\\def\\vm{{\\bm{m}}}\n", |
|
"\\def\\vn{{\\bm{n}}}\n", |
|
"\\def\\vo{{\\bm{o}}}\n", |
|
"\\def\\vp{{\\bm{p}}}\n", |
|
"\\def\\vq{{\\bm{q}}}\n", |
|
"\\def\\vr{{\\bm{r}}}\n", |
|
"\\def\\vs{{\\bm{s}}}\n", |
|
"\\def\\vt{{\\bm{t}}}\n", |
|
"\\def\\vu{{\\bm{u}}}\n", |
|
"\\def\\vv{{\\bm{v}}}\n", |
|
"\\def\\vw{{\\bm{w}}}\n", |
|
"\\def\\vx{{\\bm{x}}}\n", |
|
"\\def\\vy{{\\bm{y}}}\n", |
|
"\\def\\vz{{\\bm{z}}}\n", |
|
"\\def\\evalpha{{\\alpha}}\n", |
|
"\\def\\evbeta{{\\beta}}\n", |
|
"\\def\\evepsilon{{\\epsilon}}\n", |
|
"\\def\\evlambda{{\\lambda}}\n", |
|
"\\def\\evomega{{\\omega}}\n", |
|
"\\def\\evmu{{\\mu}}\n", |
|
"\\def\\evpsi{{\\psi}}\n", |
|
"\\def\\evsigma{{\\sigma}}\n", |
|
"\\def\\evtheta{{\\theta}}\n", |
|
"\\def\\eva{{a}}\n", |
|
"\\def\\evb{{b}}\n", |
|
"\\def\\evc{{c}}\n", |
|
"\\def\\evd{{d}}\n", |
|
"\\def\\eve{{e}}\n", |
|
"\\def\\evf{{f}}\n", |
|
"\\def\\evg{{g}}\n", |
|
"\\def\\evh{{h}}\n", |
|
"\\def\\evi{{i}}\n", |
|
"\\def\\evj{{j}}\n", |
|
"\\def\\evk{{k}}\n", |
|
"\\def\\evl{{l}}\n", |
|
"\\def\\evm{{m}}\n", |
|
"\\def\\evn{{n}}\n", |
|
"\\def\\evo{{o}}\n", |
|
"\\def\\evp{{p}}\n", |
|
"\\def\\evq{{q}}\n", |
|
"\\def\\evr{{r}}\n", |
|
"\\def\\evs{{s}}\n", |
|
"\\def\\evt{{t}}\n", |
|
"\\def\\evu{{u}}\n", |
|
"\\def\\evv{{v}}\n", |
|
"\\def\\evw{{w}}\n", |
|
"\\def\\evx{{x}}\n", |
|
"\\def\\evy{{y}}\n", |
|
"\\def\\evz{{z}}\n", |
|
"\\def\\mA{{\\bm{A}}}\n", |
|
"\\def\\mB{{\\bm{B}}}\n", |
|
"\\def\\mC{{\\bm{C}}}\n", |
|
"\\def\\mD{{\\bm{D}}}\n", |
|
"\\def\\mE{{\\bm{E}}}\n", |
|
"\\def\\mF{{\\bm{F}}}\n", |
|
"\\def\\mG{{\\bm{G}}}\n", |
|
"\\def\\mH{{\\bm{H}}}\n", |
|
"\\def\\mI{{\\bm{I}}}\n", |
|
"\\def\\mJ{{\\bm{J}}}\n", |
|
"\\def\\mK{{\\bm{K}}}\n", |
|
"\\def\\mL{{\\bm{L}}}\n", |
|
"\\def\\mM{{\\bm{M}}}\n", |
|
"\\def\\mN{{\\bm{N}}}\n", |
|
"\\def\\mO{{\\bm{O}}}\n", |
|
"\\def\\mP{{\\bm{P}}}\n", |
|
"\\def\\mQ{{\\bm{Q}}}\n", |
|
"\\def\\mR{{\\bm{R}}}\n", |
|
"\\def\\mS{{\\bm{S}}}\n", |
|
"\\def\\mT{{\\bm{T}}}\n", |
|
"\\def\\mU{{\\bm{U}}}\n", |
|
"\\def\\mV{{\\bm{V}}}\n", |
|
"\\def\\mW{{\\bm{W}}}\n", |
|
"\\def\\mX{{\\bm{X}}}\n", |
|
"\\def\\mY{{\\bm{Y}}}\n", |
|
"\\def\\mZ{{\\bm{Z}}}\n", |
|
"\\def\\mBeta{{\\bm{\\beta}}}\n", |
|
"\\def\\mPhi{{\\bm{\\Phi}}}\n", |
|
"\\def\\mLambda{{\\bm{\\Lambda}}}\n", |
|
"\\def\\mSigma{{\\bm{\\Sigma}}}\n", |
|
"\\DeclareMathAlphabet{\\mathsfit}{\\encodingdefault}{\\sfdefault}{m}{sl}\n", |
|
"\\SetMathAlphabet{\\mathsfit}{bold}{\\encodingdefault}{\\sfdefault}{bx}{n}\n", |
|
"\\newcommand{\\tens}[1]{\\bm{\\mathsfit{#1}}}\n", |
|
"\\def\\tA{{\\tens{A}}}\n", |
|
"\\def\\tB{{\\tens{B}}}\n", |
|
"\\def\\tC{{\\tens{C}}}\n", |
|
"\\def\\tD{{\\tens{D}}}\n", |
|
"\\def\\tE{{\\tens{E}}}\n", |
|
"\\def\\tF{{\\tens{F}}}\n", |
|
"\\def\\tG{{\\tens{G}}}\n", |
|
"\\def\\tH{{\\tens{H}}}\n", |
|
"\\def\\tI{{\\tens{I}}}\n", |
|
"\\def\\tJ{{\\tens{J}}}\n", |
|
"\\def\\tK{{\\tens{K}}}\n", |
|
"\\def\\tL{{\\tens{L}}}\n", |
|
"\\def\\tM{{\\tens{M}}}\n", |
|
"\\def\\tN{{\\tens{N}}}\n", |
|
"\\def\\tO{{\\tens{O}}}\n", |
|
"\\def\\tP{{\\tens{P}}}\n", |
|
"\\def\\tQ{{\\tens{Q}}}\n", |
|
"\\def\\tR{{\\tens{R}}}\n", |
|
"\\def\\tS{{\\tens{S}}}\n", |
|
"\\def\\tT{{\\tens{T}}}\n", |
|
"\\def\\tU{{\\tens{U}}}\n", |
|
"\\def\\tV{{\\tens{V}}}\n", |
|
"\\def\\tW{{\\tens{W}}}\n", |
|
"\\def\\tX{{\\tens{X}}}\n", |
|
"\\def\\tY{{\\tens{Y}}}\n", |
|
"\\def\\tZ{{\\tens{Z}}}\n", |
|
"\\def\\gA{{\\mathcal{A}}}\n", |
|
"\\def\\gB{{\\mathcal{B}}}\n", |
|
"\\def\\gC{{\\mathcal{C}}}\n", |
|
"\\def\\gD{{\\mathcal{D}}}\n", |
|
"\\def\\gE{{\\mathcal{E}}}\n", |
|
"\\def\\gF{{\\mathcal{F}}}\n", |
|
"\\def\\gG{{\\mathcal{G}}}\n", |
|
"\\def\\gH{{\\mathcal{H}}}\n", |
|
"\\def\\gI{{\\mathcal{I}}}\n", |
|
"\\def\\gJ{{\\mathcal{J}}}\n", |
|
"\\def\\gK{{\\mathcal{K}}}\n", |
|
"\\def\\gL{{\\mathcal{L}}}\n", |
|
"\\def\\gM{{\\mathcal{M}}}\n", |
|
"\\def\\gN{{\\mathcal{N}}}\n", |
|
"\\def\\gO{{\\mathcal{O}}}\n", |
|
"\\def\\gP{{\\mathcal{P}}}\n", |
|
"\\def\\gQ{{\\mathcal{Q}}}\n", |
|
"\\def\\gR{{\\mathcal{R}}}\n", |
|
"\\def\\gS{{\\mathcal{S}}}\n", |
|
"\\def\\gT{{\\mathcal{T}}}\n", |
|
"\\def\\gU{{\\mathcal{U}}}\n", |
|
"\\def\\gV{{\\mathcal{V}}}\n", |
|
"\\def\\gW{{\\mathcal{W}}}\n", |
|
"\\def\\gX{{\\mathcal{X}}}\n", |
|
"\\def\\gY{{\\mathcal{Y}}}\n", |
|
"\\def\\gZ{{\\mathcal{Z}}}\n", |
|
"\\def\\sA{{\\mathbb{A}}}\n", |
|
"\\def\\sB{{\\mathbb{B}}}\n", |
|
"\\def\\sC{{\\mathbb{C}}}\n", |
|
"\\def\\sD{{\\mathbb{D}}}\n", |
|
"\\def\\sF{{\\mathbb{F}}}\n", |
|
"\\def\\sG{{\\mathbb{G}}}\n", |
|
"\\def\\sH{{\\mathbb{H}}}\n", |
|
"\\def\\sI{{\\mathbb{I}}}\n", |
|
"\\def\\sJ{{\\mathbb{J}}}\n", |
|
"\\def\\sK{{\\mathbb{K}}}\n", |
|
"\\def\\sL{{\\mathbb{L}}}\n", |
|
"\\def\\sM{{\\mathbb{M}}}\n", |
|
"\\def\\sN{{\\mathbb{N}}}\n", |
|
"\\def\\sO{{\\mathbb{O}}}\n", |
|
"\\def\\sP{{\\mathbb{P}}}\n", |
|
"\\def\\sQ{{\\mathbb{Q}}}\n", |
|
"\\def\\sR{{\\mathbb{R}}}\n", |
|
"\\def\\sS{{\\mathbb{S}}}\n", |
|
"\\def\\sT{{\\mathbb{T}}}\n", |
|
"\\def\\sU{{\\mathbb{U}}}\n", |
|
"\\def\\sV{{\\mathbb{V}}}\n", |
|
"\\def\\sW{{\\mathbb{W}}}\n", |
|
"\\def\\sX{{\\mathbb{X}}}\n", |
|
"\\def\\sY{{\\mathbb{Y}}}\n", |
|
"\\def\\sZ{{\\mathbb{Z}}}\n", |
|
"\\def\\emLambda{{\\Lambda}}\n", |
|
"\\def\\emA{{A}}\n", |
|
"\\def\\emB{{B}}\n", |
|
"\\def\\emC{{C}}\n", |
|
"\\def\\emD{{D}}\n", |
|
"\\def\\emE{{E}}\n", |
|
"\\def\\emF{{F}}\n", |
|
"\\def\\emG{{G}}\n", |
|
"\\def\\emH{{H}}\n", |
|
"\\def\\emI{{I}}\n", |
|
"\\def\\emJ{{J}}\n", |
|
"\\def\\emK{{K}}\n", |
|
"\\def\\emL{{L}}\n", |
|
"\\def\\emM{{M}}\n", |
|
"\\def\\emN{{N}}\n", |
|
"\\def\\emO{{O}}\n", |
|
"\\def\\emP{{P}}\n", |
|
"\\def\\emQ{{Q}}\n", |
|
"\\def\\emR{{R}}\n", |
|
"\\def\\emS{{S}}\n", |
|
"\\def\\emT{{T}}\n", |
|
"\\def\\emU{{U}}\n", |
|
"\\def\\emV{{V}}\n", |
|
"\\def\\emW{{W}}\n", |
|
"\\def\\emX{{X}}\n", |
|
"\\def\\emY{{Y}}\n", |
|
"\\def\\emZ{{Z}}\n", |
|
"\\def\\emSigma{{\\Sigma}}\n", |
|
"\\newcommand{\\etens}[1]{\\mathsfit{#1}}\n", |
|
"\\def\\etLambda{{\\etens{\\Lambda}}}\n", |
|
"\\def\\etA{{\\etens{A}}}\n", |
|
"\\def\\etB{{\\etens{B}}}\n", |
|
"\\def\\etC{{\\etens{C}}}\n", |
|
"\\def\\etD{{\\etens{D}}}\n", |
|
"\\def\\etE{{\\etens{E}}}\n", |
|
"\\def\\etF{{\\etens{F}}}\n", |
|
"\\def\\etG{{\\etens{G}}}\n", |
|
"\\def\\etH{{\\etens{H}}}\n", |
|
"\\def\\etI{{\\etens{I}}}\n", |
|
"\\def\\etJ{{\\etens{J}}}\n", |
|
"\\def\\etK{{\\etens{K}}}\n", |
|
"\\def\\etL{{\\etens{L}}}\n", |
|
"\\def\\etM{{\\etens{M}}}\n", |
|
"\\def\\etN{{\\etens{N}}}\n", |
|
"\\def\\etO{{\\etens{O}}}\n", |
|
"\\def\\etP{{\\etens{P}}}\n", |
|
"\\def\\etQ{{\\etens{Q}}}\n", |
|
"\\def\\etR{{\\etens{R}}}\n", |
|
"\\def\\etS{{\\etens{S}}}\n", |
|
"\\def\\etT{{\\etens{T}}}\n", |
|
"\\def\\etU{{\\etens{U}}}\n", |
|
"\\def\\etV{{\\etens{V}}}\n", |
|
"\\def\\etW{{\\etens{W}}}\n", |
|
"\\def\\etX{{\\etens{X}}}\n", |
|
"\\def\\etY{{\\etens{Y}}}\n", |
|
"\\def\\etZ{{\\etens{Z}}}\n", |
|
"\\newcommand{\\pdata}{p_{\\rm{data}}}\n", |
|
"\\newcommand{\\ptrain}{\\hat{p}_{\\rm{data}}}\n", |
|
"\\newcommand{\\Ptrain}{\\hat{P}_{\\rm{data}}}\n", |
|
"\\newcommand{\\pmodel}{p_{\\rm{model}}}\n", |
|
"\\newcommand{\\Pmodel}{P_{\\rm{model}}}\n", |
|
"\\newcommand{\\ptildemodel}{\\tilde{p}_{\\rm{model}}}\n", |
|
"\\newcommand{\\pencode}{p_{\\rm{encoder}}}\n", |
|
"\\newcommand{\\pdecode}{p_{\\rm{decoder}}}\n", |
|
"\\newcommand{\\precons}{p_{\\rm{reconstruct}}}\n", |
|
"\\newcommand{\\laplace}{\\mathrm{Laplace}} \n", |
|
"\\newcommand{\\E}{\\mathbb{E}}\n", |
|
"\\newcommand{\\Ls}{\\mathcal{L}}\n", |
|
"\\newcommand{\\R}{\\mathbb{R}}\n", |
|
"\\newcommand{\\emp}{\\tilde{p}}\n", |
|
"\\newcommand{\\lr}{\\alpha}\n", |
|
"\\newcommand{\\reg}{\\lambda}\n", |
|
"\\newcommand{\\rect}{\\mathrm{rectifier}}\n", |
|
"\\newcommand{\\softmax}{\\mathrm{softmax}}\n", |
|
"\\newcommand{\\sigmoid}{\\sigma}\n", |
|
"\\newcommand{\\softplus}{\\zeta}\n", |
|
"\\newcommand{\\KL}{D_{\\mathrm{KL}}}\n", |
|
"\\newcommand{\\Var}{\\mathrm{Var}}\n", |
|
"\\newcommand{\\standarderror}{\\mathrm{SE}}\n", |
|
"\\newcommand{\\Cov}{\\mathrm{Cov}}\n", |
|
"\\newcommand{\\normlzero}{L^0}\n", |
|
"\\newcommand{\\normlone}{L^1}\n", |
|
"\\newcommand{\\normltwo}{L^2}\n", |
|
"\\newcommand{\\normlp}{L^p}\n", |
|
"\\newcommand{\\normmax}{L^\\infty}\n", |
|
"\\newcommand{\\parents}{Pa} \n", |
|
"\\DeclareMathOperator*{\\argmax}{arg\\,max}\n", |
|
"\\DeclareMathOperator*{\\argmin}{arg\\,min}\n", |
|
"\\DeclareMathOperator{\\sign}{sign}\n", |
|
"\\DeclareMathOperator{\\Tr}{Tr}\n", |
|
"\\let\\ab\\allowbreak\n", |
|
"\\usepackage{hyperref}\n", |
|
"\\usepackage{paralist}\n", |
|
"\\usepackage{amsmath}\n", |
|
"\\usepackage{amssymb}\n", |
|
"\\usepackage{wrapfig}\n", |
|
"\\usepackage{multirow}\n", |
|
"\\usepackage{multicol}\n", |
|
"\\usepackage{multirow}\n", |
|
"\\usepackage{dsfont}\n", |
|
"\\usepackage{tabularx}\n", |
|
"\\usepackage{graphicx}\n", |
|
"\\usepackage{xcolor}\n", |
|
"\\usepackage{soul}\n", |
|
"\\usepackage{xspace}\n", |
|
"\\usepackage{booktabs}\n", |
|
"\\usepackage{caption}\n", |
|
"\\usepackage{enumitem}\n", |
|
"\\usepackage{algorithm}\n", |
|
"\\usepackage[noend]{algorithmic}\n", |
|
"\\usepackage{pythonhighlight}\n", |
|
"\\usepackage{pifont}\n", |
|
"\\usepackage[most]{tcolorbox}\n", |
|
"\\usepackage[framemethod=tikz]{mdframed}\n", |
|
"\\definecolor{qualcolor}{RGB}{128,64,0}\n", |
|
"\\gdef\\Sepline{\n", |
|
" \\par\\noindent\\makebox[\\linewidth][l]{\n", |
|
" \\hspace*{-\\mdflength{innerleftmargin}}\n", |
|
" \\tikz\\draw[thick,dashed,gray!60] (0,0) --\n", |
|
" (\\textwidth+\\the\\mdflength{innerleftmargin}+\\the\\mdflength{innerrightmargin},0);\n", |
|
" }\\par\\nobreak}\n", |
|
"\\newcommand{\\cmark}{\\ding{51}}\n", |
|
"\\newcommand{\\xmark}{\\ding{55}}\n", |
|
"\\newcommand{\\sbf}{\\ensuremath{\\mathbf{s}}}\n", |
|
"\\newcommand{\\txb}{\\ensuremath{\\tilde{\\mathbf{x}}}}\n", |
|
"\\newcommand{\\tyb}{\\ensuremath{\\tilde{\\mathbf{y}}}}\n", |
|
"\\newcommand{\\ty}{\\ensuremath{\\tilde{y}}}\n", |
|
"\\newcommand{\\xb}{\\ensuremath{\\mathbf{x}}}\n", |
|
"\\newcommand{\\eb}{\\ensuremath{\\mathbf{e}}}\n", |
|
"\\newcommand{\\yb}{\\ensuremath{\\mathbf{y}}}\n", |
|
"\\newcommand{\\Tmc}{\\ensuremath{\\mathcal{T}}}\n", |
|
"\\newcommand{\\Pmc}{\\ensuremath{\\mathcal{P}}}\n", |
|
"\\newcommand{\\Omc}{\\ensuremath{\\mathcal{O}}}\n", |
|
"\\newcommand{\\Vmc}{\\ensuremath{\\mathcal{V}}}\n", |
|
"\\newcommand{\\Emc}{\\ensuremath{\\mathcal{E}}}\n", |
|
"\\newcommand{\\Yhatmc}{\\ensuremath{\\hat{\\mathcal{Y}}}}\n", |
|
"\\newcommand{\\vocab}{\\ensuremath{\\mathcal{V}}}\n", |
|
"\\DeclareMathOperator*{\\argtopK}{arg\\,\\mathrm{top}\\text{-}\\mathrm{K}}\n", |
|
"\\newcommand{\\corr}{\\xspace}\n", |
|
"\\usepackage[normalem]{ulem}\n", |
|
"\\setlength{\\marginparwidth}{2cm}\n", |
|
"\\usepackage[colorinlistoftodos]{todonotes}\n", |
|
"\\definecolor{green(pigment)}{rgb}{0.0, 0.65, 0.31}\n", |
|
"\\usepackage{array}\n", |
|
"\\usepackage{makecell}\n", |
|
"\\renewcommand\\theadfont{}\n", |
|
"\\usepackage{framed}\n", |
|
"\\usepackage{hhline}\n", |
|
"\\usepackage{courierten}\n", |
|
"\\usepackage[T1]{fontenc} \n", |
|
"\\newcommand\\modelfont[1]{{\\usefont{T1}{courierten}{m}{n}#1}}\n", |
|
"\\newcommand\\myfontsize{\\fontsize{8.3pt}{10.3pt}\\selectfont}\n", |
|
"\\newcommand{\\methodnamelong}{\\modelfont{Self-Correction for Sequence Generation}\\xspace}\n", |
|
"\\newcommand{\\methodnamewithacronymhighlighted}{\\modelfont{\\ul{Pre}diction-\\ul{C}orrect\\ul{i}on for \\ul{Se}quence Generation}\\xspace}\n", |
|
"\\newcommand{\\methodnameshort}{\\modelfont{Self-Correction}\\xspace}\n", |
|
"\\newcommand{\\methodnamenospace}{\\modelfont{Self-Correction}\\xspace}\n", |
|
"\\newcommand{\\method}{\\textsc{Self-Correct}\\xspace}\n", |
|
"\\newcommand{\\myparagraph}[1]{\\par\\noindent\\textbf{{#1}}} \n", |
|
"\\newcommand{\\myparagraphsmall}[1]{\\par\\noindent\\textit{{#1}}}\n", |
|
"\\title{Generating Sequences by \\\\Learning to [Self-]Correct}\n", |
|
"\\author{Sean Welleck\\textsuperscript{1,3,*} \\hspace{1pt}\n", |
|
"Ximing Lu\\textsuperscript{1,*}\n", |
|
"\\AND Peter West\\textsuperscript{3,$\\dagger$} \\hspace{1pt} Faeze Brahman\\textsuperscript{1,$\\dagger$}\n", |
|
"\\AND Tianxiao Shen\\textsuperscript{3} \\hspace{1pt} Daniel Khashabi\\textsuperscript{2} \\hspace{1pt} Yejin Choi\\textsuperscript{1,3} \\\\\n", |
|
"\\textsuperscript{1}Allen Institute for Artificial Intelligence\\\\\n", |
|
" \\textsuperscript{2}Center for Language and Speech Processing, Johns Hopkins University \\\\\n", |
|
" \\textsuperscript{3}Paul G. Allen School of Computer Science \\& Engineering, University of Washington \\\\\n", |
|
"}\n", |
|
"\\newcommand{\\fix}{\\marginpar{FIX}}\n", |
|
"\\newcommand{\\new}{\\marginpar{NEW}}\n", |
|
"\\iclrfinalcopy \n", |
|
"\\begin{document}\n", |
|
"\\maketitle\n", |
|
"\\renewcommand*\\thefootnote{\\textbf{$*$}}\\footnotetext{First authors, contributed equally. \\textbf{$\\dagger$}Second authors, contributed equally.}\n", |
|
"\\renewcommand*{\\thefootnote}{\\arabic{footnote}}\n", |
|
"\\setcounter{footnote}{0}\n", |
|
"\\begin{abstract}\n", |
|
"Language models, whether fine-tuned or prompted with few-shot demonstrations, frequently violate these constraints, and lack a mechanism to iteratively revise their outputs.\n", |
|
"Moreover, some powerful language models are of extreme scale or inaccessible, making it inefficient, if not infeasible, to update their parameters for task-specific adaptation. \n", |
|
"We present \\textsc{self-correction}, \n", |
|
"\\end{abstract}\n", |
|
"\\section{Introduction}\n", |
|
"The standard practice for natural language generation tasks is inherently single-pass: applying a decoding procedure \n", |
|
"to either a few-shot prompted language model or one tuned for a given task, then considering the generation as ``finished''~(e.g. \\citet{radford2019language,brown2020,chen2021codex}).\n", |
|
"Powerful generation models\n", |
|
"often meet most of the task requirements, yet miss a few\n", |
|
"(e.g., omitting a subset of keywords), \n", |
|
"or generate incorrect hypotheses that nevertheless provide useful structure \n", |
|
"(e.g., a correct problem solving strategy with a missing step). \n", |
|
"However, after generating even a slightly sub-optimal sequence, the single-pass paradigm requires models to ``start from scratch'', \n", |
|
"effectively discarding work already done. \n", |
|
"A more natural, intuitive approach\n", |
|
"is leveraging the generation as a useful starting point\n", |
|
"to refine \n", |
|
"into a higher quality output.\n", |
|
"To formalize this intuition, we introduce \\methodnamelong. \n", |
|
"\\autoref{fig:teaser} demonstrates its central principle: a generation model is re-framed as a base\n", |
|
"\\begin{figure}[t]\n", |
|
" \\centering\n", |
|
" \\includegraphics[width=0.99\\textwidth]{images/summary_figure}\n", |
|
" \\caption{\\textsc{Self-corrector}s decompose generation into a base generator that proposes an initial hypothesis, and a corrector that iteratively improves its quality.}\n", |
|
" \\label{fig:teaser}\n", |
|
"\\end{figure}\n", |
|
"\\section{Self-correcting sequence generators}\n", |
|
"\\label{sec:method}\n", |
|
"A typical autoregressive text generator (e.g. GPT-3~\\citep{brown2020}) maps an input prompt to a distribution over outputs using a single parameterized module (e.g. a large transformer), $p_0(y|x)$.\n", |
|
"\\begin{align}\n", |
|
"\\label{eqn:model-onestep}\n", |
|
"p(y|x)=\\sum_{y_0}\\underbrace{p_0(y_0|x)}_{\\text{generator}}\\underbrace{p_\\theta(y|y_0,x)}_{\\text{corrector}}\n", |
|
"\\end{align}\n", |
|
"Since a model of this form can both generate and correct its generations, we call it a \\modelfont{Self-Corrector}.\n", |
|
"Self-correctors have several unique properties compared to typical generators.\n", |
|
"First, a self-corrector \n", |
|
"decouples generation and correction, \n", |
|
"allowing us to \\emph{freely parameterize each module} -- \n", |
|
"for instance, by prompting a single language model or using two different language models. \n", |
|
"In this paper, we develop a framework to train a separate corrector model\n", |
|
"(\\S\\ref{ssec:learning}).\n", |
|
"In \\S\\ref{ssec:learning}, we propose a training algorithm for the corrector that is dedicated to improving generations, where the improvement can be in any aspect, measured by scalar values.\n", |
|
"The feedback can be of many forms, e.g. a sentence, a compiler trace, etc. \n", |
|
"In contrast, a typical generator that generates in a single pass does not leverage feedback on its own generation.\n", |
|
"In this paper, \n", |
|
"Next, we describe our training framework of the corrector.\n", |
|
"\\subsection{Learning a Corrector}\n", |
|
"\\label{ssec:learning}\n", |
|
"Our goal is to have the generator generate an initial hypothesis, \n", |
|
"Here, quality is measured with a scalar value function $v(y)$ which we assume is accessible at training time (e.g. a classifier).\n", |
|
"Since direct supervision on how to improve hypotheses is not available, we design a new algorithm to train the corrector, which we refer to as self-corrective learning.\n", |
|
"The algorithm collects a pool of generations, \n", |
|
"groups them and \n", |
|
"As training progresses, more \n", |
|
"Algorithm~\\ref{alg:main} summarizes self-corrective learning, detailed below.\n", |
|
"\\begin{figure}[t]\n", |
|
" \\centering\n", |
|
" \\includegraphics[width=0.99\\textwidth]{images/learning_figure_v2.png}\n", |
|
" }\n", |
|
" \\label{fig:learning}\n", |
|
"\\end{figure}\n", |
|
"\\myparagraph{Initialization.}\n", |
|
"Self-corrective learning begins with a generator $p_0(y_0|x)$, a corrector \n", |
|
"$p_\\theta(y'|y,x)$\n", |
|
", a set of training prompts $X$, and a value function \n", |
|
"$v:\\mathcal Y \\rightarrow\\mathbb R$.\n", |
|
"Optionally,\n", |
|
"$f: \\mathcal Y \\rightarrow \\mathcal F$ and learn $p_\\theta(y'|y,x,f(y))$,\n", |
|
"where $\\mathcal F$ \n", |
|
"is arbitrary.\n", |
|
"Formally, \n", |
|
"\\begin{align}\n", |
|
"D_x=\\{(x,y,v(y), f(y))\\ |\\ \\text{for all } y\\in y^{1:N}\\sim q(p_0(\\cdot|x))\\},\\quad D=\\bigcup_{x\\in X} D_x,\n", |
|
"\\label{eqn:d0}\n", |
|
"\\end{align}\n", |
|
"where $y^{1:N}$ denotes $N$ outputs generated with decoding algorithm $q$ (e.g. temperature sampling).\n", |
|
"When available, $(x, y, v(y), f(y))$ examples from another source (e.g. a dataset) can also be added.\n", |
|
"\\newcommand{\\algcomment}[1]{{\\footnotesize \\fontfamily{cmtt}\\selectfont // #1}}\n", |
|
"\\renewcommand{\\algorithmiccomment}[1]{\\hfill{\\(\\triangleright\\)~#1}\\par}\n", |
|
"\\begin{figure}[t]\n", |
|
"\\vspace{-1em}\n", |
|
"\\begin{algorithm}[H]\n", |
|
"\\small\n", |
|
"\\begin{algorithmic}\n", |
|
"\\\\\n", |
|
"\\text{Initialize datapool }$D$ by sampling from $p_0$\\algorithmiccomment{Initialization: Eq.~\\ref{eqn:d0}}\n", |
|
"\\FOR{$\\text{iteration}\\in\\{1,2,\\ldots\\}$}\n", |
|
"\\FOR{$x \\in X$} \n", |
|
"\\STATE Sample hypotheses $y$ from datapool $D$ \n", |
|
"\\STATE Generate corrections $y'\\sim p_\\theta(\\cdot|y,x,f(y))$\n", |
|
"\\STATE Add all $(x,y',v(y'),f(y'))$ to the datapool $D$\n", |
|
"\\ENDFOR\n", |
|
"\\FOR{step in $1,2,\\ldots,M$ }\n", |
|
"\\item Compute the loss and update $\\theta$ using gradient descent\n", |
|
"\\algorithmiccomment{Learning}\n", |
|
"\\ENDFOR\n", |
|
"\\ENDFOR\n", |
|
"\\end{algorithmic}\n", |
|
"\\caption{Self-corrective learning}\n", |
|
"\\label{alg:main}\n", |
|
"\\end{algorithm}\n", |
|
"\\vspace{-1em}\n", |
|
"\\end{figure}\n", |
|
"We use \n", |
|
"\\begin{align}\n", |
|
"\\label{eqn:pairing}\n", |
|
" P_x=\\{(x,y,y')\\mid v(y)<v(y')\\text{ for all } y,y'\\in D_x\\times D_x\\},\\quad P=\\bigcup_{x\\in X} P_x,\n", |
|
"\\end{align}\n", |
|
"\\myparagraph{Learning.}\n", |
|
"\\begin{align}\n", |
|
"\\label{eqn:residual-subsample}\n", |
|
" \\mathds P[(x, y,y')]&\\propto \\exp\\big(\\underbrace{\\alpha\\cdot(v(y')-v(y))}_{\\text{improvement}}+\\underbrace{\\beta\\cdot s(y,y')}_{\\text{proximity}}\\big)/Z(y), \n", |
|
"\\end{align}\n", |
|
"where $s(y,y')$ is a similarity function and $Z(y)$\n", |
|
"normalizes over the available corrections for $y$ in $P_x$.\n", |
|
"Increasing the hyperparameter $\\alpha\\in\\mathbb{R}_{\\geq 0}$ puts more weight on targets that add more value, while\n", |
|
"increasing $\\beta\\in\\mathbb{R}_{\\geq 0}$ retains more similar targets. \n", |
|
"We update the corrector using the cross-entropy loss $\\mathcal{L}(\\theta) = -\\log p_\\theta(y'|y,x,f(y))$ on batches sampled in this way.\n", |
|
"\\begin{align}\n", |
|
"\\label{eqn:exploration}\n", |
|
"D'_x&=\\{(x,y',v(y'), f(y'))\\ |\\ \\text{for all } y'\\in y'^{1:N}\\sim q(p_\\theta(\\cdot|y,x,f(y))\\},\\quad D'=\\bigcup_{x\\in X} D'_x\n", |
|
"\\end{align}\n", |
|
"and updating the datapool $D\\leftarrow D\\cup D'$.\n", |
|
"or from the datapool; \n", |
|
"we use the latter in our experiments.\n", |
|
"\\myparagraph{Inference.}\n", |
|
"Since marginalizing over the intermediate generations in Eq.~\\ref{eqn:model-onestep} is intractable, we approximate each summation with a single sequence generated with a decoding algorithm $q(\\cdot)$.\n", |
|
"\\begin{itemize}[leftmargin=*,topsep=0pt,itemsep=-1ex,partopsep=1ex,parsep=1ex]\n", |
|
" \\item Generation: $y_0\\sim q(p_0(y_0|x))$;\n", |
|
" \\item Correction: $y_{t+1}\\sim q(p_\\theta(y_{t+1}|y_{t},x, f(y_t)))$,\\quad $t=0,1,\\dots,T-1$.\n", |
|
"\\end{itemize}\n", |
|
"The stopping time $T$ is either fixed, or when a target value is obtained (if $v(y)$ is available).\n" |
|
], |
|
"output": { |
|
"What experiments do you suggest doing?": [ |
|
"1. Evaluating Self-Correction on diverse tasks: The authors should evaluate the proposed self-correction on a diversity of tasks. The evaluation tasks should be ones that require satisfying semantic constraints, such as ensuring that programs are correct, using certain keywords, or avoiding undesirable content.",
|
"2. Improvement with self-correctors: The authors should apply self-correctors on the base generators on different tasks. Then compare if self-correctors bring improvements upon generators.", |
|
"3. Correcting generators that are much larger than the corrector: The authors should evaluate the performance of using a small corrector to correct a large generator. They can compare the performance in the following two cases: (1) training with a small generator, then swapping in the larger generator at test time; (2) training with the larger generator, i.e. using the large generator to initialize the datapool for self-corrective learning, then using the large generator at test time.", |
|
"4. Self-correctors leveraging explicit feedback during training and inference: The authors should incorporate natural language feedback that points out the downsides of the generated content to the self-correctors. Then compare the performance among the generator-only, generator+self-correct, and generator+self-correct with feedback.",
|
"5. Effect of multiple corrections: The authors should apply multiple corrections on different tasks and report the performance change when the number of corrections increases.", |
|
"6. Effect of pairing and proportional sampling: The authors should ablate two components in the proposed self-correction: (i) samples pairs for learning proportional to Equation 4. (ii) only pairs sequences that improve value. Then show the performance change.", |
|
"7. Effect of exploration: The authors should ablate the effect of exploration. They could train a baseline only on correction pairs induced from the base generator and show the results." |
|
], |
|
"Why do you suggest these experiments?": [ |
|
"1. To show that Self-Correction is effective on various tasks that align with the paper\u2019s motivation.",
|
"2. To show that self-correctors are effective in correcting the original generators and thus improve the generation quality.",
|
"3. To show that a self-corrector can improve the outputs of a generator that is much larger than the corrector.", |
|
"4. To demonstrate self-correct\u2019s capacity to incorporate explicit natural language feedback, and to show that leveraging such feedback could further improve the performance.",
|
"5. To explore if multiple corrections lead to better performance.", |
|
"6. This is an ablation study to show how different components in the proposed self-correct method affect the performance.",
|
"7. To understand how exploration affects the self-correct performance.",
|
] |
|
}, |
|
"paper_info": { |
|
"title": "Generating Sequences by Learning to Self-Correct", |
|
"authors": [ |
|
"Sean Welleck", |
|
"Ximing Lu", |
|
"Peter West", |
|
"Faeze Brahman", |
|
"Tianxiao Shen", |
|
"Daniel Khashabi", |
|
"Yejin Choi" |
|
], |
|
"abstract": "Sequence generation applications require satisfying semantic constraints,\nsuch as ensuring that programs are correct, using certain keywords, or avoiding\nundesirable content. Language models, whether fine-tuned or prompted with\nfew-shot demonstrations, frequently violate these constraints, and lack a\nmechanism to iteratively revise their outputs. Moreover, some powerful language\nmodels are of extreme scale or inaccessible, making it inefficient, if not\ninfeasible, to update their parameters for task-specific adaptation. We present\nSelf-Correction, an approach that decouples an imperfect base generator (an\noff-the-shelf language model or supervised sequence-to-sequence model) from a\nseparate corrector that learns to iteratively correct imperfect generations. To\ntrain the corrector, we propose an online training procedure that can use\neither scalar or natural language feedback on intermediate imperfect\ngenerations. We show that Self-Correction improves upon the base generator in\nthree diverse generation tasks - mathematical program synthesis,\nlexically-constrained generation, and toxicity control - even when the\ncorrector is much smaller than the base generator.", |
|
"comments": null |
|
}, |
|
"raw_data": { |
|
"context_before_exp": [ |
|
"\n", |
|
"\\documentclass{article} \n", |
|
"\\usepackage{iclr2023_conference,times}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\usepackage{amsmath,amsfonts,bm}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\figleft}{{\\em (Left)}}\n", |
|
"\\newcommand{\\figcenter}{{\\em (Center)}}\n", |
|
"\\newcommand{\\figright}{{\\em (Right)}}\n", |
|
"\\newcommand{\\figtop}{{\\em (Top)}}\n", |
|
"\\newcommand{\\figbottom}{{\\em (Bottom)}}\n", |
|
"\\newcommand{\\captiona}{{\\em (a)}}\n", |
|
"\\newcommand{\\captionb}{{\\em (b)}}\n", |
|
"\\newcommand{\\captionc}{{\\em (c)}}\n", |
|
"\\newcommand{\\captiond}{{\\em (d)}}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\newterm}[1]{{\\bf #1}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\def\\figref#1{figure~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Figref#1{Figure~\\ref{#1}}\n", |
|
"\\def\\twofigref#1#2{figures \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\quadfigref#1#2#3#4{figures \\ref{#1}, \\ref{#2}, \\ref{#3} and \\ref{#4}}\n", |
|
"\n", |
|
"\\def\\secref#1{section~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Secref#1{Section~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\twosecrefs#1#2{sections \\ref{#1} and \\ref{#2}}\n", |
|
"\n", |
|
"\\def\\secrefs#1#2#3{sections \\ref{#1}, \\ref{#2} and \\ref{#3}}\n", |
|
"\n", |
|
"\\def\\eqref#1{equation~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Eqref#1{Equation~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\plaineqref#1{\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\chapref#1{chapter~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Chapref#1{Chapter~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\rangechapref#1#2{chapters\\ref{#1}--\\ref{#2}}\n", |
|
"\n", |
|
"\\def\\algref#1{algorithm~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Algref#1{Algorithm~\\ref{#1}}\n", |
|
"\\def\\twoalgref#1#2{algorithms \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\Twoalgref#1#2{Algorithms \\ref{#1} and \\ref{#2}}\n", |
|
"\n", |
|
"\\def\\partref#1{part~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Partref#1{Part~\\ref{#1}}\n", |
|
"\\def\\twopartref#1#2{parts \\ref{#1} and \\ref{#2}}\n", |
|
"\n", |
|
"\\def\\ceil#1{\\lceil #1 \\rceil}\n", |
|
"\\def\\floor#1{\\lfloor #1 \\rfloor}\n", |
|
"\\def\\1{\\bm{1}}\n", |
|
"\\newcommand{\\train}{\\mathcal{D}}\n", |
|
"\\newcommand{\\valid}{\\mathcal{D_{\\mathrm{valid}}}}\n", |
|
"\\newcommand{\\test}{\\mathcal{D_{\\mathrm{test}}}}\n", |
|
"\n", |
|
"\\def\\eps{{\\epsilon}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\def\\reta{{\\textnormal{$\\eta$}}}\n", |
|
"\\def\\ra{{\\textnormal{a}}}\n", |
|
"\\def\\rb{{\\textnormal{b}}}\n", |
|
"\\def\\rc{{\\textnormal{c}}}\n", |
|
"\\def\\rd{{\\textnormal{d}}}\n", |
|
"\\def\\re{{\\textnormal{e}}}\n", |
|
"\\def\\rf{{\\textnormal{f}}}\n", |
|
"\\def\\rg{{\\textnormal{g}}}\n", |
|
"\\def\\rh{{\\textnormal{h}}}\n", |
|
"\\def\\ri{{\\textnormal{i}}}\n", |
|
"\\def\\rj{{\\textnormal{j}}}\n", |
|
"\\def\\rk{{\\textnormal{k}}}\n", |
|
"\\def\\rl{{\\textnormal{l}}}\n", |
|
"\n", |
|
"\\def\\rn{{\\textnormal{n}}}\n", |
|
"\\def\\ro{{\\textnormal{o}}}\n", |
|
"\\def\\rp{{\\textnormal{p}}}\n", |
|
"\\def\\rq{{\\textnormal{q}}}\n", |
|
"\\def\\rr{{\\textnormal{r}}}\n", |
|
"\\def\\rs{{\\textnormal{s}}}\n", |
|
"\\def\\rt{{\\textnormal{t}}}\n", |
|
"\\def\\ru{{\\textnormal{u}}}\n", |
|
"\\def\\rv{{\\textnormal{v}}}\n", |
|
"\\def\\rw{{\\textnormal{w}}}\n", |
|
"\\def\\rx{{\\textnormal{x}}}\n", |
|
"\\def\\ry{{\\textnormal{y}}}\n", |
|
"\\def\\rz{{\\textnormal{z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\rvepsilon{{\\mathbf{\\epsilon}}}\n", |
|
"\\def\\rvtheta{{\\mathbf{\\theta}}}\n", |
|
"\\def\\rva{{\\mathbf{a}}}\n", |
|
"\\def\\rvb{{\\mathbf{b}}}\n", |
|
"\\def\\rvc{{\\mathbf{c}}}\n", |
|
"\\def\\rvd{{\\mathbf{d}}}\n", |
|
"\\def\\rve{{\\mathbf{e}}}\n", |
|
"\\def\\rvf{{\\mathbf{f}}}\n", |
|
"\\def\\rvg{{\\mathbf{g}}}\n", |
|
"\\def\\rvh{{\\mathbf{h}}}\n", |
|
"\\def\\rvu{{\\mathbf{i}}}\n", |
|
"\\def\\rvj{{\\mathbf{j}}}\n", |
|
"\\def\\rvk{{\\mathbf{k}}}\n", |
|
"\\def\\rvl{{\\mathbf{l}}}\n", |
|
"\\def\\rvm{{\\mathbf{m}}}\n", |
|
"\\def\\rvn{{\\mathbf{n}}}\n", |
|
"\\def\\rvo{{\\mathbf{o}}}\n", |
|
"\\def\\rvp{{\\mathbf{p}}}\n", |
|
"\\def\\rvq{{\\mathbf{q}}}\n", |
|
"\\def\\rvr{{\\mathbf{r}}}\n", |
|
"\\def\\rvs{{\\mathbf{s}}}\n", |
|
"\\def\\rvt{{\\mathbf{t}}}\n", |
|
"\\def\\rvu{{\\mathbf{u}}}\n", |
|
"\\def\\rvv{{\\mathbf{v}}}\n", |
|
"\\def\\rvw{{\\mathbf{w}}}\n", |
|
"\\def\\rvx{{\\mathbf{x}}}\n", |
|
"\\def\\rvy{{\\mathbf{y}}}\n", |
|
"\\def\\rvz{{\\mathbf{z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\erva{{\\textnormal{a}}}\n", |
|
"\\def\\ervb{{\\textnormal{b}}}\n", |
|
"\\def\\ervc{{\\textnormal{c}}}\n", |
|
"\\def\\ervd{{\\textnormal{d}}}\n", |
|
"\\def\\erve{{\\textnormal{e}}}\n", |
|
"\\def\\ervf{{\\textnormal{f}}}\n", |
|
"\\def\\ervg{{\\textnormal{g}}}\n", |
|
"\\def\\ervh{{\\textnormal{h}}}\n", |
|
"\\def\\ervi{{\\textnormal{i}}}\n", |
|
"\\def\\ervj{{\\textnormal{j}}}\n", |
|
"\\def\\ervk{{\\textnormal{k}}}\n", |
|
"\\def\\ervl{{\\textnormal{l}}}\n", |
|
"\\def\\ervm{{\\textnormal{m}}}\n", |
|
"\\def\\ervn{{\\textnormal{n}}}\n", |
|
"\\def\\ervo{{\\textnormal{o}}}\n", |
|
"\\def\\ervp{{\\textnormal{p}}}\n", |
|
"\\def\\ervq{{\\textnormal{q}}}\n", |
|
"\\def\\ervr{{\\textnormal{r}}}\n", |
|
"\\def\\ervs{{\\textnormal{s}}}\n", |
|
"\\def\\ervt{{\\textnormal{t}}}\n", |
|
"\\def\\ervu{{\\textnormal{u}}}\n", |
|
"\\def\\ervv{{\\textnormal{v}}}\n", |
|
"\\def\\ervw{{\\textnormal{w}}}\n", |
|
"\\def\\ervx{{\\textnormal{x}}}\n", |
|
"\\def\\ervy{{\\textnormal{y}}}\n", |
|
"\\def\\ervz{{\\textnormal{z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\rmA{{\\mathbf{A}}}\n", |
|
"\\def\\rmB{{\\mathbf{B}}}\n", |
|
"\\def\\rmC{{\\mathbf{C}}}\n", |
|
"\\def\\rmD{{\\mathbf{D}}}\n", |
|
"\\def\\rmE{{\\mathbf{E}}}\n", |
|
"\\def\\rmF{{\\mathbf{F}}}\n", |
|
"\\def\\rmG{{\\mathbf{G}}}\n", |
|
"\\def\\rmH{{\\mathbf{H}}}\n", |
|
"\\def\\rmI{{\\mathbf{I}}}\n", |
|
"\\def\\rmJ{{\\mathbf{J}}}\n", |
|
"\\def\\rmK{{\\mathbf{K}}}\n", |
|
"\\def\\rmL{{\\mathbf{L}}}\n", |
|
"\\def\\rmM{{\\mathbf{M}}}\n", |
|
"\\def\\rmN{{\\mathbf{N}}}\n", |
|
"\\def\\rmO{{\\mathbf{O}}}\n", |
|
"\\def\\rmP{{\\mathbf{P}}}\n", |
|
"\\def\\rmQ{{\\mathbf{Q}}}\n", |
|
"\\def\\rmR{{\\mathbf{R}}}\n", |
|
"\\def\\rmS{{\\mathbf{S}}}\n", |
|
"\\def\\rmT{{\\mathbf{T}}}\n", |
|
"\\def\\rmU{{\\mathbf{U}}}\n", |
|
"\\def\\rmV{{\\mathbf{V}}}\n", |
|
"\\def\\rmW{{\\mathbf{W}}}\n", |
|
"\\def\\rmX{{\\mathbf{X}}}\n", |
|
"\\def\\rmY{{\\mathbf{Y}}}\n", |
|
"\\def\\rmZ{{\\mathbf{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\ermA{{\\textnormal{A}}}\n", |
|
"\\def\\ermB{{\\textnormal{B}}}\n", |
|
"\\def\\ermC{{\\textnormal{C}}}\n", |
|
"\\def\\ermD{{\\textnormal{D}}}\n", |
|
"\\def\\ermE{{\\textnormal{E}}}\n", |
|
"\\def\\ermF{{\\textnormal{F}}}\n", |
|
"\\def\\ermG{{\\textnormal{G}}}\n", |
|
"\\def\\ermH{{\\textnormal{H}}}\n", |
|
"\\def\\ermI{{\\textnormal{I}}}\n", |
|
"\\def\\ermJ{{\\textnormal{J}}}\n", |
|
"\\def\\ermK{{\\textnormal{K}}}\n", |
|
"\\def\\ermL{{\\textnormal{L}}}\n", |
|
"\\def\\ermM{{\\textnormal{M}}}\n", |
|
"\\def\\ermN{{\\textnormal{N}}}\n", |
|
"\\def\\ermO{{\\textnormal{O}}}\n", |
|
"\\def\\ermP{{\\textnormal{P}}}\n", |
|
"\\def\\ermQ{{\\textnormal{Q}}}\n", |
|
"\\def\\ermR{{\\textnormal{R}}}\n", |
|
"\\def\\ermS{{\\textnormal{S}}}\n", |
|
"\\def\\ermT{{\\textnormal{T}}}\n", |
|
"\\def\\ermU{{\\textnormal{U}}}\n", |
|
"\\def\\ermV{{\\textnormal{V}}}\n", |
|
"\\def\\ermW{{\\textnormal{W}}}\n", |
|
"\\def\\ermX{{\\textnormal{X}}}\n", |
|
"\\def\\ermY{{\\textnormal{Y}}}\n", |
|
"\\def\\ermZ{{\\textnormal{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\vzero{{\\bm{0}}}\n", |
|
"\\def\\vone{{\\bm{1}}}\n", |
|
"\\def\\vmu{{\\bm{\\mu}}}\n", |
|
"\\def\\vtheta{{\\bm{\\theta}}}\n", |
|
"\\def\\va{{\\bm{a}}}\n", |
|
"\\def\\vb{{\\bm{b}}}\n", |
|
"\\def\\vc{{\\bm{c}}}\n", |
|
"\\def\\vd{{\\bm{d}}}\n", |
|
"\\def\\ve{{\\bm{e}}}\n", |
|
"\\def\\vf{{\\bm{f}}}\n", |
|
"\\def\\vg{{\\bm{g}}}\n", |
|
"\\def\\vh{{\\bm{h}}}\n", |
|
"\\def\\vi{{\\bm{i}}}\n", |
|
"\\def\\vj{{\\bm{j}}}\n", |
|
"\\def\\vk{{\\bm{k}}}\n", |
|
"\\def\\vl{{\\bm{l}}}\n", |
|
"\\def\\vm{{\\bm{m}}}\n", |
|
"\\def\\vn{{\\bm{n}}}\n", |
|
"\\def\\vo{{\\bm{o}}}\n", |
|
"\\def\\vp{{\\bm{p}}}\n", |
|
"\\def\\vq{{\\bm{q}}}\n", |
|
"\\def\\vr{{\\bm{r}}}\n", |
|
"\\def\\vs{{\\bm{s}}}\n", |
|
"\\def\\vt{{\\bm{t}}}\n", |
|
"\\def\\vu{{\\bm{u}}}\n", |
|
"\\def\\vv{{\\bm{v}}}\n", |
|
"\\def\\vw{{\\bm{w}}}\n", |
|
"\\def\\vx{{\\bm{x}}}\n", |
|
"\\def\\vy{{\\bm{y}}}\n", |
|
"\\def\\vz{{\\bm{z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\evalpha{{\\alpha}}\n", |
|
"\\def\\evbeta{{\\beta}}\n", |
|
"\\def\\evepsilon{{\\epsilon}}\n", |
|
"\\def\\evlambda{{\\lambda}}\n", |
|
"\\def\\evomega{{\\omega}}\n", |
|
"\\def\\evmu{{\\mu}}\n", |
|
"\\def\\evpsi{{\\psi}}\n", |
|
"\\def\\evsigma{{\\sigma}}\n", |
|
"\\def\\evtheta{{\\theta}}\n", |
|
"\\def\\eva{{a}}\n", |
|
"\\def\\evb{{b}}\n", |
|
"\\def\\evc{{c}}\n", |
|
"\\def\\evd{{d}}\n", |
|
"\\def\\eve{{e}}\n", |
|
"\\def\\evf{{f}}\n", |
|
"\\def\\evg{{g}}\n", |
|
"\\def\\evh{{h}}\n", |
|
"\\def\\evi{{i}}\n", |
|
"\\def\\evj{{j}}\n", |
|
"\\def\\evk{{k}}\n", |
|
"\\def\\evl{{l}}\n", |
|
"\\def\\evm{{m}}\n", |
|
"\\def\\evn{{n}}\n", |
|
"\\def\\evo{{o}}\n", |
|
"\\def\\evp{{p}}\n", |
|
"\\def\\evq{{q}}\n", |
|
"\\def\\evr{{r}}\n", |
|
"\\def\\evs{{s}}\n", |
|
"\\def\\evt{{t}}\n", |
|
"\\def\\evu{{u}}\n", |
|
"\\def\\evv{{v}}\n", |
|
"\\def\\evw{{w}}\n", |
|
"\\def\\evx{{x}}\n", |
|
"\\def\\evy{{y}}\n", |
|
"\\def\\evz{{z}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\mA{{\\bm{A}}}\n", |
|
"\\def\\mB{{\\bm{B}}}\n", |
|
"\\def\\mC{{\\bm{C}}}\n", |
|
"\\def\\mD{{\\bm{D}}}\n", |
|
"\\def\\mE{{\\bm{E}}}\n", |
|
"\\def\\mF{{\\bm{F}}}\n", |
|
"\\def\\mG{{\\bm{G}}}\n", |
|
"\\def\\mH{{\\bm{H}}}\n", |
|
"\\def\\mI{{\\bm{I}}}\n", |
|
"\\def\\mJ{{\\bm{J}}}\n", |
|
"\\def\\mK{{\\bm{K}}}\n", |
|
"\\def\\mL{{\\bm{L}}}\n", |
|
"\\def\\mM{{\\bm{M}}}\n", |
|
"\\def\\mN{{\\bm{N}}}\n", |
|
"\\def\\mO{{\\bm{O}}}\n", |
|
"\\def\\mP{{\\bm{P}}}\n", |
|
"\\def\\mQ{{\\bm{Q}}}\n", |
|
"\\def\\mR{{\\bm{R}}}\n", |
|
"\\def\\mS{{\\bm{S}}}\n", |
|
"\\def\\mT{{\\bm{T}}}\n", |
|
"\\def\\mU{{\\bm{U}}}\n", |
|
"\\def\\mV{{\\bm{V}}}\n", |
|
"\\def\\mW{{\\bm{W}}}\n", |
|
"\\def\\mX{{\\bm{X}}}\n", |
|
"\\def\\mY{{\\bm{Y}}}\n", |
|
"\\def\\mZ{{\\bm{Z}}}\n", |
|
"\\def\\mBeta{{\\bm{\\beta}}}\n", |
|
"\\def\\mPhi{{\\bm{\\Phi}}}\n", |
|
"\\def\\mLambda{{\\bm{\\Lambda}}}\n", |
|
"\\def\\mSigma{{\\bm{\\Sigma}}}\n", |
|
"\n", |
|
"\n", |
|
"\\DeclareMathAlphabet{\\mathsfit}{\\encodingdefault}{\\sfdefault}{m}{sl}\n", |
|
"\\SetMathAlphabet{\\mathsfit}{bold}{\\encodingdefault}{\\sfdefault}{bx}{n}\n", |
|
"\\newcommand{\\tens}[1]{\\bm{\\mathsfit{#1}}}\n", |
|
"\\def\\tA{{\\tens{A}}}\n", |
|
"\\def\\tB{{\\tens{B}}}\n", |
|
"\\def\\tC{{\\tens{C}}}\n", |
|
"\\def\\tD{{\\tens{D}}}\n", |
|
"\\def\\tE{{\\tens{E}}}\n", |
|
"\\def\\tF{{\\tens{F}}}\n", |
|
"\\def\\tG{{\\tens{G}}}\n", |
|
"\\def\\tH{{\\tens{H}}}\n", |
|
"\\def\\tI{{\\tens{I}}}\n", |
|
"\\def\\tJ{{\\tens{J}}}\n", |
|
"\\def\\tK{{\\tens{K}}}\n", |
|
"\\def\\tL{{\\tens{L}}}\n", |
|
"\\def\\tM{{\\tens{M}}}\n", |
|
"\\def\\tN{{\\tens{N}}}\n", |
|
"\\def\\tO{{\\tens{O}}}\n", |
|
"\\def\\tP{{\\tens{P}}}\n", |
|
"\\def\\tQ{{\\tens{Q}}}\n", |
|
"\\def\\tR{{\\tens{R}}}\n", |
|
"\\def\\tS{{\\tens{S}}}\n", |
|
"\\def\\tT{{\\tens{T}}}\n", |
|
"\\def\\tU{{\\tens{U}}}\n", |
|
"\\def\\tV{{\\tens{V}}}\n", |
|
"\\def\\tW{{\\tens{W}}}\n", |
|
"\\def\\tX{{\\tens{X}}}\n", |
|
"\\def\\tY{{\\tens{Y}}}\n", |
|
"\\def\\tZ{{\\tens{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\def\\gA{{\\mathcal{A}}}\n", |
|
"\\def\\gB{{\\mathcal{B}}}\n", |
|
"\\def\\gC{{\\mathcal{C}}}\n", |
|
"\\def\\gD{{\\mathcal{D}}}\n", |
|
"\\def\\gE{{\\mathcal{E}}}\n", |
|
"\\def\\gF{{\\mathcal{F}}}\n", |
|
"\\def\\gG{{\\mathcal{G}}}\n", |
|
"\\def\\gH{{\\mathcal{H}}}\n", |
|
"\\def\\gI{{\\mathcal{I}}}\n", |
|
"\\def\\gJ{{\\mathcal{J}}}\n", |
|
"\\def\\gK{{\\mathcal{K}}}\n", |
|
"\\def\\gL{{\\mathcal{L}}}\n", |
|
"\\def\\gM{{\\mathcal{M}}}\n", |
|
"\\def\\gN{{\\mathcal{N}}}\n", |
|
"\\def\\gO{{\\mathcal{O}}}\n", |
|
"\\def\\gP{{\\mathcal{P}}}\n", |
|
"\\def\\gQ{{\\mathcal{Q}}}\n", |
|
"\\def\\gR{{\\mathcal{R}}}\n", |
|
"\\def\\gS{{\\mathcal{S}}}\n", |
|
"\\def\\gT{{\\mathcal{T}}}\n", |
|
"\\def\\gU{{\\mathcal{U}}}\n", |
|
"\\def\\gV{{\\mathcal{V}}}\n", |
|
"\\def\\gW{{\\mathcal{W}}}\n", |
|
"\\def\\gX{{\\mathcal{X}}}\n", |
|
"\\def\\gY{{\\mathcal{Y}}}\n", |
|
"\\def\\gZ{{\\mathcal{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\sA{{\\mathbb{A}}}\n", |
|
"\\def\\sB{{\\mathbb{B}}}\n", |
|
"\\def\\sC{{\\mathbb{C}}}\n", |
|
"\\def\\sD{{\\mathbb{D}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\sF{{\\mathbb{F}}}\n", |
|
"\\def\\sG{{\\mathbb{G}}}\n", |
|
"\\def\\sH{{\\mathbb{H}}}\n", |
|
"\\def\\sI{{\\mathbb{I}}}\n", |
|
"\\def\\sJ{{\\mathbb{J}}}\n", |
|
"\\def\\sK{{\\mathbb{K}}}\n", |
|
"\\def\\sL{{\\mathbb{L}}}\n", |
|
"\\def\\sM{{\\mathbb{M}}}\n", |
|
"\\def\\sN{{\\mathbb{N}}}\n", |
|
"\\def\\sO{{\\mathbb{O}}}\n", |
|
"\\def\\sP{{\\mathbb{P}}}\n", |
|
"\\def\\sQ{{\\mathbb{Q}}}\n", |
|
"\\def\\sR{{\\mathbb{R}}}\n", |
|
"\\def\\sS{{\\mathbb{S}}}\n", |
|
"\\def\\sT{{\\mathbb{T}}}\n", |
|
"\\def\\sU{{\\mathbb{U}}}\n", |
|
"\\def\\sV{{\\mathbb{V}}}\n", |
|
"\\def\\sW{{\\mathbb{W}}}\n", |
|
"\\def\\sX{{\\mathbb{X}}}\n", |
|
"\\def\\sY{{\\mathbb{Y}}}\n", |
|
"\\def\\sZ{{\\mathbb{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\emLambda{{\\Lambda}}\n", |
|
"\\def\\emA{{A}}\n", |
|
"\\def\\emB{{B}}\n", |
|
"\\def\\emC{{C}}\n", |
|
"\\def\\emD{{D}}\n", |
|
"\\def\\emE{{E}}\n", |
|
"\\def\\emF{{F}}\n", |
|
"\\def\\emG{{G}}\n", |
|
"\\def\\emH{{H}}\n", |
|
"\\def\\emI{{I}}\n", |
|
"\\def\\emJ{{J}}\n", |
|
"\\def\\emK{{K}}\n", |
|
"\\def\\emL{{L}}\n", |
|
"\\def\\emM{{M}}\n", |
|
"\\def\\emN{{N}}\n", |
|
"\\def\\emO{{O}}\n", |
|
"\\def\\emP{{P}}\n", |
|
"\\def\\emQ{{Q}}\n", |
|
"\\def\\emR{{R}}\n", |
|
"\\def\\emS{{S}}\n", |
|
"\\def\\emT{{T}}\n", |
|
"\\def\\emU{{U}}\n", |
|
"\\def\\emV{{V}}\n", |
|
"\\def\\emW{{W}}\n", |
|
"\\def\\emX{{X}}\n", |
|
"\\def\\emY{{Y}}\n", |
|
"\\def\\emZ{{Z}}\n", |
|
"\\def\\emSigma{{\\Sigma}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\etens}[1]{\\mathsfit{#1}}\n", |
|
"\\def\\etLambda{{\\etens{\\Lambda}}}\n", |
|
"\\def\\etA{{\\etens{A}}}\n", |
|
"\\def\\etB{{\\etens{B}}}\n", |
|
"\\def\\etC{{\\etens{C}}}\n", |
|
"\\def\\etD{{\\etens{D}}}\n", |
|
"\\def\\etE{{\\etens{E}}}\n", |
|
"\\def\\etF{{\\etens{F}}}\n", |
|
"\\def\\etG{{\\etens{G}}}\n", |
|
"\\def\\etH{{\\etens{H}}}\n", |
|
"\\def\\etI{{\\etens{I}}}\n", |
|
"\\def\\etJ{{\\etens{J}}}\n", |
|
"\\def\\etK{{\\etens{K}}}\n", |
|
"\\def\\etL{{\\etens{L}}}\n", |
|
"\\def\\etM{{\\etens{M}}}\n", |
|
"\\def\\etN{{\\etens{N}}}\n", |
|
"\\def\\etO{{\\etens{O}}}\n", |
|
"\\def\\etP{{\\etens{P}}}\n", |
|
"\\def\\etQ{{\\etens{Q}}}\n", |
|
"\\def\\etR{{\\etens{R}}}\n", |
|
"\\def\\etS{{\\etens{S}}}\n", |
|
"\\def\\etT{{\\etens{T}}}\n", |
|
"\\def\\etU{{\\etens{U}}}\n", |
|
"\\def\\etV{{\\etens{V}}}\n", |
|
"\\def\\etW{{\\etens{W}}}\n", |
|
"\\def\\etX{{\\etens{X}}}\n", |
|
"\\def\\etY{{\\etens{Y}}}\n", |
|
"\\def\\etZ{{\\etens{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\pdata}{p_{\\rm{data}}}\n", |
|
"\n", |
|
"\\newcommand{\\ptrain}{\\hat{p}_{\\rm{data}}}\n", |
|
"\\newcommand{\\Ptrain}{\\hat{P}_{\\rm{data}}}\n", |
|
"\n", |
|
"\\newcommand{\\pmodel}{p_{\\rm{model}}}\n", |
|
"\\newcommand{\\Pmodel}{P_{\\rm{model}}}\n", |
|
"\\newcommand{\\ptildemodel}{\\tilde{p}_{\\rm{model}}}\n", |
|
"\n", |
|
"\\newcommand{\\pencode}{p_{\\rm{encoder}}}\n", |
|
"\\newcommand{\\pdecode}{p_{\\rm{decoder}}}\n", |
|
"\\newcommand{\\precons}{p_{\\rm{reconstruct}}}\n", |
|
"\n", |
|
"\\newcommand{\\laplace}{\\mathrm{Laplace}} \n", |
|
"\n", |
|
"\\newcommand{\\E}{\\mathbb{E}}\n", |
|
"\\newcommand{\\Ls}{\\mathcal{L}}\n", |
|
"\\newcommand{\\R}{\\mathbb{R}}\n", |
|
"\\newcommand{\\emp}{\\tilde{p}}\n", |
|
"\\newcommand{\\lr}{\\alpha}\n", |
|
"\\newcommand{\\reg}{\\lambda}\n", |
|
"\\newcommand{\\rect}{\\mathrm{rectifier}}\n", |
|
"\\newcommand{\\softmax}{\\mathrm{softmax}}\n", |
|
"\\newcommand{\\sigmoid}{\\sigma}\n", |
|
"\\newcommand{\\softplus}{\\zeta}\n", |
|
"\\newcommand{\\KL}{D_{\\mathrm{KL}}}\n", |
|
"\\newcommand{\\Var}{\\mathrm{Var}}\n", |
|
"\\newcommand{\\standarderror}{\\mathrm{SE}}\n", |
|
"\\newcommand{\\Cov}{\\mathrm{Cov}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\normlzero}{L^0}\n", |
|
"\\newcommand{\\normlone}{L^1}\n", |
|
"\\newcommand{\\normltwo}{L^2}\n", |
|
"\\newcommand{\\normlp}{L^p}\n", |
|
"\\newcommand{\\normmax}{L^\\infty}\n", |
|
"\n", |
|
"\\newcommand{\\parents}{Pa} \n", |
|
"\n", |
|
"\\DeclareMathOperator*{\\argmax}{arg\\,max}\n", |
|
"\\DeclareMathOperator*{\\argmin}{arg\\,min}\n", |
|
"\n", |
|
"\\DeclareMathOperator{\\sign}{sign}\n", |
|
"\\DeclareMathOperator{\\Tr}{Tr}\n", |
|
"\\let\\ab\\allowbreak\n", |
|
"\n", |
|
"\\usepackage{hyperref}\n", |
|
"\\usepackage{paralist}\n", |
|
"\\usepackage{amsmath}\n", |
|
"\\usepackage{amssymb}\n", |
|
"\\usepackage{wrapfig}\n", |
|
"\\usepackage{multirow}\n", |
|
"\\usepackage{multicol}\n", |
|
"\\usepackage{multirow}\n", |
|
"\n", |
|
"\\usepackage{dsfont}\n", |
|
"\\usepackage{tabularx}\n", |
|
"\\usepackage{graphicx}\n", |
|
"\\usepackage{xcolor}\n", |
|
"\\usepackage{soul}\n", |
|
"\\usepackage{xspace}\n", |
|
"\\usepackage{booktabs}\n", |
|
"\\usepackage{caption}\n", |
|
"\\usepackage{enumitem}\n", |
|
"\\usepackage{algorithm}\n", |
|
"\\usepackage[noend]{algorithmic}\n", |
|
"\\usepackage{pythonhighlight}\n", |
|
"\\usepackage{pifont}\n", |
|
"\n", |
|
"\\usepackage[most]{tcolorbox}\n", |
|
"\\usepackage[framemethod=tikz]{mdframed}\n", |
|
"\\definecolor{qualcolor}{RGB}{128,64,0}\n", |
|
"\\gdef\\Sepline{\n", |
|
" \\par\\noindent\\makebox[\\linewidth][l]{\n", |
|
" \\hspace*{-\\mdflength{innerleftmargin}}\n", |
|
" \\tikz\\draw[thick,dashed,gray!60] (0,0) --\n", |
|
" (\\textwidth+\\the\\mdflength{innerleftmargin}+\\the\\mdflength{innerrightmargin},0);\n", |
|
" }\\par\\nobreak}\n", |
|
" \n", |
|
"\\newcommand{\\cmark}{\\ding{51}}\n", |
|
"\\newcommand{\\xmark}{\\ding{55}}\n", |
|
"\\newcommand{\\sbf}{\\ensuremath{\\mathbf{s}}}\n", |
|
"\\newcommand{\\txb}{\\ensuremath{\\tilde{\\mathbf{x}}}}\n", |
|
"\\newcommand{\\tyb}{\\ensuremath{\\tilde{\\mathbf{y}}}}\n", |
|
"\\newcommand{\\ty}{\\ensuremath{\\tilde{y}}}\n", |
|
"\\newcommand{\\xb}{\\ensuremath{\\mathbf{x}}}\n", |
|
"\\newcommand{\\eb}{\\ensuremath{\\mathbf{e}}}\n", |
|
"\\newcommand{\\yb}{\\ensuremath{\\mathbf{y}}}\n", |
|
"\\newcommand{\\Tmc}{\\ensuremath{\\mathcal{T}}}\n", |
|
"\\newcommand{\\Pmc}{\\ensuremath{\\mathcal{P}}}\n", |
|
"\\newcommand{\\Omc}{\\ensuremath{\\mathcal{O}}}\n", |
|
"\\newcommand{\\Vmc}{\\ensuremath{\\mathcal{V}}}\n", |
|
"\\newcommand{\\Emc}{\\ensuremath{\\mathcal{E}}}\n", |
|
"\\newcommand{\\Yhatmc}{\\ensuremath{\\hat{\\mathcal{Y}}}}\n", |
|
"\\newcommand{\\vocab}{\\ensuremath{\\mathcal{V}}}\n", |
|
"\\DeclareMathOperator*{\\argtopK}{arg\\,\\mathrm{top}\\text{-}\\mathrm{K}}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\corr}{\\xspace}\n", |
|
"\\usepackage[normalem]{ulem}\n", |
|
"\\setlength{\\marginparwidth}{2cm}\n", |
|
"\\usepackage[colorinlistoftodos]{todonotes}\n", |
|
"\n", |
|
"\\definecolor{green(pigment)}{rgb}{0.0, 0.65, 0.31}\n", |
|
"\\usepackage{array}\n", |
|
"\\usepackage{makecell}\n", |
|
"\\renewcommand\\theadfont{}\n", |
|
"\\usepackage{framed}\n", |
|
"\\usepackage{hhline}\n", |
|
"\n", |
|
"\\usepackage{courierten}\n", |
|
"\\usepackage[T1]{fontenc} \n", |
|
"\\newcommand\\modelfont[1]{{\\usefont{T1}{courierten}{m}{n}#1}}\n", |
|
"\\newcommand\\myfontsize{\\fontsize{8.3pt}{10.3pt}\\selectfont}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\methodnamelong}{\\modelfont{Self-Correction for Sequence Generation}\\xspace}\n", |
|
"\\newcommand{\\methodnamewithacronymhighlighted}{\\modelfont{\\ul{Pre}diction-\\ul{C}orrect\\ul{i}on for \\ul{Se}quence Generation}\\xspace}\n", |
|
"\\newcommand{\\methodnameshort}{\\modelfont{Self-Correction}\\xspace}\n", |
|
"\\newcommand{\\methodnamenospace}{\\modelfont{Self-Correction}\\xspace}\n", |
|
"\\newcommand{\\method}{\\textsc{Self-Correct}\\xspace}\n", |
|
"\\newcommand{\\myparagraph}[1]{\\par\\noindent\\textbf{{#1}}} \n", |
|
"\\newcommand{\\myparagraphsmall}[1]{\\par\\noindent\\textit{{#1}}}\n", |
|
"\n", |
|
"\\title{Generating Sequences by \\\\Learning to [Self-]Correct}\n", |
|
"\n", |
|
"\n", |
|
"\\author{Sean Welleck\\textsuperscript{1,3,*} \\hspace{1pt}\n", |
|
"Ximing Lu\\textsuperscript{1,*}\n", |
|
"\\AND Peter West\\textsuperscript{3,$\\dagger$} \\hspace{1pt} Faeze Brahman\\textsuperscript{1,$\\dagger$}\n", |
|
"\\AND Tianxiao Shen\\textsuperscript{3} \\hspace{1pt} Daniel Khashabi\\textsuperscript{2} \\hspace{1pt} Yejin Choi\\textsuperscript{1,3} \\\\\n", |
|
"\\textsuperscript{1}Allen Institute for Artificial Intelligence\\\\\n", |
|
" \\textsuperscript{2}Center for Language and Speech Processing, Johns Hopkins University \\\\\n", |
|
" \\textsuperscript{3}Paul G. Allen School of Computer Science \\& Engineering, University of Washington \\\\\n", |
|
"}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\fix}{\\marginpar{FIX}}\n", |
|
"\\newcommand{\\new}{\\marginpar{NEW}}\n", |
|
"\n", |
|
"\\iclrfinalcopy \n", |
|
"\\begin{document}\n", |
|
"\n", |
|
"\n", |
|
"\\maketitle\n", |
|
"\\renewcommand*\\thefootnote{\\textbf{$*$}}\\footnotetext{First authors, contributed equally. \\textbf{$\\dagger$}Second authors, contributed equally.}\n", |
|
"\n", |
|
"\\renewcommand*{\\thefootnote}{\\arabic{footnote}}\n", |
|
"\\setcounter{footnote}{0}\n", |
|
"\\begin{abstract}\n", |
|
"Sequence generation applications require satisfying semantic constraints, such as ensuring that programs are correct,\n", |
|
"using certain keywords, \n", |
|
"or avoiding undesirable content. \n", |
|
"Language models, whether fine-tuned or prompted with few-shot demonstrations, frequently violate these constraints, and lack a mechanism to iteratively revise their outputs.\n", |
|
"Moreover, some powerful language models are of extreme scale or inaccessible, making it inefficient, if not infeasible, to update their parameters for task-specific adaptation. \n", |
|
"We present \\textsc{self-correction}, \n", |
|
"an approach that decouples an imperfect base generator (an off-the-shelf language model or supervised sequence-to-sequence model) from a separate corrector that learns to iteratively correct imperfect generations.\n", |
|
"To train the corrector, we propose an online training procedure that can use either scalar or natural language feedback on intermediate imperfect generations. \n", |
|
"We show that \\textsc{self-correction} improves upon the base generator in three diverse generation tasks-- mathematical program synthesis, lexically-constrained generation, and toxicity control-- even\n", |
|
"when the corrector is much smaller than the base generator. \n", |
|
"\n", |
|
"\n", |
|
"\\end{abstract}\n", |
|
"\n", |
|
"\\section{Introduction}\n", |
|
"The standard practice for natural language generation tasks is inherently single-pass: applying a decoding procedure \n", |
|
"to either a few-shot prompted language model or one tuned for a given task, then considering the generation as ``finished''~(e.g. \\citet{radford2019language,brown2020,chen2021codex}).\n", |
|
"Powerful generation models\n", |
|
"often meet most of the task requirements, yet miss a few\n", |
|
"(e.g., omitting a subset of keywords), \n", |
|
"or generate incorrect hypotheses that nevertheless provide useful structure \n", |
|
"(e.g., a correct problem solving strategy with a missing step). \n", |
|
"However, after generating even a slightly sub-optimal sequence, the single-pass paradigm requires models to ``start from scratch'', \n", |
|
"effectively discarding work already done. \n", |
|
"A more natural, intuitive approach\n", |
|
"is leveraging the generation as a useful starting point\n", |
|
"to refine \n", |
|
"into a higher quality output.\n", |
|
"\n", |
|
"To formalize this intuition, we introduce \\methodnamelong. \n", |
|
"\\autoref{fig:teaser} demonstrates its central principle: a generation model is re-framed as a base\n", |
|
" \\emph{generator}, which produces a reasonable initial hypothesis but does not need to solve the task in one pass, and a second module--the \\emph{corrector}--trained to \n", |
|
"make up the difference between the hypothesis and an optimal solution. Neither the generator nor the corrector must solve the full task in one pass, and the corrector can be applied multiple times\n", |
|
"to iteratively improve the output (\\S\\ref{subsec:ablation}). We propose a simple, general procedure for training the corrector (\\autoref{fig:learning}) by pairing generator outputs with carefully selected targets. \n", |
|
"The result is a system which self-corrects, producing outputs through multiple generation passes and breaking the task into steps that can be solved by dedicated and efficient sub-systems.\n", |
|
"\n", |
|
"\\begin{figure}[t]\n", |
|
" \\centering\n", |
|
" \\includegraphics[width=0.99\\textwidth]{images/summary_figure}\n", |
|
" \\caption{\\textsc{Self-corrector}s decompose generation into a base generator that proposes an initial hypothesis, and a corrector that iteratively improves its quality.}\n", |
|
" \\label{fig:teaser}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"We find that \\methodnameshort is broadly applicable. Training a corrector model improves the base generator on 3 diverse tasks: mathematical program synthesis (\\S\\ref{ssec:math}), lexically constrained generation (\\S\\ref{ssec:constrained}), and toxicity reduction (\\S\\ref{ssec:toxicity}). The trained corrector model can even be applied to a larger generator with similar performance to training a new corrector (\\S\\ref{sec:modularity}), showing that the sub-task of correction is transferable, even to stronger generators. Finally, we explore the prospect of introducing a third module to the \\methodnameshort system (\\S\\ref{sec:feedback})--explicitly using natural language feedback to guide corrections--with promising results. \\methodnameshort offers an exciting opportunity to build on existing generation models and the sequences they generate, with efficient, effective, and transferable corrector networks.\n", |
|
"\n", |
|
"\\section{Self-correcting sequence generators}\n", |
|
"\\label{sec:method}\n", |
|
"\n", |
|
"A typical autoregressive text generator (e.g. GPT-3~\\citep{brown2020}) maps an input prompt to a distribution over outputs using a single parameterized module (e.g. a large transformer), $p_0(y|x)$.\n", |
|
"We explore an alternative that decomposes into two modules, a base \\textit{generator}, and a \\textit{corrector},\n", |
|
"\\begin{align}\n", |
|
"\\label{eqn:model-onestep}\n", |
|
"p(y|x)=\\sum_{y_0}\\underbrace{p_0(y_0|x)}_{\\text{generator}}\\underbrace{p_\\theta(y|y_0,x)}_{\\text{corrector}}\n", |
|
"\\end{align}\n", |
|
"where the generator provides an initial hypothesis that is refined by the corrector.\n", |
|
"In practice, the corrector can be applied multiple times, $p(y_T|x)=\\sum_{y_0}\\sum_{y_1}\\cdots \\sum_{y_{T-1}}p_0(y_0|x)\\prod_t p_\\theta(y_{t+1}|y_t,x)$.\n", |
|
"Since a model of this form can both generate and correct its generations, we call it a \\modelfont{Self-Corrector}.\n", |
|
"\n", |
|
"Self-correctors have several unique properties compared to typical generators.\n", |
|
"First, a self-corrector \n", |
|
"decouples generation and correction, \n", |
|
"allowing us to \\emph{freely parameterize each module} -- \n", |
|
"for instance, by prompting a single language model or using two different language models. \n", |
|
"In this paper, we develop a framework to train a separate corrector model\n", |
|
"(\\S\\ref{ssec:learning}).\n", |
|
"We find that the resulting self-corrector improves upon the generator alone (\\S\\ref{sec:exprs}), even when the corrector is much smaller (\\S\\ref{sec:modularity}).\n", |
|
"\n", |
|
"Second, since the generator and the corrector are separated, we can keep the generator as a general-purpose language model and \\emph{train the corrector with different objectives} for different task requirements.\n", |
|
"In \\S\\ref{ssec:learning}, we propose a training algorithm for the corrector that is dedicated to improving generations, where the improvement can be in any aspect, measured by scalar values.\n", |
|
"\n", |
|
"Third, the corrector can receive \\textit{explicit feedback} about intermediate generations to guide subsequent generations.\n", |
|
"Formally, $p(y|x)=\\sum_{y_0}p_0(y_0|x)p_\\theta(y|y_0,x,f(y_0))$, where $f$ is the feedback.\n", |
|
"The feedback can be of many forms, e.g. a sentence, a compiler trace, etc. \n", |
|
"In contrast, a typical generator that generates in a single pass does not leverage feedback on its own generation.\n", |
|
"In this paper, \n", |
|
"we show that the corrector can learn to exploit explicit natural language feedback to achieve better performance~(\\S\\ref{sec:feedback}).\n", |
|
"Next, we describe our training framework of the corrector.\n", |
|
"\n", |
|
"\\subsection{Learning a Corrector}\n", |
|
"\\label{ssec:learning}\n", |
|
"Our goal is to have the generator generate an initial hypothesis, \n", |
|
"then improve the hypothesis with the corrector (Eq.~\\ref{eqn:model-onestep}).\n", |
|
"We train the corrector to improve the quality of a hypothesis, while staying as close as possible to the original hypothesis. \n", |
|
"Here, quality is measured with a scalar value function $v(y)$ which we assume is accessible at training time (e.g. a classifier).\n", |
|
"\n", |
|
"Since direct supervision on how to improve hypotheses is not available, we design a new algorithm to train the corrector, which we refer to as self-corrective learning.\n", |
|
"The algorithm collects a pool of generations, \n", |
|
"groups them and \n", |
|
" selects pairs of generation \n", |
|
"that increase in value and are nearby, then updates the corrector on these pairs.\n", |
|
"As training progresses, more \n", |
|
"generations are added to the pool using the current corrector.\n", |
|
"Algorithm~\\ref{alg:main} summarizes self-corrective learning, detailed below.\n", |
|
"\n", |
|
"\\begin{figure}[t]\n", |
|
" \\centering\n", |
|
"\n", |
|
" \\includegraphics[width=0.99\\textwidth]{images/learning_figure_v2.png}\n", |
|
" \n", |
|
" \\caption{\\textsc{Self-corrective learning} iteratively trains a corrector by generating hypotheses and corrections, forming value-improving pairs, and selecting those with high similarity for learning.\n", |
|
" }\n", |
|
" \\label{fig:learning}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"\\myparagraph{Initialization.}\n", |
|
"Self-corrective learning begins with a generator $p_0(y_0|x)$, a corrector \n", |
|
"$p_\\theta(y'|y,x)$\n", |
|
", a set of training prompts $X$, and a value function \n", |
|
"$v:\\mathcal Y \\rightarrow\\mathbb R$.\n", |
|
"Optionally,\n", |
|
"we can use additional feedback \n", |
|
"$f: \\mathcal Y \\rightarrow \\mathcal F$ and learn $p_\\theta(y'|y,x,f(y))$,\n", |
|
"where $\\mathcal F$ \n", |
|
"is arbitrary.\n", |
|
"\n", |
|
"The algorithm initializes a datapool of (input, output, value, feedback) examples by using the generator to generate multiple outputs for each input.\n", |
|
"Formally, \n", |
|
"\\begin{align}\n", |
|
"D_x=\\{(x,y,v(y), f(y))\\ |\\ \\text{for all } y\\in y^{1:N}\\sim q(p_0(\\cdot|x))\\},\\quad D=\\bigcup_{x\\in X} D_x,\n", |
|
"\\label{eqn:d0}\n", |
|
"\\end{align}\n", |
|
"where $y^{1:N}$ denotes $N$ outputs generated with decoding algorithm $q$ (e.g. temperature sampling).\n", |
|
"When available, $(x, y, v(y), f(y))$ examples from another source (e.g. a dataset) can also be added.\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\algcomment}[1]{{\\footnotesize \\fontfamily{cmtt}\\selectfont // #1}}\n", |
|
"\\renewcommand{\\algorithmiccomment}[1]{\\hfill{\\(\\triangleright\\)~#1}\\par}\n", |
|
"\\begin{figure}[t]\n", |
|
"\\vspace{-1em}\n", |
|
"\\begin{algorithm}[H]\n", |
|
"\\small\n", |
|
"\\begin{algorithmic}\n", |
|
"\\INPUT{Generator $p_0$, corrector $p_\\theta$, prompts $X$, value $v(\\cdot)$, feedback $f(\\cdot)$}\n", |
|
"\\\\\n", |
|
"\\text{Initialize datapool }$D$ by sampling from $p_0$\\algorithmiccomment{Initialization: Eq.~\\ref{eqn:d0}}\n", |
|
"\n", |
|
"\\FOR{$\\text{iteration}\\in\\{1,2,\\ldots\\}$}\n", |
|
"\\FOR{$x \\in X$} \n", |
|
"\\STATE Sample hypotheses $y$ from datapool $D$ \n", |
|
"\\STATE Generate corrections $y'\\sim p_\\theta(\\cdot|y,x,f(y))$\n", |
|
"\\STATE Add all $(x,y',v(y'),f(y'))$ to the datapool $D$\n", |
|
"\\algorithmiccomment{Exploration: Eq.~\\ref{eqn:exploration}}\n", |
|
"\n", |
|
"\\ENDFOR\n", |
|
"Form value-improving pairs $P$ from $D$\\algorithmiccomment{Pairing: Eq.~\\ref{eqn:pairing}}\n", |
|
"\\FOR{step in $1,2,\\ldots,M$ }\n", |
|
"\\item Sample a batch of value-improving pairs from $P$ using Eq.~\\ref{eqn:residual-subsample}\n", |
|
"\\item Compute the loss and update $\\theta$ using gradient descent\n", |
|
"\\algorithmiccomment{Learning}\n", |
|
"\\ENDFOR\n", |
|
"\\ENDFOR\n", |
|
"\n", |
|
"\\end{algorithmic}\n", |
|
"\n", |
|
"\\caption{Self-corrective learning}\n", |
|
"\\label{alg:main}\n", |
|
"\\end{algorithm}\n", |
|
"\\vspace{-1em}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"\\myparagraph{Pairing.}\n", |
|
"Next, self-corrective learning forms \\textit{value-improving pairs}: examples of mapping a hypothesis to a higher-valued correction.\n", |
|
"We use \n", |
|
"the datapool $D$ to form a set of (input, hypothesis, correction) pairs. \n", |
|
"A pair is formed when an output has a higher value than another\n", |
|
"\\footnote{We also store the value and feedback for $y$ and $y'$ along with $(x,y,y')$, which we omit to reduce clutter.}:\n", |
|
"\\begin{align}\n", |
|
"\\label{eqn:pairing}\n", |
|
"    P_x=\\{(x,y,y')\\mid v(y)<v(y')\\text{ for all } (y,y')\\in D_x\\times D_x\\},\\quad P=\\bigcup_{x\\in X} P_x,\n", |
|
"\\end{align}\n", |
|
"\n", |
|
"\\myparagraph{Learning.}\n", |
|
"Next, self-corrective learning selects (input, hypothesis, correction) pairs to update the corrector with.\n", |
|
"We sample a $(x,y,y')$ pair proportional to its improvement in value as well as the proximity between the hypothesis $y$ and the correction $y'$:\n", |
|
"\\begin{align}\n", |
|
"\\label{eqn:residual-subsample}\n", |
|
" \n", |
|
" \\mathds P[(x, y,y')]&\\propto \\exp\\big(\\underbrace{\\alpha\\cdot(v(y')-v(y))}_{\\text{improvement}}+\\underbrace{\\beta\\cdot s(y,y')}_{\\text{proximity}}\\big)/Z(y), \n", |
|
"\\end{align}\n", |
|
"\n", |
|
"where $s(y,y')$ is a similarity function and $Z(y)$\n", |
|
"normalizes over the available corrections for $y$ in $P_x$.\n", |
|
"\n", |
|
"Increasing the hyperparameter $\\alpha\\in\\mathbb{R}_{\\geq 0}$ puts more weight on targets that add more value, while\n", |
|
"increasing $\\beta\\in\\mathbb{R}_{\\geq 0}$ retains more similar targets. \n", |
|
"\n", |
|
"We update the corrector using the cross-entropy loss $\\mathcal{L}(\\theta) = -\\log p_\\theta(y'|y,x,f(y))$ on batches sampled in this way.\n", |
|
"\n", |
|
"\\myparagraph{Exploration.}\n", |
|
"During exploration, self-corrective learning adds \n", |
|
"new generations to the datapool by generating from the current corrector:\n", |
|
"\\begin{align}\n", |
|
"\\label{eqn:exploration}\n", |
|
"D'_x&=\\{(x,y',v(y'), f(y'))\\ |\\ \\text{for all } y'\\in y'^{1:N}\\sim q(p_\\theta(\\cdot|y,x,f(y)))\\},\\quad D'=\\bigcup_{x\\in X} D'_x\n", |
|
"\\end{align}\n", |
|
"and updating the datapool $D\\leftarrow D\\cup D'$.\n", |
|
"The hypotheses $y$ to correct can come from any source, e.g. newly sampled from the base generator,\n", |
|
"or from the datapool; \n", |
|
"we use the latter in our experiments.\n", |
|
"\n", |
|
"\\myparagraph{Inference.}\n", |
|
"We use the trained corrector along with a generator to generate a trajectory $y_0,y_1,\\ldots,y_T$, and consider $y_T$ the final output.\n", |
|
"Since marginalizing over the intermediate generations in Eq.~\\ref{eqn:model-onestep} is intractable, we approximate each summation with a single sequence generated with a decoding algorithm $q(\\cdot)$.\n", |
|
"That is, we decode from the generator, then repeatedly from the corrector:\n", |
|
"\\begin{itemize}[leftmargin=*,topsep=0pt,itemsep=-1ex,partopsep=1ex,parsep=1ex]\n", |
|
" \\item Generation: $y_0\\sim q(p_0(y_0|x))$;\n", |
|
" \\item Correction: $y_{t+1}\\sim q(p_\\theta(y_{t+1}|y_{t},x, f(y_t)))$,\\quad $t=0,1,\\dots,T-1$.\n", |
|
"\\end{itemize}\n", |
|
"The stopping time $T$ is either fixed, or when a target value is obtained (if $v(y)$ is available).\n", |
|
"\n", |
|
"\n", |
|
"\n" |
|
], |
|
"context_after_exp": [ |
|
"\\section{Experiments}\n", |
|
"\\label{sec:exprs}\n", |
|
"We evaluate \\textsc{self-correction} on a diversity of tasks: \\textbf{mathematical program synthesis}, in which generations are strictly correct or incorrect, \n", |
|
"and generators typically have low performance; \n", |
|
"\\textbf{lexically-constrained generation}, which allows for partial credit, and generators usually give partially-correct solutions (e.g. matching 3 out of 5 constraints); and \\textbf{toxicity control}, \n", |
|
"where `correctness' is more loosely defined, \n", |
|
"and the output space is much more open-ended.\n", |
|
"\n", |
|
"Our experiments are organized to study three settings:\n", |
|
"\\begin{enumerate}[leftmargin=*,topsep=0pt,itemsep=-1ex,partopsep=1ex,parsep=1ex]\n", |
|
"\\item Using self-correctors to improve upon generators (\\S\\ref{ssec:math},\\ref{ssec:constrained},\\ref{ssec:toxicity}).\n", |
|
"\\item Correcting generators that are much larger than the corrector (\\S\\ref{sec:modularity}).\n", |
|
"\\item Leveraging explicit feedback during training and inference (\\S\\ref{sec:feedback}).\n", |
|
"\\end{enumerate}\n", |
|
"Next, we describe the self-correction setup and baselines for each task, along with their results. \\footnote{Code will be publicly available upon acceptance.}\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{Mathematical Program Synthesis}\n", |
|
"\\label{ssec:math}\n", |
|
"First, we consider mathematical program synthesis~\\citep{austin2021ProgramSW,mishra2022lila}.\n", |
|
"Given a natural language problem specification $x$, the task is to generate a program $y$ that upon execution returns the correct answer to $x$.\n", |
|
"The task is challenging as it draws on language understanding, multiple-step mathematical problem solving (e.g. identifying a solution strategy, decomposing a problem), and leveraging symbolic tools (e.g. built-in operations, variables).\n", |
|
"Furthermore, the task demands a high level of precision, e.g. a single misplaced operation makes the program incorrect.\n", |
|
"\n", |
|
"\\myparagraph{Experimental setup.}\n", |
|
"As the corrector we use GPT-Neo 1.3B~\\citep{gpt-neo}, an open-source autoregressive language model.\n", |
|
"GPT-Neo is pre-trained on language and code~\\citep{pile}, and hence is widely used \n", |
|
"for code-related generation (e.g. \\citet{chen2021codex,ni2022learning,mishra2022lila}).\n", |
|
"We consider two settings for the initial generator: (1) a separate fine-tuned instance of GPT-Neo 1.3B, and (2) few-shot prompted GPT-3~\\citep{brown2020}.\n", |
|
"For GPT-3, we evaluate the davinci and text-davinci-002 engines, representative of large ($\\approx 175B$\\footnote{Estimated size of \\textit{davinci} ({\\scriptsize \\url{https://blog.eleuther.ai/gpt3-model-sizes}}). Further details not available.}) generators that are state-of-the-art in related tasks~\\citep{Wei2022ChainOT}.\n", |
|
"See the Appendix for additional details.\n", |
|
"\n", |
|
"\\myparagraph{Self-correction setup.} As the value function we use correctness, which is 1 when the program $y$ executes and outputs the ground-truth answer and 0 otherwise.\n", |
|
"Our main experiments do not use explicit feedback, i.e. $f(y)=\\emptyset$.\n", |
|
"At inference time, we study two settings for the corrector: (1) applying $k$ corrections and selecting the final generation, (2) an oracle setting that only corrects a draft if the draft is incorrect.\n", |
|
"We use greedy decoding for the generator and corrector, and $k=1$.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\begin{table}[t]\n", |
|
"\\begin{minipage}[t]{0.45\\linewidth}\n", |
|
"\\centering\\footnotesize\n", |
|
"\\renewcommand\\arraystretch{1.14}\n", |
|
"    \\begin{tabular}{clc}\n", |
|
" \\toprule\n", |
|
" \\textbf{Dataset} & \\textbf{Model} & \\textbf{Correct}\\\\\n", |
|
" \\midrule\n", |
|
" \\textbf{Multiarith} & GPT-NEO 1.3B & 60.00\\\\\n", |
|
" & +\\textsc{Self-Correct} & \\textbf{98.33}\\\\\n", |
|
" & +$\\textsc{Self-Correct}_*$ & \\textbf{99.17}\\\\\n", |
|
" \\midrule\n", |
|
" \\textbf{Multitask} \n", |
|
" & GPT-NEO 1.3B & 49.02\\\\\n", |
|
" & +\\textsc{Self-Correct}& \\textbf{73.53}\\\\\n", |
|
" & +$\\textsc{Self-Correct}_*$ & \\textbf{78.24}\\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
"\\end{minipage}\n", |
|
"\\hfill\n", |
|
"\\begin{minipage}[t]{0.54\\linewidth}\n", |
|
"\\centering \\footnotesize\n", |
|
" \\begin{tabular}{clcc}\n", |
|
" \\toprule\n", |
|
" \\textbf{Dataset} & \\textbf{Model} & \\textbf{Params} & \\textbf{Correct}\\\\\n", |
|
" \\midrule\n", |
|
" \\textbf{GSM} \n", |
|
" & \\textit{OpenAI 3B}~[\\citenum{cobbe2021gsm8k}] & 3B& 15.50\\\\\n", |
|
"    & \\textit{OpenAI 6B}~[\\citenum{cobbe2021gsm8k}] & 6B& 20.00\\\\\n", |
|
"    & GPT-NEO~[\\citenum{ni2022learning}] & 2.7B& 18.80\\\\\n", |
|
"    & NEO FCP+PCP~[\\citenum{ni2022learning}] & 2.7B& 19.50\\\\\n", |
|
" \n", |
|
" \\cmidrule(lr){2-4}\n", |
|
" & GPT-NEO & 1.3B & 8.57\\\\\n", |
|
" & +\\textsc{Self-Correct} & 1.3B & \\textbf{21.26}\\\\\n", |
|
" & +$\\textsc{Self-Correct}_*$ & 1.3B & \\textbf{24.22}\\\\\n", |
|
" \n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
" \n", |
|
"\\end{minipage}\n", |
|
"\\caption{Evaluation results of mathematical program synthesis experiments. GPT-NEO (1.3B) is the initial generator for \\textsc{Self-Correct}. \n", |
|
" $\\textsc{Self-Correct}_*$ means only applying the corrector to incorrect outputs.\n", |
|
" \\textit{Italicized}: original non-program version of GSM. \n", |
|
" }\n", |
|
" \\label{tab:math_results}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"\\setlength{\\columnsep}{0.2cm}\n", |
|
"\\begin{figure}[t]\n", |
|
"\\begin{multicols}{2}\n", |
|
"\n", |
|
"\\vspace{2mm}\n", |
|
"\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{small}\n", |
|
"\\textbf{Problem:}\\\\\n", |
|
"It takes Jennifer 20 minutes to groom each of her 2 long hair dachshunds. If she grooms her dogs every day, how many hours does she spend grooming her dogs in 30 days?\n", |
|
"\\vspace{1.1em}\n", |
|
"\\Sepline\n", |
|
"\\vspace{-1.2em}\n", |
|
"\\begin{multicols}{2}\n", |
|
"\\textbf{Generator:}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"a=20*2\n", |
|
"b=a*30\n", |
|
"answer=b\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\vspace{1.1em}\n", |
|
"\\columnbreak\n", |
|
"\n", |
|
"\\textbf{Corrector:}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"a=20*2\n", |
|
"b=a*30\n", |
|
"c=b/60 #fix\n", |
|
"answer=c\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\end{multicols}\n", |
|
"\\end{small}\n", |
|
"\\vspace{-2em}\n", |
|
"\\end{tcolorbox}\n", |
|
"\n", |
|
"\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{small}\n", |
|
"\\textbf{Problem:}\\\\\n", |
|
"Mrs. Wilsborough saved \\$500 to buy concert tickets for her family. She bought 2 VIP tickets at \\$100 each and 3 regular tickets at \\$50 each. How much of her savings does Mrs. Wilsborough have after she buys the tickets?\n", |
|
"\\Sepline\n", |
|
"\\vspace{-1.2em}\n", |
|
"\\begin{multicols}{2}\n", |
|
"\\textbf{Generator:}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"a=2*100\n", |
|
"b=3*50\n", |
|
"c=a+b\n", |
|
"answer=c\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\columnbreak\n", |
|
"\n", |
|
"\\textbf{Corrector:}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"a=2*100\n", |
|
"b=3*50\n", |
|
"c=500-a-b #fix\n", |
|
"answer=c\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\end{multicols}\n", |
|
"\\end{small}\n", |
|
"\\vspace{-2em}\n", |
|
"\\end{tcolorbox}\n", |
|
"\\end{multicols}\n", |
|
"\\vspace{-1.7em}\n", |
|
"\\caption{\\textbf{Grade-school-math (GSM) self-corrections.} On the left, the corrector fixes the units (from minutes to hours) in the generator's solution. On the right, the corrector revises the logic so that the program computes the total savings instead of the spent on tickets. We add \\textit{\\#fix} here to indicate the change.\n", |
|
"See \\autoref{fig:math-examples2} and \\autoref{fig:math-examples3} for additional examples.}\n", |
|
"\\label{fig:examples}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"\\myparagraph{Datasets.} We evaluate on problems from 5 problem solving datasets: MultiArith~\\citep{roy2015multiarith}, AddSub~\\citep{hosseini2014addsub}, SingleOp~\\citep{roy2015multiarith}, SVAMP~\\citep{patel2021svamp}, and GSM8k~\\citep{cobbe2021gsm8k}.\n", |
|
"As in prior work \\citep{austin2021ProgramSW,ni2022learning,mishra2022lila}, we frame these as program synthesis by converting their solutions to Python programs.\\footnote{{We use data from the Lila benchmark ({\\scriptsize \\url{https://github.com/allenai/Lila}).}}}\n", |
|
"We separate our experiments into three increasingly difficult settings: \n", |
|
"\\begin{enumerate}[leftmargin=*,topsep=0pt,itemsep=-1ex,partopsep=1ex,parsep=1ex]\n", |
|
" \\item \\textbf{MultiArith}, using problems from the MultiArith arithmetic word problem dataset.\n", |
|
" \\item \\textbf{Multitask}, using problems from 4 arithmetic datasets (MultiArith, AddSub, SingleOp, SVAMP).\n", |
|
" \\item \\textbf{GSM}, using problems from the challenging GSM8k dataset.\n", |
|
"\\end{enumerate} \n", |
|
"For the MultiArith and Multitask settings, we make train/valid/test splits using 60/20/20\\% proportions.\n", |
|
"Similar to \\citet{ni2022learning}, for the GSM setting we use the official GSM8k test split, and create a validation split using 20\\% of the training set.\n", |
|
"Note that the problems and answers in all datasets are the same as those from the original non-program datasets.\n", |
|
"\n", |
|
"\\myparagraph{Baselines.} We compare \\textsc{self-correct} with its baseline generator (GPT-Neo 1.3B) in all three settings. For the GSM setting, we compare with existing work that uses models within the same magnitude of scale, including NEO FCP+PCP~\\citep{ni2022learning}, which tunes GPT-NEO 2.7B with additional self-sampled programs, and their fine-tuned GPT-NEO 2.7B baseline.\n", |
|
"We also report 3B and 6B fine-tuned GPT3-like language models from \\citet{cobbe2021gsm8k}, which were trained on the non-program version of GSM8k.\n", |
|
"We evaluate larger models later in (\\S\\ref{sec:modularity}).\n", |
|
"\n", |
|
"\\myparagraph{Results.} \n", |
|
"As seen in \\autoref{tab:math_results},\n", |
|
"the self-corrector improves upon the generator in all three settings, using either inference strategy: always correcting (\\textsc{self-correct}), or only correcting incorrect solutions (\\textsc{self-correct}$_*$).\n", |
|
"The self-corrector's performance on MultiArith is very high after correction (98-99\\% correct).\n", |
|
"On the challenging GSM dataset, the self-corrector achieves 21\\% accuracy.\n", |
|
"Notably, this is higher than previous work based on the larger 2.7B GPT-Neo, \n", |
|
"or larger models tuned on the language version of GSM.\n", |
|
"The results show that self-corrective learning can improve task performance via training a corrector.\n", |
|
"Qualitatively, the self-corrector can correct values in a correctly structured solution, fix the order of operations within a multistep solution, adjust unit conversions, and make larger multipart revisions (see Figures~\\ref{fig:examples},\\ref{fig:math-examples2},\\ref{fig:math-examples3}). Notably, these are learned automatically through self-corrective learning.\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{Lexically Constrained Generation}\n", |
|
"\\label{ssec:constrained}\n", |
|
"Next, we consider lexically constrained generation. Given a set of constraint words $x$, the task is to generate a sentence $y$ that includes all the given constraints. \n", |
|
"Faithful constraint satisfaction is crucial for many downstream tasks, e.g., those that require converting information to text~\\citep{mckeown_1985}.\n", |
|
"\n", |
|
"\\myparagraph{Datasets and Metrics.} We experiment on \\textsc{CommonGen}~\\citep{lin-etal-2020-commongen} and E2E~\\citep{novikova-etal-2017-e2e}. \\textsc{CommonGen} is a benchmark for generative commonsense reasoning where the task is to generate a coherent sentence given a set of words (e.g., dog, catch). \n", |
|
"E2E involves converting structured inputs into natural language.\n", |
|
"For both tasks, we report standard metrics including \n", |
|
"human/automatic measures of fluency (BLEU, CIDER, etc.) as well as constraint coverage. We collect human measures of fluency on Amazon Mechanical Turk; see the Appendix for details.\n", |
|
"\n", |
|
"\\myparagraph{Setup.} We parameterize the base generator with GPT-2 \\cite{radford2019language} (large-size for \\textsc{CommonGen} and medium-size for E2E). \n", |
|
"We fine-tuned the generator for each task. \n", |
|
"As the value function for self-corrective learning we use coverage, i.e. the percentage of constraints that are present in the output.\n", |
|
"For inference, we use beam search with the generator, then do up to 3 corrections using beam search, stopping early if all constraints are met.\n", |
|
"See the Appendix for additional details.\n", |
|
"\n", |
|
"\n", |
|
"\\begin{table}[t]\n", |
|
"\\begin{minipage}[t]{0.54\\linewidth}\n", |
|
"\\centering\\footnotesize\n", |
|
"\\setlength{\\tabcolsep}{4pt}\n", |
|
"\n", |
|
" \\begin{tabular}{lccc}\n", |
|
" \\toprule\n", |
|
" \\textbf{Method} & \\textbf{Runtime} & \\textbf{CIDER} & \\textbf{Constraints} \\\\\n", |
|
" \\midrule\n", |
|
" NeuroLogic~[\\citenum{lu-etal-2021-neurologic}] & 2.04s & 14.70 & 97.70\\\\\n", |
|
" NeuroLogic-A*~[\\citenum{lu2022neurologicastar}]& 19.24s & 15.20 & 97.80 \\\\\n", |
|
" \\midrule\n", |
|
" GPT-2 & 0.20s & 14.97 & 91.38 \\\\\n", |
|
" \\method & 0.80s& 15.30 & 94.58 \\\\\n", |
|
" \\ \\ +NeuroLogic & 2.24s& 15.28 & \\textbf{97.80} \\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
"\\end{minipage}\n", |
|
"\\hfill\n", |
|
"\\begin{minipage}[t]{0.45\\linewidth}\n", |
|
"\\centering \\footnotesize\n", |
|
"\\renewcommand\\arraystretch{1.2}\n", |
|
"\\scalebox{.86}{\n", |
|
" \\begin{tabular}{lcc}\n", |
|
" \\toprule\n", |
|
" \\textbf{Method} & \\textbf{Fluency} & \\textbf{Constraints} \\\\\n", |
|
" \\midrule\n", |
|
" \n", |
|
" Prefix-Tuning~[\\citenum{li-liang-2021-prefix}] & 2.96 & 91.16 \\\\\n", |
|
" NeuroLogic~[\\citenum{lu-etal-2021-neurologic}] & 2.80 & 96.91\\\\\n", |
|
" NeuroLogic-A*~[\\citenum{lu2022neurologicastar}] & 2.85 & 96.97\\\\\n", |
|
" \\midrule\n", |
|
" GPT-2 & 2.94 & 91.50 \\\\\n", |
|
" \\method& \\textbf{2.98} & \\textbf{98.77}\\\\\n", |
|
" \n", |
|
" \n", |
|
" \n", |
|
" \n", |
|
" \n", |
|
" \n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}}\n", |
|
" \n", |
|
"\\end{minipage}\n", |
|
"\\caption{\\textbf{Lexically-constrained generation.} By training a corrector to optimize constraint satisfaction, \\method improves constraints while maintaining fluency, without modifying the underlying generator. Due to space, we show CIDER for \\textsc{CommonGen} and human judgement for E2E as measures of fluency. Other metrics show similar trends and can be found in the Appendix.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"}\n", |
|
" \\label{tab:lexical_results}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"\n", |
|
"\\myparagraph{Results.} Table~\\ref{tab:lexical_results} shows the evaluation results. \n", |
|
"The self-corrector substantially improves constraint coverage over its GPT-2 generator for both tasks, while maintaining or improving its language quality.\n", |
|
"On the \\textsc{CommonGen} benchmark, the self-corrector paired with the NeuroLogic constrained decoding algorithm~\\citep{lu-etal-2021-neurologic} achieves the best results, outperforming the more sophisticated NeuroLogic-A* decoding algorithm, while being an order of magnitude faster. \n", |
|
"Notably, on E2E, self-correction \\textit{outperforms} Neurologic-A* decoding, despite only using standard beam search. \n", |
|
"This suggests that a corrector can be viewed as an alternative to using a more sophisticated decoding procedure (A*) for improving performance without modifying the underlying model. See \\autoref{fig:examples-cg-e2e} for qualitative examples.\n", |
|
"\n", |
|
"\\subsection{Toxicity Reduction}\n", |
|
"\\label{ssec:toxicity}\n", |
|
"Next, we consider the task of toxicity reduction~\\citep{gehman-etal-2020-realtoxicityprompts,liu-etal-2021-dexperts}.\n", |
|
"Given a prompt $x$, the task is to generate a fluent continuation $y$ while avoiding offensive content. \n", |
|
"This task is important for ensuring safe language model deployment, yet challenging: due to misaligned pretraining objectives (i.e. modeling internet text vs. non-toxic text), \n", |
|
"language models are susceptible to generating toxic completions, even when prompted with seemingly innocuous text~\\citep{gehman-etal-2020-realtoxicityprompts}.\n", |
|
"Along with its practical importance, the task tests whether (self-)correctors can be an effective mechanism for controlling the outputs of language models in an open-ended setting. \n", |
|
"\n", |
|
"\\myparagraph{Datasets and Metrics.} We use the \\textsc{RealToxicityPrompts} benchmark~\\citep{gehman-etal-2020-realtoxicityprompts} which contains 100k prompts designed to elicit toxic generations. Following the experimental setup of~\\citet{liu-etal-2021-dexperts}, during training we use 85K prompts from the training set, and for evaluation we use the same 10K non-toxic prompts from test set as \\citet{liu-etal-2021-dexperts}.\n", |
|
"We use Perspective API to measure \\textit{maximum toxicity}, defined as the average maximum toxicity over 25 sampled generations, and the (empirical) \\textit{toxicity probability} of at least 1 out of 25 generations being toxic.\n", |
|
"\n", |
|
"\\myparagraph{Baselines.}\n", |
|
"We compare \\method with its generator (GPT-2) and previously reported baselines from~\\citet{quark22}, including PPLM~\\citep{Dathathri2020PlugAP}, GeDi~\\citep{krause-etal-2021-gedi-generative}, DExpert~\\citep{liu-etal-2020-unsupervised}, DAPT~\\citep{gururangan-etal-2020-dont}, PPO~\\citep{quark22}, and Quark~\\citep{quark22}.\n", |
|
"The latter two -- Proximal Policy Optimization (PPO) and Quantized Reward Konditioning (Quark) -- represent strong, state-of-the-art approaches based on reinforcement learning.\n", |
|
"\n", |
|
"\\myparagraph{Setup.}\n", |
|
"We use the off-the-shelf GPT-2 Large as the generator, and finetune another GPT-2 Large as the corrector. \n", |
|
"During inference, we use nucleus sampling with $p=0.9$ to generate 25 samples for all baselines.\n", |
|
"As the value function, we use the Perspective API score, $v(y) \\in [0,1]$, which measures the toxicity of the completed sequence.\n", |
|
"We do up to three corrections with the corrector model.\n", |
|
"\n", |
|
"\\myparagraph{Results.}\n", |
|
"\\begin{table}[t!]\n", |
|
"\n", |
|
"\\begin{minipage}{0.63\\linewidth}\n", |
|
"\\setlength{\\tabcolsep}{4pt}\n", |
|
" \\centering\\footnotesize\n", |
|
" \\begin{tabular}{lcccccccc}\n", |
|
" \\toprule\n", |
|
" & \\multicolumn{2}{c}{\\textbf{Toxicity}}& \\textbf{Fluency}& \\multicolumn{2}{c}{\\textbf{Diversity}}\\\\\n", |
|
" \\cmidrule(lr){2-3}\\cmidrule(lr){4-4}\\cmidrule(lr){5-6}\n", |
|
" & \\textbf{Avg.~Max}. & \\textbf{Prob.} & \\textbf{Perplexity} & \\textbf{dist-2} & \\textbf{dist-3} \\\\\n", |
|
" \\midrule\n", |
|
" GPT-2 & 0.527 & 0.520 & 11.31 & 0.85 & 0.85 \\\\\n", |
|
" \\midrule\n", |
|
" PPLM~[\\citenum{Dathathri2020PlugAP}] & 0.520 & 0.518 & 32.58 & 0.86 & 0.86 \\\\\n", |
|
" GeDi~[\\citenum{krause-etal-2021-gedi-generative}] & 0.363 & 0.217 & 43.44 & 0.84 & 0.83 \\\\\n", |
|
" DExpert~[\\citenum{liu-etal-2020-unsupervised}] & 0.314 & 0.128 & 25.21 & 0.84 & 0.84 \\\\\n", |
|
" DAPT~[\\citenum{gururangan-etal-2020-dont}] & 0.428 & 0.360 & 31.22 & 0.84 & 0.84 \\\\\n", |
|
" PPO~[\\citenum{quark22}] & 0.218 & 0.044 & 14.27 & 0.79 & 0.82 \\\\\n", |
|
" Quark~[\\citenum{quark22}] & 0.196 & 0.035 & 12.47 & 0.80 & 0.84 \\\\\n", |
|
" \\midrule\n", |
|
" \\textsc{Self-Correct} & \\textbf{0.171} & \\textbf{0.026} & \\textbf{11.81} & 0.80 & 0.83\\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
" \\caption{\\textbf{Toxicity reduction.} GPT-2 is the base generator.}\n", |
|
" \n", |
|
" \\label{tab:toxicity_results}\n", |
|
" \\end{minipage}\n", |
|
" \\hfill\n", |
|
" \\begin{minipage}{0.34\\linewidth}\n", |
|
"\\centering \n", |
|
"\\includegraphics[width=1.0\\linewidth]{images/feedback_iterations_toxicity.png}\n", |
|
"\\vspace{-1.5em}\n", |
|
"\\captionof{figure}{Applying multiple corrections reduces toxicity.}\n", |
|
"\\label{fig:toxicity}\n", |
|
"\n", |
|
"\\end{minipage}\\hfill\n", |
|
"\\end{table}\n", |
|
"\\autoref{tab:toxicity_results} shows that \\method reduces the rate of toxic generations substantially, while also maintaining fluency and diversity.\n", |
|
"\\method outperforms all baselines.\n", |
|
"This includes inference-time algorithms (PPLM, GeDi, DExpert), which do not modify the generator but degrade fluency and yield higher toxicity compared to \\method, as well as reinforcement learning methods (PPO, Quark) that adjust the generator using toxicity as a (negative) reward.\n", |
|
"The results show that \\method is effective for detoxification, without having to modify the underlying generator. \n", |
|
"We study implications of this latter property further in the next section.\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{Changing Modules -- Correcting GPT-3}\n", |
|
"\\label{sec:modularity}\n", |
|
"Next, we show that a self-corrector can improve the outputs of a generator that is much larger than the corrector.\n", |
|
"We consider two cases: (1) training with a small generator, then swapping in the larger generator at test time; (2) training with the larger generator, i.e. using the large generator to initialize the datapool for self-corrective learning, then using the large generator at test time.\n", |
|
"\n", |
|
"\\myparagraph{Toxicity.} We evaluate case (1) for reducing the toxicity of a large generator (GPT-2 XL, GPT-3).\n", |
|
"We generate an initial sequence using the large generator, then refine it with our corrector trained in the previous experiments (\\S\\ref{ssec:toxicity}). \\autoref{tab:modularity_combined} shows that\n", |
|
"the resulting self-corrector (large generator + corrector) has substantially reduced toxicity compared to the large generator.\n", |
|
"This shows the promise of using (self-)correctors for controlling the outputs of large language models.\n", |
|
"\n", |
|
"\\myparagraph{Math program synthesis.} \\autoref{tab:modularity_combined} shows results for math.\n", |
|
"Analogous to toxicity, \n", |
|
"the corrector \n", |
|
"is able to correct larger generators swapped in at test-time. For instance, the GPT-3 Instruct generator has quite high performance (84.90 Multitask, 36.80 GSM), which improves to 90.90 and 45.00, respectively, by adding in a corrector.\n", |
|
"The self-corrector (large generator + corrector) improves further by training with the GPT-3 Instruct generator, \n", |
|
" to 92.75 and 45.92, respectively.\n", |
|
"\n", |
|
"\\begin{table*}[t!]\n", |
|
" \\centering\\footnotesize\n", |
|
" \\scalebox{.98}{\n", |
|
" \\begin{tabular}{llllcccc}\n", |
|
" \\toprule\n", |
|
" \\textbf{Task} & \\textbf{Dataset} & \\textbf{Generator (train)} & \\textbf{Generator (test)} & \\textbf{Generator} & \\textbf{Self-corrector}\\\\\n", |
|
" \\midrule\n", |
|
" \\multirow{6}{*}{Math Synthesis $\\uparrow$} & \n", |
|
" & Neo 1.3B & GPT-3 & 46.70 & 80.00\\\\\n", |
|
" & Multitask & Neo 1.3B & GPT-3 Instruct & 84.90 & 90.90\\\\\n", |
|
" & & GPT-3 Instruct & GPT-3 Instruct & 84.90 & 92.75\\\\\n", |
|
" \\cmidrule{2-6}\n", |
|
" & \n", |
|
" & Neo 1.3B & GPT-3 & 6.96 & 24.30 \\\\\n", |
|
" & GSM & Neo 1.3B & GPT-3 Instruct & 36.80 & 45.00\\\\\n", |
|
" & & GPT-3 Instruct & GPT-3 Instruct & 36.80 & 45.92\\\\\n", |
|
" \\midrule\n", |
|
" \\multirow{3}{*}{Detoxification \\hspace{1.3mm}$\\downarrow$} & \n", |
|
" & GPT2-L & GPT2-XL & 0.383 & 0.027\\\\\n", |
|
" & RTPrompts & GPT2-L & GPT-3 & 0.182 & 0.025 \\\\\n", |
|
" & & GPT2-L & GPT-3 Instruct & 0.275 & 0.023 \\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}}\n", |
|
"    \\caption{\\textbf{Modularity (program synthesis and detoxification).} Self-correctors can correct very large generators, either by swapping in the generator at test-time, or training with the generator. For math synthesis, the corrector is GPT-Neo 1.3B, and here we only correct incorrect outputs. For detoxification, the corrector is GPT2-L, and we correct all the outputs.\n", |
|
" \n", |
|
" }\n", |
|
" \\label{tab:modularity_combined}\n", |
|
"\\end{table*}\n", |
|
"\n", |
|
"\\subsection{Leveraging Explicit Feedback}\n", |
|
"\\label{sec:feedback}\n", |
|
"\n", |
|
"Next, we demonstrate \\method's capacity to incorporate explicit natural language feedback.\n", |
|
"This amounts to defining a feedback function $f$, then using the same self-corrective learning and inference algorithms (\\S\\ref{ssec:learning}) as in our preceding experiments (in those experiments, $f$ returned $\\emptyset$).\n", |
|
"We show that correctors learn to use the feedback, as evidenced by higher performance.\n", |
|
"\n", |
|
"\\myparagraph{Toxicity.} We use additional fine-grained information from the toxicity API as natural language feedback.\n", |
|
" Specifically, besides the overall toxicity score, Perspective API also provides scores for fine-grained attributes of toxicity (e.g. identity attack, profanity, flirtation, etc.). \n", |
|
" At training time, we compare the attribute scores from a hypothesis and its selected correction, and use the attribute with the largest decrease as natural language feedback (e.g. \"decrease toxicity in \\textit{profanity}\").\n", |
|
" At inference time, \n", |
|
" we call the API on the current hypothesis, and use the attribute with the highest score. \n", |
|
" Here we use the API at inference time, which is \\textit{not} required in our previous experiments. \n", |
|
"\n", |
|
"\n", |
|
"\\myparagraph{Lexical constraints.}\n", |
|
"\n", |
|
"In training time, we generate natural language feedback for every example pair $(x, y, y')$ by elaborating the extra lexical constraints satisfied by $y'$ but not $y$. e.g. \n", |
|
"\\textit{``adding constraint word: read''}.\n", |
|
"\n", |
|
"At inference time, we elaborate all missing constraints in the current hypothesis.\n", |
|
"\n", |
|
"\\myparagraph{Math program synthesis.}\n", |
|
"Math program synthesis contains a variety of problem types and errors, without an automated means for identifying the errors (e.g. an API). \n", |
|
"We explore obtaining natural language feedback about the current program by \n", |
|
"\n", |
|
" prompting a large language model.\n", |
|
" We prompt the model with a problem, hypothesis program, a gold solution, and few-shot demonstrations\n", |
|
"\n", |
|
"that show feedback on one part of the program; e.g.\n", |
|
"\\textit{In the initial guess, 3 should be subtracted.}\n", |
|
"When the program is correct, the feedback is \\textit{Correct.}\n", |
|
"At inference time, we also use feedback from the language model.\n", |
|
" We allow the feedback model access to a gold solution, which we expect makes the feedback higher quality, with the risk of\n", |
|
" solution leakage at inference-time.\n", |
|
" Our results in this task are thus used only to study the feasibility of explicit feedback for math program synthesis.\n", |
|
" \n", |
|
"\n", |
|
"\\begin{table*}[t!]\n", |
|
" \\centering\\footnotesize\n", |
|
" \\begin{tabular}{lcccccccc}\n", |
|
" \\toprule\n", |
|
" & \\multicolumn{3}{c}{\\textbf{Toxicity $\\downarrow$}} & \\multicolumn{2}{c}{\\textbf{Constrained Gen. $\\uparrow$}} & \\multicolumn{2}{c}{\\textbf{Math $\\uparrow$}}\\\\\n", |
|
" \\cmidrule(lr){2-4}\\cmidrule(lr){5-6}\\cmidrule(lr){7-8}\n", |
|
" & \\textbf{Avg.~Max}. & \\textbf{Prob.} & \\textbf{Fluency} & \\textbf{Fluency} & \\textbf{Constraints} & \\textbf{Correct} & \\textbf{Correct$_*$}\\\\\n", |
|
" \\midrule\n", |
|
" Generator & 0.527 & 0.520 & 11.31 & 14.97 & 91.38 & 49.02 & 49.02\\\\\n", |
|
" \\method & 0.171 & 0.026 & 11.81 & 15.30 & 94.58 & 74.31 & 79.80\\\\\n", |
|
" \\ \\ \\textsc{+ feedback} &\\textbf{0.156} & \\textbf{0.020} & 11.86 & 15.24 & \\textbf{95.88} & \\textbf{81.76} & \\textbf{82.35}\\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
" \\caption{\\textbf{Explicit natural language feedback.} Correct$_*$ means only correcting incorrect outputs.\n", |
|
" }\n", |
|
" \\label{tab:feedback-all}\n", |
|
"\\end{table*}\n", |
|
"\n", |
|
"\\setlength{\\columnsep}{0.2cm}\n", |
|
"\\begin{figure*}[t]\n", |
|
"\\setlength{\\columnsep}{0.2cm}\n", |
|
"\\begin{multicols}{2}\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{small}\n", |
|
"\\textbf{Problem:}\\\\\n", |
|
"Melanie had 19 dimes in her bank. Her dad gave her 39 dimes and her mother gave her 25 dimes. How many dimes does Melanie have now? \n", |
|
"\n", |
|
"\\Sepline\n", |
|
"\n", |
|
"\\textbf{Generator (GPT-Neo):}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"answer = 19 + 25\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\textbf{Feedback (GPT-3):}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"# In the initial guess, \n", |
|
"# 39 is not included.\n", |
|
"\\end{python}\n", |
|
"\\textbf{Corrector (GPT-Neo):}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"answer = 19 + 25 + 39\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\end{small}\n", |
|
"\n", |
|
"\\end{tcolorbox}\n", |
|
"\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{small}\n", |
|
"\\textbf{Problem:}\\\\\n", |
|
"Lana\u2019s favorite band was holding a concert where tickets were 6 dollars each. Lana bought 8\n", |
|
"tickets for herself and her friends and 2 extra tickets in case anyone else wanted to go. How much did\n", |
|
"she spend?\n", |
|
"\\Sepline\n", |
|
"\n", |
|
"\\textbf{Generator (GPT-Neo):}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"answer=(6.0*8.0)\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\textbf{Feedback (GPT-3):}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"# In the initial guess, \n", |
|
"# 2 tickets are not included.\n", |
|
"\\end{python}\n", |
|
"\\textbf{Corrector (GPT-Neo):}\n", |
|
"\\vspace{-0.5em}\n", |
|
"\\begin{python}\n", |
|
"answer=(6.0*(8.0+2.0))\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\end{small}\n", |
|
"\\vspace{-0.9em}\n", |
|
"\\end{tcolorbox}\n", |
|
"\\end{multicols}\n", |
|
"\\vspace{-1.7em}\n", |
|
"\\caption{\\textbf{Self-correction with natural language feedback.}\n", |
|
"}\n", |
|
"\\label{fig:math-feedback-examples}\n", |
|
"\\end{figure*}\n", |
|
"\n", |
|
"\\myparagraph{Setup.} For toxicity, lexical constraints, and math we use \\textsc{RealToxicityPrompts}, \\textsc{CommonGen}, and the \\textsc{Multitask} arithmetic setting, respectively. We follow the setup of each task's previous experiments (\\S\\ref{ssec:toxicity},\\S\\ref{ssec:constrained},\\S\\ref{ssec:math}), except for math we use 5 correction iterations (previously 1).\n", |
|
"For math, we use GPT-3 (text-davinci-002) with 6 demonstrations as the feedback model.\n", |
|
"\n", |
|
"\n", |
|
"\\myparagraph{Results.}\n", |
|
"\\autoref{tab:feedback-all} shows that explicit natural language feedback improves performance in all three tasks.\n", |
|
"For toxicity, this means that providing fine-grained attributes (e.g. identity attack, profanity, etc.) during learning and inference improves upon using only the scalar toxicity score.\n", |
|
"Intuitively, feedback may help the model to focus on a useful correction; e.g., see \\autoref{fig:math-feedback-examples}.\n", |
|
"\n", |
|
"\\subsection{Additional Ablations and Analysis}\n", |
|
"\\label{subsec:ablation}\n", |
|
"\n", |
|
"\\myparagraph{Effect of multiple corrections.}\n", |
|
"Previously, Figure~\\ref{fig:toxicity} showed that multiple corrections led to better toxicity reduction.\n", |
|
"On math (Multitask setting), Figure~\\ref{fig:math-feedback} shows that performance improves with more than one correction, and that\n", |
|
"multiple corrections are more beneficial with feedback.\n", |
|
"Intuitively, in this math task, after 2-3 corrections the model needs additional guidance.\n", |
|
"\n", |
|
"\\myparagraph{Effect of pairing and proportional sampling.}\n", |
|
"Self-corrective learning (i) samples pairs for learning proportional to \\autoref{eqn:residual-subsample}, (ii) only pairs sequences that improve value.\n", |
|
"We ablate these features by training on Multitask using a data pool that samples a pair for learning uniformly (rather than \\autoref{eqn:residual-subsample}), and a data pool without value pairing.\n", |
|
"\\autoref{tab:pairing} shows that both improve performance. \n", |
|
"\n", |
|
"\\myparagraph{Effect of exploration.}\n", |
|
"To ablate the effect of exploration, we train a baseline only on correction pairs induced from the base generator. \n", |
|
"\\autoref{tab:multiple-corrections}\n", |
|
" shows results on the three math datasets, indicating that exploration improves performance. \n", |
|
"\n", |
|
"\\begin{table}[t!]\n", |
|
"\\begin{minipage}{0.44\\linewidth}\n", |
|
"\\centering \n", |
|
"\\includegraphics[width=0.9\\linewidth]{images/feedback_iterations.png}\n", |
|
"\\vspace{-1em}\n", |
|
"\\captionof{figure}{Math: multiple corrections.}\n", |
|
"\\label{fig:math-feedback}\n", |
|
"\n", |
|
"\\end{minipage}\\hfill\n", |
|
"\\begin{minipage}{0.54\\linewidth}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\centering\\footnotesize\n", |
|
" \\scalebox{.98}{\n", |
|
" \\begin{tabular}{lccc}\n", |
|
" \\toprule\n", |
|
" \\textbf{Ablation} & \\multicolumn{1}{c}{\\textbf{Math}}& \\textbf{\\textsc{CommonGen}} \\\\\n", |
|
" \\midrule\n", |
|
" \\method & \\textbf{78.24} & \\textbf{94.55}\\\\\n", |
|
" \\ \\ \\xmark\\ proportional sampling & 77.25 & 93.49\\\\\n", |
|
" \\ \\ \\xmark\\ value pairing & 62.35 & 91.76 \\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}}\n", |
|
" \\vspace{-0.5em}\n", |
|
" \\caption{Effect of pairing and proportional sampling.}\n", |
|
" \\label{tab:pairing}\n", |
|
"\\begin{tabular}{ccccccccc}\n", |
|
" \\toprule\n", |
|
" \\textbf{Exploration} & \\textbf{Multiarith} & \\textbf{Multitask} & \\textbf{GSM8k}\\\\\n", |
|
" \\midrule\n", |
|
" \\xmark & 89.20 & 73.49 & 17.60 \\\\\n", |
|
" \\cmark & \\textbf{99.17} & \\textbf{78.24} & \\textbf{23.96}\\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
" \\vspace{-0.5em}\n", |
|
" \\caption{Effect of exploration on program synthesis.}\n", |
|
" \\label{tab:multiple-corrections}\n", |
|
"\\end{minipage}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"\\section{Related Work}\n", |
|
"\\vspace{-0.5em}\n", |
|
"Self-correction relates to recent works on editing text, including modeling Wikipedia edits \\citep{reid2022learning,faltings-etal-2021-text,schick2022peer}, which relies on supervised edits, unsupervised methods \\citep{Miao_Zhou_Mou_Yan_Li_2019,liu-etal-2020-unsupervised} that perturb sequences with simple operations (e.g. insertion, deletion),\n", |
|
" editing with models trained on human-written critiques~\\citep{saunders2022self}, or iteratively updating continuous variables~\\citep{lee-etal-2020-iterative,Li2022DiffusionLMIC,qin2022cold}.\n", |
|
"In contrast to these, self-correction learns an expressive text-to-text corrector that is trained online to improve a quality measure, without requiring a supervised dataset of edits or critiques.\n", |
|
"Separately, denoising ground-truth sequences is a common pretraining objective~\\citep{devlin-etal-2019-bert,lewis-etal-2020-bart,RaffelT5}, while self-correction `denoises' generations to improve a scalar quality measure.\n", |
|
"Scalar measures are often improved with reinforcement learning (RL) on a base generator~\\citep{ziegler2019finetuning,stiennon2020,quark22}, which is infeasible for improving many language models (e.g. those accessed through an API), and uses only scalar feedback. \n", |
|
"Moreover, self-correction learns the difference between a generation and solution, and is complementary to RL-tuned generators, which can be used within a self-corrector.\n", |
|
"Finally, self-correction decomposes generation into multiple steps, which relates to methods \n", |
|
"that generate rationales before a response~\\citep{Wei2022ChainOT,Dohan2022LanguageMC}.\n", |
|
"Self-correction also produces intermediate steps, but each step is of the same form as the output, allowing for re-using previous generations.\n", |
|
"\n", |
|
"\\section{Conclusion}\n", |
|
"\\vspace{-0.5em}\n", |
|
"We introduced self-correctors, a class of models that decompose generation into initial generation and correction steps.\n", |
|
"We study self-correctors with a fixed base generator along with a corrector trained to improve outputs according to a scalar measure of quality.\n", |
|
"We presented a simple, general procedure for training the corrector, and find that self-correction is applicable and effective for improving performance, and controlling the outputs of both small and large generators.\n", |
|
"Moreover, we found that self-correction along with our learning framework provides a promising mechanism for using natural language feedback to improve generation.\n", |
|
"These findings, along with exploring alternative self-correctors, open up many avenues that we leave for future work.\n", |
|
"\n", |
|
"\\section*{Acknowledgments}\n", |
|
"This work was funded in part by the DARPA MCS program through NIWC Pacific (N66001-19-2-4031), and the Allen Institute for AI.\n", |
|
"\n", |
|
"\\bibliography{iclr2023_conference}\n", |
|
"\\bibliographystyle{iclr2023_conference}\n", |
|
"\n", |
|
"\\clearpage\n", |
|
"\\newpage\n", |
|
"\\appendix\n", |
|
"\n", |
|
"\\begin{center}\n", |
|
"{\\LARGE \\textsc{\n", |
|
"Appendix\n", |
|
"}}\n", |
|
"\\vspace{40pt}\n", |
|
"\\end{center}\n", |
|
"\n", |
|
"\\section{Additional Experimental Details}\n", |
|
"\\subsection{Mathematical Program Synthesis}\n", |
|
"We fine-tune a separate instance of GPT-Neo 1.3B as an initial generator, using the Huggingface library with default hyperparameters, except for evaluation steps, which we set to a small number to ensure a strong checkpoint is selected for each dataset.\n", |
|
"We use the fine-tuned initial generator as initialization for the corrector,\n", |
|
"and tune the corrector on sequences\n", |
|
"${\\small\n", |
|
"\\texttt{[SC]x[CURR]yi[START]yj[END]},\n", |
|
"}$\n", |
|
"where $x$ is a problem, $y_i$ and $y_j$ form a residual pair, and $[\\cdot]$ are special tokens.\n", |
|
"The loss is on tokens after $\\texttt{[START]}$.\n", |
|
"\n", |
|
"\\paragraph{Feedback.}\n", |
|
"We write 6 demonstrations using training problems and generations from our GPT-Neo base generator, and use GPT-3 (text-davinci-002) as a feedback model.\n", |
|
"\n", |
|
"We use the same training procedure and hyperparameters, except that the sequences now include feedback,\n", |
|
"${\\small\n", |
|
"\\texttt{[SC]x[CURR]yi[FEEDBACK]F(x,yi)[START]yj[END]},\n", |
|
"}$\n", |
|
"where $x$ is a problem, $y_i$ and $y_j$ form a residual pair, and $F(x,y_i)$ is feedback.\n", |
|
"We include loss on tokens after $\\texttt{[FEEDBACK]}$.\n", |
|
"\n", |
|
"\\subsection{Lexically-constrained Generation}\n", |
|
"\n", |
|
"\\textbf{Hyper-parameters. }\\autoref{tab:hyper-cg} and \\autoref{tab:hyper-e2e} show hyperparameters for CommonGen and E2E.\n", |
|
"\n", |
|
"\\textbf{Human Evaluation. } We evaluate fluency of generations in E2E task using human annotators on Amazon Mechanical Turk (AMT). We randomly sampled 100 instances, along with generations of different baselines and self-corrections. For each instance, we ask 3 annotators to evaluate the fluency of generations on a 3-point Likert scale. We aggregate annotations from 3 annotators using majority vote. We restricted the pool of annotators to those who are located in US or CA, and had 98\\\n", |
|
"\\begin{table}[h]\n", |
|
"\\begin{minipage}{0.48\\linewidth}\n", |
|
"\\centering \n", |
|
" \\centering\\footnotesize\n", |
|
" \\begin{tabular}{lc}\n", |
|
" \\toprule\n", |
|
" \\textbf{Hyperparameter} & \\textbf{Assignment}\\\\\n", |
|
" \\midrule\n", |
|
" Predictor & GPT-2$_{Large}$ \\\\\n", |
|
" \\# steps & 6000\\\\\n", |
|
" batch size & 128\\\\\n", |
|
" optimizer & Adam\\\\\n", |
|
" learning rate & $1.e^-5$ \\\\\n", |
|
" decoding alg. & beam search (k=5) \\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
" \\caption{\n", |
|
"Hyperparameters for \\textsc{CommonGen}.\n", |
|
" }\n", |
|
" \\label{tab:hyper-cg}\n", |
|
"\\end{minipage}\\hfill\n", |
|
"\\begin{minipage}{0.5\\linewidth}\n", |
|
"\\centering\\footnotesize\n", |
|
" \\begin{tabular}{lc}\n", |
|
" \\toprule\n", |
|
" \\textbf{Hyperparameter} & \\textbf{Assignment}\\\\\n", |
|
" \\midrule\n", |
|
" Predictor & GPT-2$_{Medium}$ \\\\\n", |
|
" \\# steps & 10000\\\\\n", |
|
" batch size & 100 \\\\\n", |
|
" optimizer & Adam\\\\\n", |
|
" learning rate & $1.e^-5$ \\\\\n", |
|
" decoding alg. & beam search (k=5) \\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
" \\caption{Hyperparameters for E2E.\n", |
|
" }\n", |
|
" \\label{tab:hyper-e2e}\n", |
|
"\\end{minipage}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"\\section{Additional Results}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\begin{table*}[h]\n", |
|
" \\centering\\footnotesize\n", |
|
" \\begin{tabular}{lcccccccc}\n", |
|
" \\toprule\n", |
|
" & \\multicolumn{2}{c}{\\textbf{Toxicity}}& \\textbf{Fluency}& \\multicolumn{2}{c}{\\textbf{Diversity}}\\\\\n", |
|
" \\cmidrule(lr){2-3}\\cmidrule(lr){4-4}\\cmidrule(lr){5-6}\n", |
|
" & \\textbf{Avg.~Max}. & \\textbf{Prob.} & \\textbf{Perplexity} & \\textbf{dist-2} & \\textbf{dist-3} \\\\\n", |
|
" \\midrule\n", |
|
" GPT2-L & 0.527 & 0.520 & 11.31 & 0.85 & 0.85 \\\\\n", |
|
" \\textsc{Self-Correct} & 0.171 & 0.026 & 11.81 & 0.80 & 0.83\\\\\n", |
|
" \\textsc{Self-Correct + feedback} & \\textbf{0.156} & \\textbf{0.020} & 11.86 & 0.80 & 0.83\\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
" \\caption{Evaluation results of toxicity reduction experiments with natural language feedback.}\n", |
|
" \\label{tab:toxicity_feedback}\n", |
|
"\\end{table*}\n", |
|
"\n", |
|
"\n", |
|
"\\begin{table*}[h]\n", |
|
" \\centering\\footnotesize\n", |
|
" \\begin{tabular}{lcccccccc}\n", |
|
" \\toprule\n", |
|
" & \\textbf{Bleu-4} & \\textbf{CIDER} & \\textbf{Coverage} & \\textbf{Runtime}\\\\\n", |
|
" \\midrule\n", |
|
" \n", |
|
" NeuroLogic~[\\citenum{lu-etal-2021-neurologic}] & 26.70 & 14.70 & 97.70& 2.04s/sent\\\\\n", |
|
" NeuroLogic-A*esque~[\\citenum{lu2022neurologicastar}] & 28.20 & 15.20 & 97.80 & 19.24s/sent\\\\\n", |
|
" \\midrule\n", |
|
" GPT-2 & 27.90 & 14.97 & 91.38 & 0.2s/sent\\\\\n", |
|
" \\textsc{Self-Correct} & 27.98 & 15.30 & 94.58 & 0.8s/sent\\\\\n", |
|
" \\textsc{Self-Correct} + feedback & 27.82 & 15.24 & 95.88 & 0.8s/sent\\\\\n", |
|
" \\textsc{Self-Correct}+NeuroLogic & 28.17 & 15.28 & \\textbf{97.80} & 2.24s/sent\\\\\n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
" \\caption{Evaluation rresults of lexically-constrained generation on \\textsc{CommonGen}.\n", |
|
" \n", |
|
" \n", |
|
" }\n", |
|
" \\label{tab:commongen_results}\n", |
|
"\\end{table*}\n", |
|
"\n", |
|
"\\begin{table*}[h]\n", |
|
"\\setlength\\tabcolsep{2.5pt}\n", |
|
"\n", |
|
" \\centering\\footnotesize\n", |
|
" \\begin{tabular}{lcccccccc}\n", |
|
" \\toprule\n", |
|
" & \\textbf{Coverage} & \\textbf{BLEU-4} & \\textbf{NIST} & \\textbf{R-L} & \\textbf{METEOR} & \\textbf{CIDER} \\\\\n", |
|
" \\midrule\n", |
|
" \n", |
|
" \n", |
|
" \n", |
|
" \\textsc{Prefix-Tuning}~\\citep{li-liang-2021-prefix} & 91.16 & 70.30 & 8.82 & 72.10 & 46.30 & 2.46 \\\\\n", |
|
" \\midrule\n", |
|
" GPT-2 & 91.50 & 67.12 & 8.67 & 70.25 & 45.58 & 2.33\\\\\n", |
|
" \n", |
|
" \\textsc{Self-Correct} & \\textbf{98.77} & 68.81 & 8.78 & 68.60 & 45.11 & 2.38 \\\\\n", |
|
" \n", |
|
" \n", |
|
" \n", |
|
" \n", |
|
" \\bottomrule\n", |
|
" \\end{tabular}\n", |
|
" \\caption{Evaluation results of lexically-constrained generation on E2E. \n", |
|
" \n", |
|
" }\n", |
|
" \\label{tab:e2e_results}\n", |
|
"\\end{table*}\n", |
|
"\n", |
|
"\\section{Qualitative Examples}\n", |
|
"\n", |
|
"\\setlength{\\columnsep}{0.2cm}\n", |
|
"\\begin{figure}[t]\n", |
|
"\n", |
|
"\\setlength{\\columnsep}{0.2cm}\n", |
|
"\\begin{multicols}{2}\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{small}\n", |
|
"\\textbf{Problem:}\\\\\n", |
|
"Ralph watches TV for 4 hours a day from Monday to Friday, and 6 hours a day on Saturday and Sunday. How many hours does Ralph spend watching TV in one week?\n", |
|
"\\Sepline\n", |
|
"\\vspace{-1.2em}\n", |
|
"\\begin{multicols}{2}\n", |
|
"\\textbf{Generator:}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"a=4*7\n", |
|
"b=6*7\n", |
|
"c=a+b\n", |
|
"answer=c\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\columnbreak\n", |
|
"\n", |
|
"\\textbf{Corrector:}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"a=4*5\n", |
|
"b=6*2\n", |
|
"c=a+b\n", |
|
"answer=c\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\end{multicols}\n", |
|
"\\end{small}\n", |
|
"\\end{tcolorbox}\n", |
|
"\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{small}\n", |
|
"\\textbf{Problem:}\\\\\n", |
|
"The pirates plan to explore 4 islands. Two islands require walking 20 miles per day while the other two islands require 25 miles per day. How many miles will they have to walk if it takes 1.5 days to explore each island?\n", |
|
"\\Sepline\n", |
|
"\\vspace{-1.2em}\n", |
|
"\\begin{multicols}{2}\n", |
|
"\\textbf{Generator:}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"a=20*2\n", |
|
"b=25*2\n", |
|
"c=a+b\n", |
|
"d=c*1.5\n", |
|
"e=d+b\n", |
|
"answer=e\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\columnbreak\n", |
|
"\\textbf{Corrector:}\n", |
|
"\\vspace{-0.5em}\n", |
|
"\\begin{python}\n", |
|
"a=20*2\n", |
|
"b=25*2\n", |
|
"c=a+b\n", |
|
"d=c*1.5\n", |
|
"answer=d\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\n", |
|
"\\end{multicols}\n", |
|
"\\end{small}\n", |
|
"\\end{tcolorbox}\n", |
|
"\\end{multicols}\n", |
|
"\\vspace{-1.7em}\n", |
|
"\\caption{\\textbf{Grade school math (GSM) self-corrections.} Left: the structure of the generator's solution is valid, but it incorrectly uses the total number of days in a week for both \\textit{a} and \\textit{b}; the corrector fixes the program to correctly account for the 5 weekdays and 2 weekend days. Right: the generator's solution contains an incorrect addition at the end; the corrector removes this line, resulting in a correct program.\n", |
|
"}\n", |
|
"\\label{fig:math-examples2}\n", |
|
"\n", |
|
"\\begin{multicols}{2}\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{small}\n", |
|
"\\textbf{Problem:}\\\\\n", |
|
"A spiral notebook costs 15, and a personal planner costs \\$10. How much would it cost in total to buy 4 spiral notebooks and 8 personal planners at a 20\\\n", |
|
"\\Sepline\n", |
|
"\\vspace{-1.2em}\n", |
|
"\\begin{multicols}{2}\n", |
|
"\\textbf{Generator:}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"a=4*15\n", |
|
"b=8*10\n", |
|
"c=a+b\n", |
|
"answer=c\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\columnbreak\n", |
|
"\n", |
|
"\\textbf{Corrector:}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"a=4*15\n", |
|
"b=8*10\n", |
|
"c=a+b\n", |
|
"d=c*20/100\n", |
|
"e=c-d\n", |
|
"answer=e\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\end{multicols}\n", |
|
"\\end{small}\n", |
|
"\\end{tcolorbox}\n", |
|
"\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{small}\n", |
|
"\\textbf{Problem:}\\\\\n", |
|
"Julia has \\$40. She spends half of her money to buy a new game for her phone. She spends a quarter of what she has left on in-game purchases. How much money does she have left?\n", |
|
"\\Sepline\n", |
|
"\\vspace{-1.2em}\n", |
|
"\\begin{multicols}{2}\n", |
|
"\\textbf{Generator:}\n", |
|
"\\vspace{-0.3em}\n", |
|
"\\begin{python}\n", |
|
"a=40/2\n", |
|
"b=40-a\n", |
|
"c=b/4 \n", |
|
"d=b-c\n", |
|
"e=d+40 \n", |
|
"answer=e \n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\\columnbreak\n", |
|
"\\textbf{Corrector:}\n", |
|
"\\vspace{-1.5em}\n", |
|
"\\begin{python}\n", |
|
"a=40/2\n", |
|
"b=a/4\n", |
|
"c=a-b\n", |
|
"answer= c\n", |
|
"print(answer)\n", |
|
"\\end{python}\n", |
|
"\n", |
|
"\\end{multicols}\n", |
|
"\\end{small}\n", |
|
"\\end{tcolorbox}\n", |
|
"\\end{multicols}\n", |
|
"\\vspace{-1.7em}\n", |
|
"\\caption{\\textbf{Grade school math (GSM) self-corrections.} Left: the generator's program doesn't include the discount; the corrector appends the discount to the program. Right: a more sophisticated multipart correction. The generator's assignment of \\textit{b} (line 2), and addition to \\textit{e} (line 5) are incorrect. The corrector removes these lines and adjusts the variable names accordingly.\n", |
|
"}\n", |
|
"\\label{fig:math-examples3}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"\\setlength{\\columnsep}{0.2cm}\n", |
|
"\\begin{figure}[t]\n", |
|
"\n", |
|
"\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{scriptsize}\n", |
|
"\\textbf{Constraints:}\\\\\n", |
|
"`table', `paper', `read' \\\\\n", |
|
"\\textbf{Generator:} \\\\\n", |
|
"\\texttt{A man is reading book on a table.} \\\\\n", |
|
"\n", |
|
"\\textbf{Corrector:} \\\\\n", |
|
"\\texttt{A man is reading a book on a table and writing on a piece of \\textcolor{green(pigment)}{paper}. }\n", |
|
"\\Sepline\n", |
|
"\n", |
|
"\\textbf{Constraints:} \\\\'stand', 'wait', 'look'\\\\\n", |
|
"\\textbf{Generator:}\\\\\n", |
|
"\\texttt{He waits for someone to look at him.}\\\\\n", |
|
"\\textbf{Corrector:}\\\\\n", |
|
"\\texttt{He waits for someone to \\textcolor{green(pigment)}{stand} and look him in the eye.}\n", |
|
"\\Sepline\n", |
|
"\n", |
|
"\\textbf{Constraints:} \\\\'walk', 'park', 'bench', 'dog', 'sit'\\\\\n", |
|
"\\textbf{Generator:}\\\\\n", |
|
"\\texttt{A dog sitting on a bench in a park.}\\\\\n", |
|
"\\textbf{Corrector:}\\\\\n", |
|
"\\texttt{A dog sitting on a bench and a man \\textcolor{green(pigment)}{walking} a dog in a park.}\n", |
|
"\n", |
|
"\\end{scriptsize}\n", |
|
"\\vspace{-0.5em}\n", |
|
"\\end{tcolorbox}\n", |
|
"\n", |
|
"\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{scriptsize}\n", |
|
"\n", |
|
"\\textbf{Constraints:}\\\\\n", |
|
"name : The Mill | Type : restaurant | food : English | price : high | customer rating : average | area : riverside | family friendly : yes | near : Cafe Rouge \\\\\n", |
|
"\\textbf{Generator:} \\\\\n", |
|
"\\texttt{The Mill is an expensive, three star restaurant located near Cafe Rouge.} \\\\\n", |
|
"\n", |
|
"\\textbf{Corrector:} \\\\\n", |
|
"\\texttt{The Mill is a \\textcolor{green(pigment)}{children friendly} \\textcolor{green(pigment)}{English} restaurant \\textcolor{green(pigment)}{in the riverside area} near Cafe Rouge. It has an average customer rating and a high price range.}\n", |
|
"\n", |
|
"\\Sepline\n", |
|
"\n", |
|
"\\textbf{Constraints:}\\\\\n", |
|
"name : Blue Spice | Type : restaurant | food : Chinese | area : riverside | family friendly : yes | near : Rainbow Vegetarian Cafe\n", |
|
"\\textbf{Generator:} \\\\\n", |
|
"\\texttt{Blue Spice is a restaurant providing Chinese food. It is located in the riverside. It is near Rainbow Vegetarian Cafe.} \\\\\n", |
|
"\n", |
|
"\\textbf{Corrector:} \\\\\n", |
|
"\\texttt{Blue Spice is a \\textcolor{green(pigment)}{family friendly} Chinese restaurant in the riverside area near Rainbow Vegetarian Cafe.}\n", |
|
"\n", |
|
"\\end{scriptsize}\n", |
|
"\\vspace{-0.6em}\n", |
|
"\\end{tcolorbox}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\caption{\\textbf{Constrained generation self-corrections.} Top: \\textsc{CommonGen}, and Bottom: E2E. Words in \\textcolor{green(pigment)}{green} indicate constraints that were previously missing.\n", |
|
"}\n", |
|
"\\label{fig:examples-cg-e2e}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\setlength{\\columnsep}{0.2cm}\n", |
|
"\\begin{figure}[t]\n", |
|
"\n", |
|
"\\begin{tcolorbox}[colback=qualcolor!5!white,colframe=qualcolor!75!black]\n", |
|
"\\begin{scriptsize}\n", |
|
"\\textbf{Prompt:}\\\\\n", |
|
"Waters then went on to call the Trump \\\\\n", |
|
"\\textbf{Generator:} \\\\\n", |
|
"\\texttt{administration a \\textcolor{red}{\"bunch of idiots\" and \"insolent clowns} [who are] running\"} \\\\\n", |
|
"\\textbf{Corrector:} \\\\\n", |
|
"\\texttt{Administration's support for the Trans-Pacific Partnership on trade \"a total non-event.\"} \\\\\n", |
|
"\\texttt{}\n", |
|
"\n", |
|
"\n", |
|
"\\end{scriptsize}\n", |
|
"\\vspace{-0.5em}\n", |
|
"\\end{tcolorbox}\n", |
|
"\n", |
|
"\n", |
|
"\\caption{\\textbf{Toxicity control self-corrections.} Words in \\textcolor{red}{red} indicate toxic language.\n", |
|
"}\n", |
|
"\\label{fig:examples-toxicity}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\end{document}\n" |
|
], |
|
"del_percentage": 0.0641 |
|
} |
|
} |