{ "id": "2211.01910", "annotator": "jiangshu", "input": [ "\\documentclass{article} \n", "\\usepackage[dvipsnames]{xcolor} \n", "\\usepackage{iclr2023_conference,times}\n", "\\usepackage{amsmath,amsfonts,bm}\n", "\\newcommand{\\figleft}{{\\em (Left)}}\n", "\\newcommand{\\figcenter}{{\\em (Center)}}\n", "\\newcommand{\\figright}{{\\em (Right)}}\n", "\\newcommand{\\figtop}{{\\em (Top)}}\n", "\\newcommand{\\figbottom}{{\\em (Bottom)}}\n", "\\newcommand{\\captiona}{{\\em (a)}}\n", "\\newcommand{\\captionb}{{\\em (b)}}\n", "\\newcommand{\\captionc}{{\\em (c)}}\n", "\\newcommand{\\captiond}{{\\em (d)}}\n", "\\newcommand{\\newterm}[1]{{\\bf #1}}\n", "\\def\\figref#1{figure~\\ref{#1}}\n", "\\def\\Figref#1{Figure~\\ref{#1}}\n", "\\def\\twofigref#1#2{figures \\ref{#1} and \\ref{#2}}\n", "\\def\\quadfigref#1#2#3#4{figures \\ref{#1}, \\ref{#2}, \\ref{#3} and \\ref{#4}}\n", "\\def\\secref#1{section~\\ref{#1}}\n", "\\def\\Secref#1{Section~\\ref{#1}}\n", "\\def\\twosecrefs#1#2{sections \\ref{#1} and \\ref{#2}}\n", "\\def\\secrefs#1#2#3{sections \\ref{#1}, \\ref{#2} and \\ref{#3}}\n", "\\def\\eqref#1{equation~\\ref{#1}}\n", "\\def\\Eqref#1{Equation~\\ref{#1}}\n", "\\def\\plaineqref#1{\\ref{#1}}\n", "\\def\\chapref#1{chapter~\\ref{#1}}\n", "\\def\\Chapref#1{Chapter~\\ref{#1}}\n", "\\def\\rangechapref#1#2{chapters\\ref{#1}--\\ref{#2}}\n", "\\def\\algref#1{algorithm~\\ref{#1}}\n", "\\def\\Algref#1{Algorithm~\\ref{#1}}\n", "\\def\\twoalgref#1#2{algorithms \\ref{#1} and \\ref{#2}}\n", "\\def\\Twoalgref#1#2{Algorithms \\ref{#1} and \\ref{#2}}\n", "\\def\\partref#1{part~\\ref{#1}}\n", "\\def\\Partref#1{Part~\\ref{#1}}\n", "\\def\\twopartref#1#2{parts \\ref{#1} and \\ref{#2}}\n", "\\def\\ceil#1{\\lceil #1 \\rceil}\n", "\\def\\floor#1{\\lfloor #1 \\rfloor}\n", "\\def\\1{\\bm{1}}\n", "\\newcommand{\\train}{\\mathcal{D}}\n", "\\newcommand{\\valid}{\\mathcal{D_{\\mathrm{valid}}}}\n", "\\newcommand{\\test}{\\mathcal{D_{\\mathrm{test}}}}\n", "\\def\\eps{{\\epsilon}}\n", "\\def\\reta{{\\textnormal{$\\eta$}}}\n", "\\def\\ra{{\\textnormal{a}}}\n", "\\def\\rb{{\\textnormal{b}}}\n", "\\def\\rc{{\\textnormal{c}}}\n", "\\def\\rd{{\\textnormal{d}}}\n", "\\def\\re{{\\textnormal{e}}}\n", "\\def\\rf{{\\textnormal{f}}}\n", "\\def\\rg{{\\textnormal{g}}}\n", "\\def\\rh{{\\textnormal{h}}}\n", "\\def\\ri{{\\textnormal{i}}}\n", "\\def\\rj{{\\textnormal{j}}}\n", "\\def\\rk{{\\textnormal{k}}}\n", "\\def\\rl{{\\textnormal{l}}}\n", "\\def\\rn{{\\textnormal{n}}}\n", "\\def\\ro{{\\textnormal{o}}}\n", "\\def\\rp{{\\textnormal{p}}}\n", "\\def\\rq{{\\textnormal{q}}}\n", "\\def\\rr{{\\textnormal{r}}}\n", "\\def\\rs{{\\textnormal{s}}}\n", "\\def\\rt{{\\textnormal{t}}}\n", "\\def\\ru{{\\textnormal{u}}}\n", "\\def\\rv{{\\textnormal{v}}}\n", "\\def\\rw{{\\textnormal{w}}}\n", "\\def\\rx{{\\textnormal{x}}}\n", "\\def\\ry{{\\textnormal{y}}}\n", "\\def\\rz{{\\textnormal{z}}}\n", "\\def\\rvepsilon{{\\mathbf{\\epsilon}}}\n", "\\def\\rvtheta{{\\mathbf{\\theta}}}\n", "\\def\\rva{{\\mathbf{a}}}\n", "\\def\\rvb{{\\mathbf{b}}}\n", "\\def\\rvc{{\\mathbf{c}}}\n", "\\def\\rvd{{\\mathbf{d}}}\n", "\\def\\rve{{\\mathbf{e}}}\n", "\\def\\rvf{{\\mathbf{f}}}\n", "\\def\\rvg{{\\mathbf{g}}}\n", "\\def\\rvh{{\\mathbf{h}}}\n", "\\def\\rvu{{\\mathbf{i}}}\n", "\\def\\rvj{{\\mathbf{j}}}\n", "\\def\\rvk{{\\mathbf{k}}}\n", "\\def\\rvl{{\\mathbf{l}}}\n", "\\def\\rvm{{\\mathbf{m}}}\n", "\\def\\rvn{{\\mathbf{n}}}\n", "\\def\\rvo{{\\mathbf{o}}}\n", "\\def\\rvp{{\\mathbf{p}}}\n", "\\def\\rvq{{\\mathbf{q}}}\n", "\\def\\rvr{{\\mathbf{r}}}\n", "\\def\\rvs{{\\mathbf{s}}}\n", 
"\\def\\rvt{{\\mathbf{t}}}\n", "\\def\\rvu{{\\mathbf{u}}}\n", "\\def\\rvv{{\\mathbf{v}}}\n", "\\def\\rvw{{\\mathbf{w}}}\n", "\\def\\rvx{{\\mathbf{x}}}\n", "\\def\\rvy{{\\mathbf{y}}}\n", "\\def\\rvz{{\\mathbf{z}}}\n", "\\def\\erva{{\\textnormal{a}}}\n", "\\def\\ervb{{\\textnormal{b}}}\n", "\\def\\ervc{{\\textnormal{c}}}\n", "\\def\\ervd{{\\textnormal{d}}}\n", "\\def\\erve{{\\textnormal{e}}}\n", "\\def\\ervf{{\\textnormal{f}}}\n", "\\def\\ervg{{\\textnormal{g}}}\n", "\\def\\ervh{{\\textnormal{h}}}\n", "\\def\\ervi{{\\textnormal{i}}}\n", "\\def\\ervj{{\\textnormal{j}}}\n", "\\def\\ervk{{\\textnormal{k}}}\n", "\\def\\ervl{{\\textnormal{l}}}\n", "\\def\\ervm{{\\textnormal{m}}}\n", "\\def\\ervn{{\\textnormal{n}}}\n", "\\def\\ervo{{\\textnormal{o}}}\n", "\\def\\ervp{{\\textnormal{p}}}\n", "\\def\\ervq{{\\textnormal{q}}}\n", "\\def\\ervr{{\\textnormal{r}}}\n", "\\def\\ervs{{\\textnormal{s}}}\n", "\\def\\ervt{{\\textnormal{t}}}\n", "\\def\\ervu{{\\textnormal{u}}}\n", "\\def\\ervv{{\\textnormal{v}}}\n", "\\def\\ervw{{\\textnormal{w}}}\n", "\\def\\ervx{{\\textnormal{x}}}\n", "\\def\\ervy{{\\textnormal{y}}}\n", "\\def\\ervz{{\\textnormal{z}}}\n", "\\def\\rmA{{\\mathbf{A}}}\n", "\\def\\rmB{{\\mathbf{B}}}\n", "\\def\\rmC{{\\mathbf{C}}}\n", "\\def\\rmD{{\\mathbf{D}}}\n", "\\def\\rmE{{\\mathbf{E}}}\n", "\\def\\rmF{{\\mathbf{F}}}\n", "\\def\\rmG{{\\mathbf{G}}}\n", "\\def\\rmH{{\\mathbf{H}}}\n", "\\def\\rmI{{\\mathbf{I}}}\n", "\\def\\rmJ{{\\mathbf{J}}}\n", "\\def\\rmK{{\\mathbf{K}}}\n", "\\def\\rmL{{\\mathbf{L}}}\n", "\\def\\rmM{{\\mathbf{M}}}\n", "\\def\\rmN{{\\mathbf{N}}}\n", "\\def\\rmO{{\\mathbf{O}}}\n", "\\def\\rmP{{\\mathbf{P}}}\n", "\\def\\rmQ{{\\mathbf{Q}}}\n", "\\def\\rmR{{\\mathbf{R}}}\n", "\\def\\rmS{{\\mathbf{S}}}\n", "\\def\\rmT{{\\mathbf{T}}}\n", "\\def\\rmU{{\\mathbf{U}}}\n", "\\def\\rmV{{\\mathbf{V}}}\n", "\\def\\rmW{{\\mathbf{W}}}\n", "\\def\\rmX{{\\mathbf{X}}}\n", "\\def\\rmY{{\\mathbf{Y}}}\n", "\\def\\rmZ{{\\mathbf{Z}}}\n", "\\def\\ermA{{\\textnormal{A}}}\n", "\\def\\ermB{{\\textnormal{B}}}\n", "\\def\\ermC{{\\textnormal{C}}}\n", "\\def\\ermD{{\\textnormal{D}}}\n", "\\def\\ermE{{\\textnormal{E}}}\n", "\\def\\ermF{{\\textnormal{F}}}\n", "\\def\\ermG{{\\textnormal{G}}}\n", "\\def\\ermH{{\\textnormal{H}}}\n", "\\def\\ermI{{\\textnormal{I}}}\n", "\\def\\ermJ{{\\textnormal{J}}}\n", "\\def\\ermK{{\\textnormal{K}}}\n", "\\def\\ermL{{\\textnormal{L}}}\n", "\\def\\ermM{{\\textnormal{M}}}\n", "\\def\\ermN{{\\textnormal{N}}}\n", "\\def\\ermO{{\\textnormal{O}}}\n", "\\def\\ermP{{\\textnormal{P}}}\n", "\\def\\ermQ{{\\textnormal{Q}}}\n", "\\def\\ermR{{\\textnormal{R}}}\n", "\\def\\ermS{{\\textnormal{S}}}\n", "\\def\\ermT{{\\textnormal{T}}}\n", "\\def\\ermU{{\\textnormal{U}}}\n", "\\def\\ermV{{\\textnormal{V}}}\n", "\\def\\ermW{{\\textnormal{W}}}\n", "\\def\\ermX{{\\textnormal{X}}}\n", "\\def\\ermY{{\\textnormal{Y}}}\n", "\\def\\ermZ{{\\textnormal{Z}}}\n", "\\def\\vzero{{\\bm{0}}}\n", "\\def\\vone{{\\bm{1}}}\n", "\\def\\vmu{{\\bm{\\mu}}}\n", "\\def\\vtheta{{\\bm{\\theta}}}\n", "\\def\\va{{\\bm{a}}}\n", "\\def\\vb{{\\bm{b}}}\n", "\\def\\vc{{\\bm{c}}}\n", "\\def\\vd{{\\bm{d}}}\n", "\\def\\ve{{\\bm{e}}}\n", "\\def\\vf{{\\bm{f}}}\n", "\\def\\vg{{\\bm{g}}}\n", "\\def\\vh{{\\bm{h}}}\n", "\\def\\vi{{\\bm{i}}}\n", "\\def\\vj{{\\bm{j}}}\n", "\\def\\vk{{\\bm{k}}}\n", "\\def\\vl{{\\bm{l}}}\n", "\\def\\vm{{\\bm{m}}}\n", "\\def\\vn{{\\bm{n}}}\n", "\\def\\vo{{\\bm{o}}}\n", "\\def\\vp{{\\bm{p}}}\n", "\\def\\vq{{\\bm{q}}}\n", "\\def\\vr{{\\bm{r}}}\n", "\\def\\vs{{\\bm{s}}}\n", "\\def\\vt{{\\bm{t}}}\n", 
"\\def\\vu{{\\bm{u}}}\n", "\\def\\vv{{\\bm{v}}}\n", "\\def\\vw{{\\bm{w}}}\n", "\\def\\vx{{\\bm{x}}}\n", "\\def\\vy{{\\bm{y}}}\n", "\\def\\vz{{\\bm{z}}}\n", "\\def\\evalpha{{\\alpha}}\n", "\\def\\evbeta{{\\beta}}\n", "\\def\\evepsilon{{\\epsilon}}\n", "\\def\\evlambda{{\\lambda}}\n", "\\def\\evomega{{\\omega}}\n", "\\def\\evmu{{\\mu}}\n", "\\def\\evpsi{{\\psi}}\n", "\\def\\evsigma{{\\sigma}}\n", "\\def\\evtheta{{\\theta}}\n", "\\def\\eva{{a}}\n", "\\def\\evb{{b}}\n", "\\def\\evc{{c}}\n", "\\def\\evd{{d}}\n", "\\def\\eve{{e}}\n", "\\def\\evf{{f}}\n", "\\def\\evg{{g}}\n", "\\def\\evh{{h}}\n", "\\def\\evi{{i}}\n", "\\def\\evj{{j}}\n", "\\def\\evk{{k}}\n", "\\def\\evl{{l}}\n", "\\def\\evm{{m}}\n", "\\def\\evn{{n}}\n", "\\def\\evo{{o}}\n", "\\def\\evp{{p}}\n", "\\def\\evq{{q}}\n", "\\def\\evr{{r}}\n", "\\def\\evs{{s}}\n", "\\def\\evt{{t}}\n", "\\def\\evu{{u}}\n", "\\def\\evv{{v}}\n", "\\def\\evw{{w}}\n", "\\def\\evx{{x}}\n", "\\def\\evy{{y}}\n", "\\def\\evz{{z}}\n", "\\def\\mA{{\\bm{A}}}\n", "\\def\\mB{{\\bm{B}}}\n", "\\def\\mC{{\\bm{C}}}\n", "\\def\\mD{{\\bm{D}}}\n", "\\def\\mE{{\\bm{E}}}\n", "\\def\\mF{{\\bm{F}}}\n", "\\def\\mG{{\\bm{G}}}\n", "\\def\\mH{{\\bm{H}}}\n", "\\def\\mI{{\\bm{I}}}\n", "\\def\\mJ{{\\bm{J}}}\n", "\\def\\mK{{\\bm{K}}}\n", "\\def\\mL{{\\bm{L}}}\n", "\\def\\mM{{\\bm{M}}}\n", "\\def\\mN{{\\bm{N}}}\n", "\\def\\mO{{\\bm{O}}}\n", "\\def\\mP{{\\bm{P}}}\n", "\\def\\mQ{{\\bm{Q}}}\n", "\\def\\mR{{\\bm{R}}}\n", "\\def\\mS{{\\bm{S}}}\n", "\\def\\mT{{\\bm{T}}}\n", "\\def\\mU{{\\bm{U}}}\n", "\\def\\mV{{\\bm{V}}}\n", "\\def\\mW{{\\bm{W}}}\n", "\\def\\mX{{\\bm{X}}}\n", "\\def\\mY{{\\bm{Y}}}\n", "\\def\\mZ{{\\bm{Z}}}\n", "\\def\\mBeta{{\\bm{\\beta}}}\n", "\\def\\mPhi{{\\bm{\\Phi}}}\n", "\\def\\mLambda{{\\bm{\\Lambda}}}\n", "\\def\\mSigma{{\\bm{\\Sigma}}}\n", "\\DeclareMathAlphabet{\\mathsfit}{\\encodingdefault}{\\sfdefault}{m}{sl}\n", "\\SetMathAlphabet{\\mathsfit}{bold}{\\encodingdefault}{\\sfdefault}{bx}{n}\n", "\\newcommand{\\tens}[1]{\\bm{\\mathsfit{#1}}}\n", "\\def\\tA{{\\tens{A}}}\n", "\\def\\tB{{\\tens{B}}}\n", "\\def\\tC{{\\tens{C}}}\n", "\\def\\tD{{\\tens{D}}}\n", "\\def\\tE{{\\tens{E}}}\n", "\\def\\tF{{\\tens{F}}}\n", "\\def\\tG{{\\tens{G}}}\n", "\\def\\tH{{\\tens{H}}}\n", "\\def\\tI{{\\tens{I}}}\n", "\\def\\tJ{{\\tens{J}}}\n", "\\def\\tK{{\\tens{K}}}\n", "\\def\\tL{{\\tens{L}}}\n", "\\def\\tM{{\\tens{M}}}\n", "\\def\\tN{{\\tens{N}}}\n", "\\def\\tO{{\\tens{O}}}\n", "\\def\\tP{{\\tens{P}}}\n", "\\def\\tQ{{\\tens{Q}}}\n", "\\def\\tR{{\\tens{R}}}\n", "\\def\\tS{{\\tens{S}}}\n", "\\def\\tT{{\\tens{T}}}\n", "\\def\\tU{{\\tens{U}}}\n", "\\def\\tV{{\\tens{V}}}\n", "\\def\\tW{{\\tens{W}}}\n", "\\def\\tX{{\\tens{X}}}\n", "\\def\\tY{{\\tens{Y}}}\n", "\\def\\tZ{{\\tens{Z}}}\n", "\\def\\gA{{\\mathcal{A}}}\n", "\\def\\gB{{\\mathcal{B}}}\n", "\\def\\gC{{\\mathcal{C}}}\n", "\\def\\gD{{\\mathcal{D}}}\n", "\\def\\gE{{\\mathcal{E}}}\n", "\\def\\gF{{\\mathcal{F}}}\n", "\\def\\gG{{\\mathcal{G}}}\n", "\\def\\gH{{\\mathcal{H}}}\n", "\\def\\gI{{\\mathcal{I}}}\n", "\\def\\gJ{{\\mathcal{J}}}\n", "\\def\\gK{{\\mathcal{K}}}\n", "\\def\\gL{{\\mathcal{L}}}\n", "\\def\\gM{{\\mathcal{M}}}\n", "\\def\\gN{{\\mathcal{N}}}\n", "\\def\\gO{{\\mathcal{O}}}\n", "\\def\\gP{{\\mathcal{P}}}\n", "\\def\\gQ{{\\mathcal{Q}}}\n", "\\def\\gR{{\\mathcal{R}}}\n", "\\def\\gS{{\\mathcal{S}}}\n", "\\def\\gT{{\\mathcal{T}}}\n", "\\def\\gU{{\\mathcal{U}}}\n", "\\def\\gV{{\\mathcal{V}}}\n", "\\def\\gW{{\\mathcal{W}}}\n", "\\def\\gX{{\\mathcal{X}}}\n", "\\def\\gY{{\\mathcal{Y}}}\n", "\\def\\gZ{{\\mathcal{Z}}}\n", 
"\\def\\sA{{\\mathbb{A}}}\n", "\\def\\sB{{\\mathbb{B}}}\n", "\\def\\sC{{\\mathbb{C}}}\n", "\\def\\sD{{\\mathbb{D}}}\n", "\\def\\sF{{\\mathbb{F}}}\n", "\\def\\sG{{\\mathbb{G}}}\n", "\\def\\sH{{\\mathbb{H}}}\n", "\\def\\sI{{\\mathbb{I}}}\n", "\\def\\sJ{{\\mathbb{J}}}\n", "\\def\\sK{{\\mathbb{K}}}\n", "\\def\\sL{{\\mathbb{L}}}\n", "\\def\\sM{{\\mathbb{M}}}\n", "\\def\\sN{{\\mathbb{N}}}\n", "\\def\\sO{{\\mathbb{O}}}\n", "\\def\\sP{{\\mathbb{P}}}\n", "\\def\\sQ{{\\mathbb{Q}}}\n", "\\def\\sR{{\\mathbb{R}}}\n", "\\def\\sS{{\\mathbb{S}}}\n", "\\def\\sT{{\\mathbb{T}}}\n", "\\def\\sU{{\\mathbb{U}}}\n", "\\def\\sV{{\\mathbb{V}}}\n", "\\def\\sW{{\\mathbb{W}}}\n", "\\def\\sX{{\\mathbb{X}}}\n", "\\def\\sY{{\\mathbb{Y}}}\n", "\\def\\sZ{{\\mathbb{Z}}}\n", "\\def\\emLambda{{\\Lambda}}\n", "\\def\\emA{{A}}\n", "\\def\\emB{{B}}\n", "\\def\\emC{{C}}\n", "\\def\\emD{{D}}\n", "\\def\\emE{{E}}\n", "\\def\\emF{{F}}\n", "\\def\\emG{{G}}\n", "\\def\\emH{{H}}\n", "\\def\\emI{{I}}\n", "\\def\\emJ{{J}}\n", "\\def\\emK{{K}}\n", "\\def\\emL{{L}}\n", "\\def\\emM{{M}}\n", "\\def\\emN{{N}}\n", "\\def\\emO{{O}}\n", "\\def\\emP{{P}}\n", "\\def\\emQ{{Q}}\n", "\\def\\emR{{R}}\n", "\\def\\emS{{S}}\n", "\\def\\emT{{T}}\n", "\\def\\emU{{U}}\n", "\\def\\emV{{V}}\n", "\\def\\emW{{W}}\n", "\\def\\emX{{X}}\n", "\\def\\emY{{Y}}\n", "\\def\\emZ{{Z}}\n", "\\def\\emSigma{{\\Sigma}}\n", "\\newcommand{\\etens}[1]{\\mathsfit{#1}}\n", "\\def\\etLambda{{\\etens{\\Lambda}}}\n", "\\def\\etA{{\\etens{A}}}\n", "\\def\\etB{{\\etens{B}}}\n", "\\def\\etC{{\\etens{C}}}\n", "\\def\\etD{{\\etens{D}}}\n", "\\def\\etE{{\\etens{E}}}\n", "\\def\\etF{{\\etens{F}}}\n", "\\def\\etG{{\\etens{G}}}\n", "\\def\\etH{{\\etens{H}}}\n", "\\def\\etI{{\\etens{I}}}\n", "\\def\\etJ{{\\etens{J}}}\n", "\\def\\etK{{\\etens{K}}}\n", "\\def\\etL{{\\etens{L}}}\n", "\\def\\etM{{\\etens{M}}}\n", "\\def\\etN{{\\etens{N}}}\n", "\\def\\etO{{\\etens{O}}}\n", "\\def\\etP{{\\etens{P}}}\n", "\\def\\etQ{{\\etens{Q}}}\n", "\\def\\etR{{\\etens{R}}}\n", "\\def\\etS{{\\etens{S}}}\n", "\\def\\etT{{\\etens{T}}}\n", "\\def\\etU{{\\etens{U}}}\n", "\\def\\etV{{\\etens{V}}}\n", "\\def\\etW{{\\etens{W}}}\n", "\\def\\etX{{\\etens{X}}}\n", "\\def\\etY{{\\etens{Y}}}\n", "\\def\\etZ{{\\etens{Z}}}\n", "\\newcommand{\\pdata}{p_{\\rm{data}}}\n", "\\newcommand{\\ptrain}{\\hat{p}_{\\rm{data}}}\n", "\\newcommand{\\Ptrain}{\\hat{P}_{\\rm{data}}}\n", "\\newcommand{\\pmodel}{p_{\\rm{model}}}\n", "\\newcommand{\\Pmodel}{P_{\\rm{model}}}\n", "\\newcommand{\\ptildemodel}{\\tilde{p}_{\\rm{model}}}\n", "\\newcommand{\\pencode}{p_{\\rm{encoder}}}\n", "\\newcommand{\\pdecode}{p_{\\rm{decoder}}}\n", "\\newcommand{\\precons}{p_{\\rm{reconstruct}}}\n", "\\newcommand{\\laplace}{\\mathrm{Laplace}} \n", "\\newcommand{\\E}{\\mathbb{E}}\n", "\\newcommand{\\Ls}{\\mathcal{L}}\n", "\\newcommand{\\R}{\\mathbb{R}}\n", "\\newcommand{\\emp}{\\tilde{p}}\n", "\\newcommand{\\lr}{\\alpha}\n", "\\newcommand{\\reg}{\\lambda}\n", "\\newcommand{\\rect}{\\mathrm{rectifier}}\n", "\\newcommand{\\softmax}{\\mathrm{softmax}}\n", "\\newcommand{\\sigmoid}{\\sigma}\n", "\\newcommand{\\softplus}{\\zeta}\n", "\\newcommand{\\KL}{D_{\\mathrm{KL}}}\n", "\\newcommand{\\Var}{\\mathrm{Var}}\n", "\\newcommand{\\standarderror}{\\mathrm{SE}}\n", "\\newcommand{\\Cov}{\\mathrm{Cov}}\n", "\\newcommand{\\normlzero}{L^0}\n", "\\newcommand{\\normlone}{L^1}\n", "\\newcommand{\\normltwo}{L^2}\n", "\\newcommand{\\normlp}{L^p}\n", "\\newcommand{\\normmax}{L^\\infty}\n", "\\newcommand{\\parents}{Pa} \n", "\\DeclareMathOperator*{\\argmax}{arg\\,max}\n", 
"\\DeclareMathOperator*{\\argmin}{arg\\,min}\n", "\\DeclareMathOperator{\\sign}{sign}\n", "\\DeclareMathOperator{\\Tr}{Tr}\n", "\\let\\ab\\allowbreak\n", "\\usepackage[dvipsnames]{xcolor}\n", "\\newif\\ifcomments\n", "\\commentstrue\n", "\\ifcomments\n", " \\newcommand{\\colornote}[3]{{\\color{#1}\\bf{#2: #3}\\normalfont}}\n", "\\else\n", " \\newcommand{\\colornote}[3]{}\n", "\\fi\n", "\\newcommand{\\JB}[1]{\\colornote{red}{JB}{#1}}\n", "\\newcommand{\\YC}[1]{\\colornote{red}{YC}{#1}}\n", "\\newcommand{\\SP}[1]{{\\footnotesize\\colornote{red}{SP}{#1}}}\n", "\\newcommand{\\TODO}[1]{{\\footnotesize\\color{WildStrawberry}\\bf{TODO: #1}}}\n", "\\newcommand{\\TENTATIVE}[1]{{\\footnotesize\\color{PineGreen}\\bf{[#1]}}}\n", "\\newcommand{\\rebuttal}[1]{{\\leavevmode\\color{PineGreen}#1}} \n", "\\newcommand{\\cmark}{\\ding{51}}\n", "\\newcommand{\\xmark}{\\ding{55}}\n", "\\newcommand{\\kldiv}{\\mathrm{D}_{\\rm KL}}\n", "\\newcommand{\\klbars}{\\,|\\,}\n", "\\newcommand{\\gaussian}[2]{\\mathcal{N}\\left({#1},{#2}\\right)}\n", "\\newcommand{\\expectation}[2]{\\mathbb{E}_{#1}\\left[{#2}\\right]}\n", "\\newcommand{\\variance}[2]{\\text{Var}_{#1}\\left[{#2}\\right]}\n", "\\newcommand{\\covariance}[2]{\\text{Cov}_{#1}\\left[{#2}\\right]}\n", "\\newcommand{\\real}{\\mathbb{R}}\n", "\\newcommand{\\targetloss}{\\mathcal{L}}\n", "\\newcommand{\\nnloss}{\\mathcal{L}^{\\supportdataset}}\n", "\\newcommand{\\data}{x}\n", "\\newcommand{\\target}{y}\n", "\\newcommand{\\trainingData}{\\mathcal{D}_\\textrm{train}}\n", "\\newcommand{\\validData}{\\mathcal{D}_\\textrm{valid}}\n", "\\newcommand{\\testData}{\\mathcal{D}_\\textrm{test}}\n", "\\newcommand{\\instruction}{\\mathcal{\\rho}}\n", "\\newcommand{\\demotrain}{\\mathcal{D}_{\\text{train}}}\n", "\\newcommand{\\demo}{demo}\n", "\\newcommand{\\demoQ}{Q}\n", "\\newcommand{\\demoA}{A}\n", "\\newcommand{\\task}{T}\n", "\\newcommand{\\subsize}{K}\n", "\\newcommand{\\proposal}{\\mathcal{U}}\n", "\\newcommand{\\logp}[1]{logP[{#1}]}\n", "\\newcommand{\\logpg}[1]{logP Gain[{#1}]}\n", "\\newcommand{\\execacc}[1]{ExecACC[{#1}]}\n", "\\newcommand{\\fexec}{f_\\textrm{exec}}\n", "\\newcommand{\\threshold}{\\tau}\n", "\\newcommand{\\gram}[2]{K^{\\theta}_{{#1}{#2}}}\n", "\\newcommand{\\algname}{APE\\xspace}\n", "\\newcommand{\\given}{\\,|\\,}\n", "\\newcommand{\\M}{\\mathcal{M}}\n", "\\usepackage[utf8]{inputenc} \n", "\\usepackage[T1]{fontenc} \n", "\\usepackage{hyperref} \n", "\\usepackage{url} \n", "\\usepackage{booktabs} \n", "\\usepackage{amsfonts} \n", "\\usepackage{nicefrac} \n", "\\usepackage{microtype} \n", "\\usepackage{subcaption}\n", "\\usepackage{multirow}\n", "\\usepackage{makecell}\n", "\\usepackage{graphicx}\n", "\\usepackage{algorithm}\n", "\\usepackage{algorithmic}\n", "\\usepackage{amssymb}\n", "\\usepackage{pifont}\n", "\\usepackage{adjustbox}\n", "\\usepackage{enumitem}\n", "\\usepackage{setspace} \n", "\\usepackage{etoolbox}\n", "\\usepackage{cleveref}\n", "\\usepackage{xspace} \n", "\\usepackage{wrapfig} \n", "\\usepackage{array}\n", "\\usepackage{calc}\n", "\\usepackage[symbol]{footmisc}\n", "\\usepackage[bb=dsserif]{mathalpha}\n", "\\AtBeginEnvironment{quote}{\\par\\singlespacing\\small}\n", "\\renewcommand{\\paragraph}[1]{\\textbf{#1}\\hspace{0.8em}}\n", "\\newif\\ifworkshop\n", "\\workshopfalse\n", "\\ifworkshop\n", " \\newcommand{\\workshoponly}[1]{#1}\n", " \\newcommand{\\workshopexclude}[1]{}\n", "\\else\n", " \\newcommand{\\workshoponly}[1]{}\n", " \\newcommand{\\workshopexclude}[1]{#1}\n", "\\fi\n", "\\title{Large Language Models are Human-Level 
Prompt Engineers}\n", "\\author{Yongchao Zhou$^{1,2,*}$, Andrei Ioan Muresanu$^{2,3,*}$, Ziwen Han$^{1,2,*}$, Keiran Paster$^{1,2}$, \\\\\\textbf{Silviu Pitis$^{1,2}$, Harris Chan$^{1,2}$, Jimmy Ba$^{1,2}$} \\\\\n", "$^1$University of Toronto\\quad $^2$Vector Institute\\quad $^3$University of Waterloo\\quad $^*$Equal contribution\\\\\n", "\\texttt{\\{yczhou,hanziwen,keirp,spitis,hchan,jba\\}@cs.toronto.edu}\\\\\n", "\\texttt{\\{andrei.muresanu\\}@uwaterloo.ca}\n", "}\n", "\\newcommand{\\fix}{\\marginpar{FIX}}\n", "\\newcommand{\\new}{\\marginpar{NEW}}\n", "\\usepackage{subfiles} \n", "\\iclrfinalcopy \n", "\\begin{document}\n", "\\maketitle\n", "\\renewcommand*{\\thefootnote}{\\arabic{footnote}}\n", "\\setcounter{footnote}{0}\n", "\\begin{abstract}\n", "\\footnote{\\ Our code is available at \\url{https://github.com/keirp/automatic_prompt_engineer}.}\n", "\\end{abstract}\n", "\\section{Introduction}\\label{sec:intro}\n", "The combination of scale and attention-based architectures has resulted in language models possessing an unprecedented level of generality \\citep{kaplan2020scaling,vaswani2017attention}. These so-called ``large language models'' (LLMs) have shown remarkable, often superhuman, capabilities across a diverse range of tasks, including both zero-shot and few-shot setups \\citep{brown2020language,srivastava2022beyond}. With generality, however, there comes a question of control: how can we make LLMs do what we want them to do? \n", "To answer this question and steer LLMs toward desired behaviors, recent work has considered fine-tuning \\citep{ouyang2022training,ziegler2019fine}, in-context learning \\citep{brown2020language}, and several forms of prompt generation \\citep{gao2021prompting}, including both differentiable tuning of soft prompts \\citep{qin2021learning,lester2021power} and natural language prompt engineering \\citep{reynolds2021prompt}. The latter is of particular interest, as it provides a natural interface for humans to communicate with machines and may be of great relevance not only to LLMs but to other generalist models such as prompted image synthesizers \\citep{rombach2022high,ramesh2022hierarchical}, for which public interest in prompt design and generation has also emerged (see Appendix \\ref{appdx_wild_prompt_engineering} for examples).\n", "Behind this interest is the fact that plain language prompts do not always produce the desired results, even when those results are possible to produce with alternative instructions. Thus, human users must experiment with a wide range of prompts to elicit desired behaviors, as they have little knowledge of how compatible instructions are with a particular model.\n", "We can understand this by viewing LLMs as black-box computers that execute programs specified by natural language instructions: while they can execute a broad range of natural language programs, the way these programs are processed may not be intuitive for humans, and the quality of instruction can only be measured when executing these instructions on a downstream task \\citep{sanh2022multitask, wei2021finetuned}. \n", "\\workshopexclude{\n", "To reduce the human effort involved in creating and validating effective instructions, we propose a novel algorithm using LLMs to generate and select instructions automatically. We call this problem \\textit{natural language program synthesis} and propose to address it as a black-box optimization problem using LLMs to generate and search over heuristically viable candidate solutions. 
\n", "\\begin{itemize}\n", " \\item We frame instruction generation as natural language program synthesis, formulate it as a \n", "\\end{itemize}\n", "}\n", "\\workshoponly{\n", "\\begin{itemize}\n", " \\item We frame instruction generation as natural language program synthesis, formulate it as a \n", "\\end{itemize}\n", "}\n", "\\begin{figure}\n", " \\centering\n", " \\vspace{-0.05in}\n", "\\begin{subfigure}[b]{0.48\\textwidth}\n", " \\hfill\\includegraphics[width=1.0\\linewidth]{figures/illustration/APE_pipe.pdf}\\vspace{0.75em}\n", " \\caption{Automatic Prompt Engineer (APE) workflow}\n", "\\end{subfigure}\n", " \\begin{subfigure}[b]{0.49\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/model_size.pdf}\n", " \\caption{Interquartile mean across 24 tasks}\n", " \\end{subfigure}\n", "\\end{figure}\n", "\\section{Related Work}\n", "\\paragraph{Large Language Models}\n", "\\paragraph{Prompt Engineering}\n", "Prompting offers a natural and intuitive interface for humans to interact with and use generalist models such as LLMs. Due to its flexibility, prompting has been widely used as a generic method for NLP tasks \\citep{schick2021exploiting, brown2020language, sanh2022multitask}. However, LLMs require careful prompt engineering, either manually \\citep{reynolds2021prompt} or automatically \\citep{gao2021making, shin2020autoprompt}, as models do not seem to understand the prompts in the same way a human would \\citep{webson2021prompt, lu2021fantastically}. Though many successful prompt tuning methods perform optimization over a continuous space using gradient-based methods \\citep{liu2021gpt, qin2021learning,lester2021power}, this becomes less practical with scale, as computing gradients becomes increasingly expensive and access to models shifts to APIs that may not provide gradient access. \n", "In our paper, we borrow components from discrete prompt search methods, such as prompt generation \\citep{gao2021making, ben2021pada}, prompt scoring \\citep{davison2019commonsense} and prompt paraphrasing \\citep{jiang2020can, yuan2021bartscore} to optimize instructions by searching directly in the natural language hypothesis space. \n", "As compared to this past work, which uses specialized models for each component and leans heavily on human templates, we show that the entire search can be conducted by a single LLM.\n", "\\paragraph{Program Synthesis} Program synthesis involves the automatic search over a ``program space'' to find a program satisfying a particular specification \\citep{gulwani2017program}. \n", "Modern program synthesis admits a wide variety of specifications, including input-output examples \\citep{ellis2021dreamcoder,wong2021leveraging} and natural language \\citep{jain2022jigsaw}. The range of feasible program spaces to search over has also grown, from historically restrictive domain-specific languages to general-purpose programming languages \\citep{austin2021program}. In contrast to prior approaches that require a suitable structured hypothesis space and library of components \\citep{liang2010learning, ellis2018learning}, we leverage the structure provided by LLMs to search over the space of natural language programs. \n", "Using inference models is a standard practice to speed up the search by restricting the search space to a limited space of possible expressions \\citep{menon2013machine, lee2018accelerating, devlin2017neural, ellis2021dreamcoder}. 
\n", "Inspired by this, we use LLMs as approximate inference models to generate program candidates based on a small set of demonstrations. Unlike classical program synthesis, our inference models do not require any training and generalize well to various tasks.\n", "\\workshoponly{\n", "\\begin{figure}\n", " \\centering\n", " \\includegraphics[width=0.8\\linewidth]{figures/illustration/APE_pipeline.pdf}\n", "\\end{figure}\n", "}\n", "\\workshopexclude{\\section{Natural Language Program Synthesis using LLMs}}\n", "\\workshoponly{\\section{Method in Detail}}\n", "We consider a task specified by a dataset $\\trainingData = \\{(\\demoQ, \\demoA)\\}$ of input/output demonstrations sampled from population $\\mathcal{X}$, and a prompted model $\\M$. \n", "The goal of natural language program synthesis is to find a single instruction $\\instruction$ such that, when $\\M$ is prompted with the concatenation $[\\instruction ; \\demoQ]$ of instruction and a given input, $\\M$ produces the corresponding output $\\demoA$. More formally, we frame this as an optimization problem, where we seek instruction $\\instruction$ that maximizes the expectation of some per-sample score $f(\\instruction, \\demoQ, \\demoA)$ over possible $(\\demoQ, \\demoA)$:\n", "\\begin{equation}\\label{eq:score}\n", "\\instruction^{\\star} = \\argmax_\\instruction f(\\instruction) = \\argmax_\\instruction \\expectation{(\\demoQ, \\demoA)}{f(\\instruction,\\demoQ, \\demoA)}\n", "\\end{equation}\n", "Note that in general, $\\demoQ$ may be the empty string, such that we are optimizing $\\instruction$ as a prompt that directly produces outputs $\\{A\\}$.\n", "While this task has been widely attempted by humans, we have little knowledge of how compatible any particular instruction is with model $\\M$. Thus, we propose to treat this human-intractable question as a black-box optimization process guided by LLMs. \n", "Our algorithm, APE, uses LLMs in each of two key components, proposal and scoring. As shown in Figure \\ref{fig:highlight} and summarized in Algorithm \\ref{alg:ape}, APE first proposes a few candidate prompts, and then filters/refines the candidate set according to a chosen score function, ultimately choosing the instruction with the highest score. We discuss options for proposal and scoring next. \n", "\\subsection{Initial Proposal Distributions}\\label{subsec:initialU}\n", "Due to the infinitely large search space, finding the right instruction can be extremely difficult, which has rendered natural language program synthesis historically intractable. Recent progress in NLP has shown language models are very good at generating diverse natural language text. Therefore, we consider leveraging a pretrained LLM to propose a good set $\\proposal$ of candidate solutions that will guide our search procedure. 
While random samples from LLMs are unlikely to produce the desired ($\\demoQ, \\demoA$) pairs, we can instead ask the LLM to approximately infer the most likely instructions with a high score, given the input/output demonstrations; i.e., to approximately sample from $P(\\instruction\\given\\trainingData,\\ f(\\instruction)\\textrm{ is high})$.\n", "\\begin{wrapfigure}{R}{0.3\\textwidth}\n", "\\centering\n", "\\vspace{-0.1in}\n", "\\includegraphics[width=0.275\\textwidth]{figures/illustration/template_combined.pdf}\n", "\\caption{Prompts for LLMs}\n", "\\vspace{-0.35in}\n", "\\label{fig:llm_template}\n", "\\end{wrapfigure}\n", "\\newcommand{\\algspacer}{\\hspace{1em}}\n", "\\begin{algorithm}[tb]\\small\n", " \\caption{Automatic Prompt Engineer (APE)}\\label{alg:ape}\n", " \\begin{algorithmic}\n", " \\STATE {\\bfseries Require:} $\\trainingData \\gets \\{(\\demoQ, \\demoA)\\}_n$: training examples, $f:\\instruction\\times\\mathcal{D} \\mapsto \\mathbb{R}$: score function\n", " \\end{algorithmic}\n", " \\begin{algorithmic}[1]\n", " \\WHILE{not converged}\n", " \\STATE Choose a random training subset $\\widetilde{\\mathcal{D}}_\\textrm{train} \\subset \\trainingData$. \n", " \\FORALL{$\\instruction$ in $\\proposal$}\n", " \\STATE Evaluate score on the subset $\\widetilde{s} \\gets f(\\instruction, \\widetilde{\\mathcal{D}}_\\textrm{train})$ (see Section~\\ref{sec:score_function})\n", " \\ENDFOR\n", " \\STATE Filter the top $k\\%$ of instructions with the highest scores to obtain $\\proposal_k$, and update $\\proposal \\gets \\proposal_k$\n", " \\ENDWHILE\n", " \\end{algorithmic}\n", " \\begin{algorithmic}\n", " \\STATE {\\bfseries Return} instruction with the highest score $\\instruction^{\\star} \\gets \\arg\\max_{\\instruction \\in \\proposal_k} f(\\instruction, \\trainingData)$\n", " \\end{algorithmic}\n", "\\end{algorithm}\n", "\\paragraph{Reverse Mode Generation} Although the ``forward'' model works out of the box for most pretrained LLMs, translating $P(\\instruction\\given\\trainingData,\\ f(\\instruction)\\textrm{ is high})$ into words requires custom engineering across different tasks. This is because while instructions are typically found at the beginning of passages, the ``forward'' model only generates text from left to right, which requires the instruction to be predicted at the end of the prompt. Therefore, we desire a more flexible approach such that the instruction can be anywhere in the text. To address this, we consider ``reverse'' mode generation, which uses an LLM with infilling capabilities---e.g., T5~\\citep{raffel2020exploring}, GLM \\citep{du2022glm}, and InsertGPT~\\citep{bavarian2022efficient}---to infer the missing instructions. Our ``reverse'' model directly samples from $P(\\instruction\\given\\trainingData,\\ f(\\instruction)\\textrm{ is high})$ by filling in the blank. We show an example of such a template in Figure \\ref{fig:llm_template} (Middle).\n", "\\subsection{Score Functions} \\label{sec:score_function}\n", "\\paragraph{Efficient score estimation}\n", "Then, we repeat this process until a small set of candidates is left, which we evaluate on the entire training dataset. \n", "This adaptive filtering scheme significantly improves computational efficiency by reserving exact score computation for high-quality candidates while drastically reducing the computation spent on low-quality ones. 
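\n", "To make this adaptive filtering concrete, the following minimal Python sketch (ours, for illustration; not the released implementation) scores the surviving candidates on small random subsets of $\\trainingData$, keeps only the best-scoring fraction each round, and finally evaluates the few survivors on the full training set; the 0/1 execution-accuracy scorer is just one simple instance of the score function $f$, and the \\texttt{llm\\_complete} helper is an assumed interface to the prompted model $\\M$.\n", "\\begin{verbatim}\n", "import random\n", "\n", "def execution_accuracy(instruction, demos, llm_complete):\n", "    # 0/1 exact-match score averaged over (Q, A) demos: an estimate of f\n", "    correct = 0\n", "    for q, a in demos:\n", "        pred = llm_complete(instruction + '\\n\\nInput: ' + q + '\\nOutput:')\n", "        correct += int(pred.strip().lower() == a.strip().lower())\n", "    return correct / len(demos)\n", "\n", "def adaptive_filter(candidates, train_data, llm_complete,\n", "                    subset_size=20, keep_frac=0.5, min_keep=5):\n", "    # Repeatedly score survivors on small random subsets and keep the best\n", "    survivors = list(candidates)\n", "    while len(survivors) > min_keep:\n", "        subset = random.sample(train_data, min(subset_size, len(train_data)))\n", "        survivors.sort(reverse=True,\n", "                       key=lambda r: execution_accuracy(r, subset, llm_complete))\n", "        survivors = survivors[:max(min_keep, int(len(survivors) * keep_frac))]\n", "    # Final exact evaluation of the remaining candidates on all training data\n", "    return max(survivors,\n", "               key=lambda r: execution_accuracy(r, train_data, llm_complete))\n", "\\end{verbatim}\n", "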
\n", "We note that a similar score estimation scheme has been used in previous works \\citep{li2022competition, maclaurin2015firefly}.\n", "\\subsection{Iterative Proposal Distributions}\\label{sec:iterative}\n", "\\begin{wrapfigure}{R}{0.3\\textwidth}\n", "\\centering\n", "\\vspace{-0.1in}\n", "\\includegraphics[width=0.275\\textwidth]{figures/illustration/template_resample.pdf}\n", "\\caption{Resampling}\n", "\\vspace{-0.15in}\n", "\\label{fig:template_resampling}\n", "\\end{wrapfigure}\n", "\\workshopexclude{\\section{Large Language Models are Human-Level Prompt Engineers}}\n" ], "output": { "What experiments do you suggest doing?": [ "1. Performance comparison of zero-shot instruction generation: The authors should compare the proposed Automatic Prompt Engineer (APE) with other auto prompt generation methods and human writing prompts. This experiment should focus on generating instructions for zero-shot tasks. The authors should test the generated instructions on the tasks with different levels of difficulties. For example, using instruction induction tasks as easier tasks, and using BIG-Bench Hard as more challenging ones.", "2. APE-generated instructions for few-shot in-context learning: The authors should apply the APE-generated instructions on few-shot in-context learning. Then compare the performance of APE-generated-instruction-only, in-context-only, and APE-generated-instruction with in-context examples.", "3. APE for zero-shot chain of thought: The authors should use APE to further optimize the zero-shot chain- of- thought prompt. For example, the authors could use APE to search an alternative zero-shot chain-of-thought prompt instead of \u201cLet\u2019s think step by step\u201d, and then compare their performance on some reasoning tasks.", "4. APE steer models toward truthfulness and informativeness: The authors should use APE to learn instructions that maximize three TruthfulQA metrics individually. These three metrics are: truthfulness, informativeness, and a combination of both truthfulness and informativeness. Then analysis the performance and differences of the APE-generated instructions optimized toward these three metrics.", "5. The influence of model size on proposal quality: The authors should use different sizes of models to generate some initial instruction proposals. To compare the proposal quality, each model should generate the same number of instructions, for example, several hundred instructions per model. Then compute the execution accuracy on some test data points.", "6. The influence of proposal quality on APE performance: The authors should compare the APE performance under situations with different proposal quality levels. The difference in proposal quality can come from both model sizes and the number of instruction samples.", "7. The influence of scoring functions: The authors can compute the correlation between the test accuracy and two scoring functions on some tasks to study how good each proposed scoring function is.", "8. The influence of iterative Monte Carlo search: The authors should report the instruction quality changes with increasing iterative rounds. The author should also compare the performance between APE and iterative APE." ], "Why do you suggest these experiments?": [ "1. To demonstrate that if the proposed APE is effective on generating zero-shot instructions for tasks with different levels of difficulties.", "2. 
To show whether the APE-generated instructions can guide models to better leverage in-context examples and thus improve in-context learning performance.", "3. To demonstrate that APE can help optimize parts of existing prompt templates to improve performance.", "4. To see how APE-generated instructions can steer an LLM to generate answers with different styles, and to study the trade-off between truthfulness and informativeness.", "5. To understand how the proposal quality changes as the model size increases.", "6. To understand whether proposal quality matters under the paper's proposed selection method.", "7. To find out which scoring function is better.", "8. To find out whether iterative search improves instruction quality and how important it is." ] }, "paper_info": { "title": "Large Language Models Are Human-Level Prompt Engineers", "authors": [ "Yongchao Zhou", "Andrei Ioan Muresanu", "Ziwen Han", "Keiran Paster", "Silviu Pitis", "Harris Chan", "Jimmy Ba" ], "abstract": "By conditioning on natural language instructions, large language models\n(LLMs) have displayed impressive capabilities as general-purpose computers.\nHowever, task performance depends significantly on the quality of the prompt\nused to steer the model, and most effective prompts have been handcrafted by\nhumans. Inspired by classical program synthesis and the human approach to\nprompt engineering, we propose Automatic Prompt Engineer (APE) for automatic\ninstruction generation and selection. In our method, we treat the instruction\nas the \"program,\" optimized by searching over a pool of instruction candidates\nproposed by an LLM in order to maximize a chosen score function. To evaluate\nthe quality of the selected instruction, we evaluate the zero-shot performance\nof another LLM following the selected instruction. Experiments on 24 NLP tasks\nshow that our automatically generated instructions outperform the prior LLM\nbaseline by a large margin and achieve better or comparable performance to the\ninstructions generated by human annotators on 19/24 tasks. We conduct extensive\nqualitative and quantitative analyses to explore the performance of APE. 
We\nshow that APE-engineered prompts can be applied to steer models toward\ntruthfulness and/or informativeness, as well as to improve few-shot learning\nperformance by simply prepending them to standard in-context learning prompts.\nPlease check out our webpage at\nhttps://sites.google.com/view/automatic-prompt-engineer.", "comments": null }, "raw_data": { "context_before_exp": [ "\n", "\\documentclass{article} \n", "\\usepackage[dvipsnames]{xcolor} \n", "\\usepackage{iclr2023_conference,times}\n", "\n", "\n", "\n", "\n", "\\usepackage{amsmath,amsfonts,bm}\n", "\n", "\n", "\\newcommand{\\figleft}{{\\em (Left)}}\n", "\\newcommand{\\figcenter}{{\\em (Center)}}\n", "\\newcommand{\\figright}{{\\em (Right)}}\n", "\\newcommand{\\figtop}{{\\em (Top)}}\n", "\\newcommand{\\figbottom}{{\\em (Bottom)}}\n", "\\newcommand{\\captiona}{{\\em (a)}}\n", "\\newcommand{\\captionb}{{\\em (b)}}\n", "\\newcommand{\\captionc}{{\\em (c)}}\n", "\\newcommand{\\captiond}{{\\em (d)}}\n", "\n", "\n", "\\newcommand{\\newterm}[1]{{\\bf #1}}\n", "\n", "\n", "\n", "\\def\\figref#1{figure~\\ref{#1}}\n", "\n", "\\def\\Figref#1{Figure~\\ref{#1}}\n", "\\def\\twofigref#1#2{figures \\ref{#1} and \\ref{#2}}\n", "\\def\\quadfigref#1#2#3#4{figures \\ref{#1}, \\ref{#2}, \\ref{#3} and \\ref{#4}}\n", "\n", "\\def\\secref#1{section~\\ref{#1}}\n", "\n", "\\def\\Secref#1{Section~\\ref{#1}}\n", "\n", "\\def\\twosecrefs#1#2{sections \\ref{#1} and \\ref{#2}}\n", "\n", "\\def\\secrefs#1#2#3{sections \\ref{#1}, \\ref{#2} and \\ref{#3}}\n", "\n", "\\def\\eqref#1{equation~\\ref{#1}}\n", "\n", "\\def\\Eqref#1{Equation~\\ref{#1}}\n", "\n", "\\def\\plaineqref#1{\\ref{#1}}\n", "\n", "\\def\\chapref#1{chapter~\\ref{#1}}\n", "\n", "\\def\\Chapref#1{Chapter~\\ref{#1}}\n", "\n", "\\def\\rangechapref#1#2{chapters\\ref{#1}--\\ref{#2}}\n", "\n", "\\def\\algref#1{algorithm~\\ref{#1}}\n", "\n", "\\def\\Algref#1{Algorithm~\\ref{#1}}\n", "\\def\\twoalgref#1#2{algorithms \\ref{#1} and \\ref{#2}}\n", "\\def\\Twoalgref#1#2{Algorithms \\ref{#1} and \\ref{#2}}\n", "\n", "\\def\\partref#1{part~\\ref{#1}}\n", "\n", "\\def\\Partref#1{Part~\\ref{#1}}\n", "\\def\\twopartref#1#2{parts \\ref{#1} and \\ref{#2}}\n", "\n", "\\def\\ceil#1{\\lceil #1 \\rceil}\n", "\\def\\floor#1{\\lfloor #1 \\rfloor}\n", "\\def\\1{\\bm{1}}\n", "\\newcommand{\\train}{\\mathcal{D}}\n", "\\newcommand{\\valid}{\\mathcal{D_{\\mathrm{valid}}}}\n", "\\newcommand{\\test}{\\mathcal{D_{\\mathrm{test}}}}\n", "\n", "\\def\\eps{{\\epsilon}}\n", "\n", "\n", "\n", "\\def\\reta{{\\textnormal{$\\eta$}}}\n", "\\def\\ra{{\\textnormal{a}}}\n", "\\def\\rb{{\\textnormal{b}}}\n", "\\def\\rc{{\\textnormal{c}}}\n", "\\def\\rd{{\\textnormal{d}}}\n", "\\def\\re{{\\textnormal{e}}}\n", "\\def\\rf{{\\textnormal{f}}}\n", "\\def\\rg{{\\textnormal{g}}}\n", "\\def\\rh{{\\textnormal{h}}}\n", "\\def\\ri{{\\textnormal{i}}}\n", "\\def\\rj{{\\textnormal{j}}}\n", "\\def\\rk{{\\textnormal{k}}}\n", "\\def\\rl{{\\textnormal{l}}}\n", "\n", "\\def\\rn{{\\textnormal{n}}}\n", "\\def\\ro{{\\textnormal{o}}}\n", "\\def\\rp{{\\textnormal{p}}}\n", "\\def\\rq{{\\textnormal{q}}}\n", "\\def\\rr{{\\textnormal{r}}}\n", "\\def\\rs{{\\textnormal{s}}}\n", "\\def\\rt{{\\textnormal{t}}}\n", "\\def\\ru{{\\textnormal{u}}}\n", "\\def\\rv{{\\textnormal{v}}}\n", "\\def\\rw{{\\textnormal{w}}}\n", "\\def\\rx{{\\textnormal{x}}}\n", "\\def\\ry{{\\textnormal{y}}}\n", "\\def\\rz{{\\textnormal{z}}}\n", "\n", "\n", "\\def\\rvepsilon{{\\mathbf{\\epsilon}}}\n", "\\def\\rvtheta{{\\mathbf{\\theta}}}\n", "\\def\\rva{{\\mathbf{a}}}\n", "\\def\\rvb{{\\mathbf{b}}}\n", 
"\\def\\rvc{{\\mathbf{c}}}\n", "\\def\\rvd{{\\mathbf{d}}}\n", "\\def\\rve{{\\mathbf{e}}}\n", "\\def\\rvf{{\\mathbf{f}}}\n", "\\def\\rvg{{\\mathbf{g}}}\n", "\\def\\rvh{{\\mathbf{h}}}\n", "\\def\\rvu{{\\mathbf{i}}}\n", "\\def\\rvj{{\\mathbf{j}}}\n", "\\def\\rvk{{\\mathbf{k}}}\n", "\\def\\rvl{{\\mathbf{l}}}\n", "\\def\\rvm{{\\mathbf{m}}}\n", "\\def\\rvn{{\\mathbf{n}}}\n", "\\def\\rvo{{\\mathbf{o}}}\n", "\\def\\rvp{{\\mathbf{p}}}\n", "\\def\\rvq{{\\mathbf{q}}}\n", "\\def\\rvr{{\\mathbf{r}}}\n", "\\def\\rvs{{\\mathbf{s}}}\n", "\\def\\rvt{{\\mathbf{t}}}\n", "\\def\\rvu{{\\mathbf{u}}}\n", "\\def\\rvv{{\\mathbf{v}}}\n", "\\def\\rvw{{\\mathbf{w}}}\n", "\\def\\rvx{{\\mathbf{x}}}\n", "\\def\\rvy{{\\mathbf{y}}}\n", "\\def\\rvz{{\\mathbf{z}}}\n", "\n", "\n", "\\def\\erva{{\\textnormal{a}}}\n", "\\def\\ervb{{\\textnormal{b}}}\n", "\\def\\ervc{{\\textnormal{c}}}\n", "\\def\\ervd{{\\textnormal{d}}}\n", "\\def\\erve{{\\textnormal{e}}}\n", "\\def\\ervf{{\\textnormal{f}}}\n", "\\def\\ervg{{\\textnormal{g}}}\n", "\\def\\ervh{{\\textnormal{h}}}\n", "\\def\\ervi{{\\textnormal{i}}}\n", "\\def\\ervj{{\\textnormal{j}}}\n", "\\def\\ervk{{\\textnormal{k}}}\n", "\\def\\ervl{{\\textnormal{l}}}\n", "\\def\\ervm{{\\textnormal{m}}}\n", "\\def\\ervn{{\\textnormal{n}}}\n", "\\def\\ervo{{\\textnormal{o}}}\n", "\\def\\ervp{{\\textnormal{p}}}\n", "\\def\\ervq{{\\textnormal{q}}}\n", "\\def\\ervr{{\\textnormal{r}}}\n", "\\def\\ervs{{\\textnormal{s}}}\n", "\\def\\ervt{{\\textnormal{t}}}\n", "\\def\\ervu{{\\textnormal{u}}}\n", "\\def\\ervv{{\\textnormal{v}}}\n", "\\def\\ervw{{\\textnormal{w}}}\n", "\\def\\ervx{{\\textnormal{x}}}\n", "\\def\\ervy{{\\textnormal{y}}}\n", "\\def\\ervz{{\\textnormal{z}}}\n", "\n", "\n", "\\def\\rmA{{\\mathbf{A}}}\n", "\\def\\rmB{{\\mathbf{B}}}\n", "\\def\\rmC{{\\mathbf{C}}}\n", "\\def\\rmD{{\\mathbf{D}}}\n", "\\def\\rmE{{\\mathbf{E}}}\n", "\\def\\rmF{{\\mathbf{F}}}\n", "\\def\\rmG{{\\mathbf{G}}}\n", "\\def\\rmH{{\\mathbf{H}}}\n", "\\def\\rmI{{\\mathbf{I}}}\n", "\\def\\rmJ{{\\mathbf{J}}}\n", "\\def\\rmK{{\\mathbf{K}}}\n", "\\def\\rmL{{\\mathbf{L}}}\n", "\\def\\rmM{{\\mathbf{M}}}\n", "\\def\\rmN{{\\mathbf{N}}}\n", "\\def\\rmO{{\\mathbf{O}}}\n", "\\def\\rmP{{\\mathbf{P}}}\n", "\\def\\rmQ{{\\mathbf{Q}}}\n", "\\def\\rmR{{\\mathbf{R}}}\n", "\\def\\rmS{{\\mathbf{S}}}\n", "\\def\\rmT{{\\mathbf{T}}}\n", "\\def\\rmU{{\\mathbf{U}}}\n", "\\def\\rmV{{\\mathbf{V}}}\n", "\\def\\rmW{{\\mathbf{W}}}\n", "\\def\\rmX{{\\mathbf{X}}}\n", "\\def\\rmY{{\\mathbf{Y}}}\n", "\\def\\rmZ{{\\mathbf{Z}}}\n", "\n", "\n", "\\def\\ermA{{\\textnormal{A}}}\n", "\\def\\ermB{{\\textnormal{B}}}\n", "\\def\\ermC{{\\textnormal{C}}}\n", "\\def\\ermD{{\\textnormal{D}}}\n", "\\def\\ermE{{\\textnormal{E}}}\n", "\\def\\ermF{{\\textnormal{F}}}\n", "\\def\\ermG{{\\textnormal{G}}}\n", "\\def\\ermH{{\\textnormal{H}}}\n", "\\def\\ermI{{\\textnormal{I}}}\n", "\\def\\ermJ{{\\textnormal{J}}}\n", "\\def\\ermK{{\\textnormal{K}}}\n", "\\def\\ermL{{\\textnormal{L}}}\n", "\\def\\ermM{{\\textnormal{M}}}\n", "\\def\\ermN{{\\textnormal{N}}}\n", "\\def\\ermO{{\\textnormal{O}}}\n", "\\def\\ermP{{\\textnormal{P}}}\n", "\\def\\ermQ{{\\textnormal{Q}}}\n", "\\def\\ermR{{\\textnormal{R}}}\n", "\\def\\ermS{{\\textnormal{S}}}\n", "\\def\\ermT{{\\textnormal{T}}}\n", "\\def\\ermU{{\\textnormal{U}}}\n", "\\def\\ermV{{\\textnormal{V}}}\n", "\\def\\ermW{{\\textnormal{W}}}\n", "\\def\\ermX{{\\textnormal{X}}}\n", "\\def\\ermY{{\\textnormal{Y}}}\n", "\\def\\ermZ{{\\textnormal{Z}}}\n", "\n", "\n", "\\def\\vzero{{\\bm{0}}}\n", "\\def\\vone{{\\bm{1}}}\n", 
"\\def\\vmu{{\\bm{\\mu}}}\n", "\\def\\vtheta{{\\bm{\\theta}}}\n", "\\def\\va{{\\bm{a}}}\n", "\\def\\vb{{\\bm{b}}}\n", "\\def\\vc{{\\bm{c}}}\n", "\\def\\vd{{\\bm{d}}}\n", "\\def\\ve{{\\bm{e}}}\n", "\\def\\vf{{\\bm{f}}}\n", "\\def\\vg{{\\bm{g}}}\n", "\\def\\vh{{\\bm{h}}}\n", "\\def\\vi{{\\bm{i}}}\n", "\\def\\vj{{\\bm{j}}}\n", "\\def\\vk{{\\bm{k}}}\n", "\\def\\vl{{\\bm{l}}}\n", "\\def\\vm{{\\bm{m}}}\n", "\\def\\vn{{\\bm{n}}}\n", "\\def\\vo{{\\bm{o}}}\n", "\\def\\vp{{\\bm{p}}}\n", "\\def\\vq{{\\bm{q}}}\n", "\\def\\vr{{\\bm{r}}}\n", "\\def\\vs{{\\bm{s}}}\n", "\\def\\vt{{\\bm{t}}}\n", "\\def\\vu{{\\bm{u}}}\n", "\\def\\vv{{\\bm{v}}}\n", "\\def\\vw{{\\bm{w}}}\n", "\\def\\vx{{\\bm{x}}}\n", "\\def\\vy{{\\bm{y}}}\n", "\\def\\vz{{\\bm{z}}}\n", "\n", "\n", "\\def\\evalpha{{\\alpha}}\n", "\\def\\evbeta{{\\beta}}\n", "\\def\\evepsilon{{\\epsilon}}\n", "\\def\\evlambda{{\\lambda}}\n", "\\def\\evomega{{\\omega}}\n", "\\def\\evmu{{\\mu}}\n", "\\def\\evpsi{{\\psi}}\n", "\\def\\evsigma{{\\sigma}}\n", "\\def\\evtheta{{\\theta}}\n", "\\def\\eva{{a}}\n", "\\def\\evb{{b}}\n", "\\def\\evc{{c}}\n", "\\def\\evd{{d}}\n", "\\def\\eve{{e}}\n", "\\def\\evf{{f}}\n", "\\def\\evg{{g}}\n", "\\def\\evh{{h}}\n", "\\def\\evi{{i}}\n", "\\def\\evj{{j}}\n", "\\def\\evk{{k}}\n", "\\def\\evl{{l}}\n", "\\def\\evm{{m}}\n", "\\def\\evn{{n}}\n", "\\def\\evo{{o}}\n", "\\def\\evp{{p}}\n", "\\def\\evq{{q}}\n", "\\def\\evr{{r}}\n", "\\def\\evs{{s}}\n", "\\def\\evt{{t}}\n", "\\def\\evu{{u}}\n", "\\def\\evv{{v}}\n", "\\def\\evw{{w}}\n", "\\def\\evx{{x}}\n", "\\def\\evy{{y}}\n", "\\def\\evz{{z}}\n", "\n", "\n", "\\def\\mA{{\\bm{A}}}\n", "\\def\\mB{{\\bm{B}}}\n", "\\def\\mC{{\\bm{C}}}\n", "\\def\\mD{{\\bm{D}}}\n", "\\def\\mE{{\\bm{E}}}\n", "\\def\\mF{{\\bm{F}}}\n", "\\def\\mG{{\\bm{G}}}\n", "\\def\\mH{{\\bm{H}}}\n", "\\def\\mI{{\\bm{I}}}\n", "\\def\\mJ{{\\bm{J}}}\n", "\\def\\mK{{\\bm{K}}}\n", "\\def\\mL{{\\bm{L}}}\n", "\\def\\mM{{\\bm{M}}}\n", "\\def\\mN{{\\bm{N}}}\n", "\\def\\mO{{\\bm{O}}}\n", "\\def\\mP{{\\bm{P}}}\n", "\\def\\mQ{{\\bm{Q}}}\n", "\\def\\mR{{\\bm{R}}}\n", "\\def\\mS{{\\bm{S}}}\n", "\\def\\mT{{\\bm{T}}}\n", "\\def\\mU{{\\bm{U}}}\n", "\\def\\mV{{\\bm{V}}}\n", "\\def\\mW{{\\bm{W}}}\n", "\\def\\mX{{\\bm{X}}}\n", "\\def\\mY{{\\bm{Y}}}\n", "\\def\\mZ{{\\bm{Z}}}\n", "\\def\\mBeta{{\\bm{\\beta}}}\n", "\\def\\mPhi{{\\bm{\\Phi}}}\n", "\\def\\mLambda{{\\bm{\\Lambda}}}\n", "\\def\\mSigma{{\\bm{\\Sigma}}}\n", "\n", "\n", "\\DeclareMathAlphabet{\\mathsfit}{\\encodingdefault}{\\sfdefault}{m}{sl}\n", "\\SetMathAlphabet{\\mathsfit}{bold}{\\encodingdefault}{\\sfdefault}{bx}{n}\n", "\\newcommand{\\tens}[1]{\\bm{\\mathsfit{#1}}}\n", "\\def\\tA{{\\tens{A}}}\n", "\\def\\tB{{\\tens{B}}}\n", "\\def\\tC{{\\tens{C}}}\n", "\\def\\tD{{\\tens{D}}}\n", "\\def\\tE{{\\tens{E}}}\n", "\\def\\tF{{\\tens{F}}}\n", "\\def\\tG{{\\tens{G}}}\n", "\\def\\tH{{\\tens{H}}}\n", "\\def\\tI{{\\tens{I}}}\n", "\\def\\tJ{{\\tens{J}}}\n", "\\def\\tK{{\\tens{K}}}\n", "\\def\\tL{{\\tens{L}}}\n", "\\def\\tM{{\\tens{M}}}\n", "\\def\\tN{{\\tens{N}}}\n", "\\def\\tO{{\\tens{O}}}\n", "\\def\\tP{{\\tens{P}}}\n", "\\def\\tQ{{\\tens{Q}}}\n", "\\def\\tR{{\\tens{R}}}\n", "\\def\\tS{{\\tens{S}}}\n", "\\def\\tT{{\\tens{T}}}\n", "\\def\\tU{{\\tens{U}}}\n", "\\def\\tV{{\\tens{V}}}\n", "\\def\\tW{{\\tens{W}}}\n", "\\def\\tX{{\\tens{X}}}\n", "\\def\\tY{{\\tens{Y}}}\n", "\\def\\tZ{{\\tens{Z}}}\n", "\n", "\n", "\n", "\\def\\gA{{\\mathcal{A}}}\n", "\\def\\gB{{\\mathcal{B}}}\n", "\\def\\gC{{\\mathcal{C}}}\n", "\\def\\gD{{\\mathcal{D}}}\n", "\\def\\gE{{\\mathcal{E}}}\n", "\\def\\gF{{\\mathcal{F}}}\n", 
"\\def\\gG{{\\mathcal{G}}}\n", "\\def\\gH{{\\mathcal{H}}}\n", "\\def\\gI{{\\mathcal{I}}}\n", "\\def\\gJ{{\\mathcal{J}}}\n", "\\def\\gK{{\\mathcal{K}}}\n", "\\def\\gL{{\\mathcal{L}}}\n", "\\def\\gM{{\\mathcal{M}}}\n", "\\def\\gN{{\\mathcal{N}}}\n", "\\def\\gO{{\\mathcal{O}}}\n", "\\def\\gP{{\\mathcal{P}}}\n", "\\def\\gQ{{\\mathcal{Q}}}\n", "\\def\\gR{{\\mathcal{R}}}\n", "\\def\\gS{{\\mathcal{S}}}\n", "\\def\\gT{{\\mathcal{T}}}\n", "\\def\\gU{{\\mathcal{U}}}\n", "\\def\\gV{{\\mathcal{V}}}\n", "\\def\\gW{{\\mathcal{W}}}\n", "\\def\\gX{{\\mathcal{X}}}\n", "\\def\\gY{{\\mathcal{Y}}}\n", "\\def\\gZ{{\\mathcal{Z}}}\n", "\n", "\n", "\\def\\sA{{\\mathbb{A}}}\n", "\\def\\sB{{\\mathbb{B}}}\n", "\\def\\sC{{\\mathbb{C}}}\n", "\\def\\sD{{\\mathbb{D}}}\n", "\n", "\n", "\\def\\sF{{\\mathbb{F}}}\n", "\\def\\sG{{\\mathbb{G}}}\n", "\\def\\sH{{\\mathbb{H}}}\n", "\\def\\sI{{\\mathbb{I}}}\n", "\\def\\sJ{{\\mathbb{J}}}\n", "\\def\\sK{{\\mathbb{K}}}\n", "\\def\\sL{{\\mathbb{L}}}\n", "\\def\\sM{{\\mathbb{M}}}\n", "\\def\\sN{{\\mathbb{N}}}\n", "\\def\\sO{{\\mathbb{O}}}\n", "\\def\\sP{{\\mathbb{P}}}\n", "\\def\\sQ{{\\mathbb{Q}}}\n", "\\def\\sR{{\\mathbb{R}}}\n", "\\def\\sS{{\\mathbb{S}}}\n", "\\def\\sT{{\\mathbb{T}}}\n", "\\def\\sU{{\\mathbb{U}}}\n", "\\def\\sV{{\\mathbb{V}}}\n", "\\def\\sW{{\\mathbb{W}}}\n", "\\def\\sX{{\\mathbb{X}}}\n", "\\def\\sY{{\\mathbb{Y}}}\n", "\\def\\sZ{{\\mathbb{Z}}}\n", "\n", "\n", "\\def\\emLambda{{\\Lambda}}\n", "\\def\\emA{{A}}\n", "\\def\\emB{{B}}\n", "\\def\\emC{{C}}\n", "\\def\\emD{{D}}\n", "\\def\\emE{{E}}\n", "\\def\\emF{{F}}\n", "\\def\\emG{{G}}\n", "\\def\\emH{{H}}\n", "\\def\\emI{{I}}\n", "\\def\\emJ{{J}}\n", "\\def\\emK{{K}}\n", "\\def\\emL{{L}}\n", "\\def\\emM{{M}}\n", "\\def\\emN{{N}}\n", "\\def\\emO{{O}}\n", "\\def\\emP{{P}}\n", "\\def\\emQ{{Q}}\n", "\\def\\emR{{R}}\n", "\\def\\emS{{S}}\n", "\\def\\emT{{T}}\n", "\\def\\emU{{U}}\n", "\\def\\emV{{V}}\n", "\\def\\emW{{W}}\n", "\\def\\emX{{X}}\n", "\\def\\emY{{Y}}\n", "\\def\\emZ{{Z}}\n", "\\def\\emSigma{{\\Sigma}}\n", "\n", "\n", "\n", "\\newcommand{\\etens}[1]{\\mathsfit{#1}}\n", "\\def\\etLambda{{\\etens{\\Lambda}}}\n", "\\def\\etA{{\\etens{A}}}\n", "\\def\\etB{{\\etens{B}}}\n", "\\def\\etC{{\\etens{C}}}\n", "\\def\\etD{{\\etens{D}}}\n", "\\def\\etE{{\\etens{E}}}\n", "\\def\\etF{{\\etens{F}}}\n", "\\def\\etG{{\\etens{G}}}\n", "\\def\\etH{{\\etens{H}}}\n", "\\def\\etI{{\\etens{I}}}\n", "\\def\\etJ{{\\etens{J}}}\n", "\\def\\etK{{\\etens{K}}}\n", "\\def\\etL{{\\etens{L}}}\n", "\\def\\etM{{\\etens{M}}}\n", "\\def\\etN{{\\etens{N}}}\n", "\\def\\etO{{\\etens{O}}}\n", "\\def\\etP{{\\etens{P}}}\n", "\\def\\etQ{{\\etens{Q}}}\n", "\\def\\etR{{\\etens{R}}}\n", "\\def\\etS{{\\etens{S}}}\n", "\\def\\etT{{\\etens{T}}}\n", "\\def\\etU{{\\etens{U}}}\n", "\\def\\etV{{\\etens{V}}}\n", "\\def\\etW{{\\etens{W}}}\n", "\\def\\etX{{\\etens{X}}}\n", "\\def\\etY{{\\etens{Y}}}\n", "\\def\\etZ{{\\etens{Z}}}\n", "\n", "\n", "\\newcommand{\\pdata}{p_{\\rm{data}}}\n", "\n", "\\newcommand{\\ptrain}{\\hat{p}_{\\rm{data}}}\n", "\\newcommand{\\Ptrain}{\\hat{P}_{\\rm{data}}}\n", "\n", "\\newcommand{\\pmodel}{p_{\\rm{model}}}\n", "\\newcommand{\\Pmodel}{P_{\\rm{model}}}\n", "\\newcommand{\\ptildemodel}{\\tilde{p}_{\\rm{model}}}\n", "\n", "\\newcommand{\\pencode}{p_{\\rm{encoder}}}\n", "\\newcommand{\\pdecode}{p_{\\rm{decoder}}}\n", "\\newcommand{\\precons}{p_{\\rm{reconstruct}}}\n", "\n", "\\newcommand{\\laplace}{\\mathrm{Laplace}} \n", "\n", "\\newcommand{\\E}{\\mathbb{E}}\n", "\\newcommand{\\Ls}{\\mathcal{L}}\n", "\\newcommand{\\R}{\\mathbb{R}}\n", 
"\\newcommand{\\emp}{\\tilde{p}}\n", "\\newcommand{\\lr}{\\alpha}\n", "\\newcommand{\\reg}{\\lambda}\n", "\\newcommand{\\rect}{\\mathrm{rectifier}}\n", "\\newcommand{\\softmax}{\\mathrm{softmax}}\n", "\\newcommand{\\sigmoid}{\\sigma}\n", "\\newcommand{\\softplus}{\\zeta}\n", "\\newcommand{\\KL}{D_{\\mathrm{KL}}}\n", "\\newcommand{\\Var}{\\mathrm{Var}}\n", "\\newcommand{\\standarderror}{\\mathrm{SE}}\n", "\\newcommand{\\Cov}{\\mathrm{Cov}}\n", "\n", "\n", "\n", "\\newcommand{\\normlzero}{L^0}\n", "\\newcommand{\\normlone}{L^1}\n", "\\newcommand{\\normltwo}{L^2}\n", "\\newcommand{\\normlp}{L^p}\n", "\\newcommand{\\normmax}{L^\\infty}\n", "\n", "\\newcommand{\\parents}{Pa} \n", "\n", "\\DeclareMathOperator*{\\argmax}{arg\\,max}\n", "\\DeclareMathOperator*{\\argmin}{arg\\,min}\n", "\n", "\\DeclareMathOperator{\\sign}{sign}\n", "\\DeclareMathOperator{\\Tr}{Tr}\n", "\\let\\ab\\allowbreak\n", "\\usepackage[dvipsnames]{xcolor}\n", "\n", "\\newif\\ifcomments\n", "\\commentstrue\n", "\n", "\\ifcomments\n", " \\newcommand{\\colornote}[3]{{\\color{#1}\\bf{#2: #3}\\normalfont}}\n", "\\else\n", " \\newcommand{\\colornote}[3]{}\n", "\\fi\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\newcommand{\\JB}[1]{\\colornote{red}{JB}{#1}}\n", "\\newcommand{\\YC}[1]{\\colornote{red}{YC}{#1}}\n", "\n", "\\newcommand{\\SP}[1]{{\\footnotesize\\colornote{red}{SP}{#1}}}\n", "\\newcommand{\\TODO}[1]{{\\footnotesize\\color{WildStrawberry}\\bf{TODO: #1}}}\n", "\\newcommand{\\TENTATIVE}[1]{{\\footnotesize\\color{PineGreen}\\bf{[#1]}}}\n", "\\newcommand{\\rebuttal}[1]{{\\leavevmode\\color{PineGreen}#1}} \n", "\n", "\\newcommand{\\cmark}{\\ding{51}}\n", "\\newcommand{\\xmark}{\\ding{55}}\n", "\n", "\n", "\\newcommand{\\kldiv}{\\mathrm{D}_{\\rm KL}}\n", "\\newcommand{\\klbars}{\\,|\\,}\n", "\\newcommand{\\gaussian}[2]{\\mathcal{N}\\left({#1},{#2}\\right)}\n", "\\newcommand{\\expectation}[2]{\\mathbb{E}_{#1}\\left[{#2}\\right]}\n", "\\newcommand{\\variance}[2]{\\text{Var}_{#1}\\left[{#2}\\right]}\n", "\\newcommand{\\covariance}[2]{\\text{Cov}_{#1}\\left[{#2}\\right]}\n", "\\newcommand{\\real}{\\mathbb{R}}\n", "\\newcommand{\\targetloss}{\\mathcal{L}}\n", "\\newcommand{\\nnloss}{\\mathcal{L}^{\\supportdataset}}\n", "\\newcommand{\\data}{x}\n", "\\newcommand{\\target}{y}\n", "\n", "\\newcommand{\\trainingData}{\\mathcal{D}_\\textrm{train}}\n", "\\newcommand{\\validData}{\\mathcal{D}_\\textrm{valid}}\n", "\\newcommand{\\testData}{\\mathcal{D}_\\textrm{test}}\n", "\n", "\\newcommand{\\instruction}{\\mathcal{\\rho}}\n", "\\newcommand{\\demotrain}{\\mathcal{D}_{\\text{train}}}\n", "\\newcommand{\\demo}{demo}\n", "\\newcommand{\\demoQ}{Q}\n", "\\newcommand{\\demoA}{A}\n", "\\newcommand{\\task}{T}\n", "\\newcommand{\\subsize}{K}\n", "\n", "\\newcommand{\\proposal}{\\mathcal{U}}\n", "\\newcommand{\\logp}[1]{logP[{#1}]}\n", "\\newcommand{\\logpg}[1]{logP Gain[{#1}]}\n", "\\newcommand{\\execacc}[1]{ExecACC[{#1}]}\n", "\n", "\\newcommand{\\fexec}{f_\\textrm{exec}}\n", "\n", "\\newcommand{\\threshold}{\\tau}\n", "\n", "\\newcommand{\\gram}[2]{K^{\\theta}_{{#1}{#2}}}\n", "\n", "\\newcommand{\\algname}{APE\\xspace}\n", "\\newcommand{\\given}{\\,|\\,}\n", "\\newcommand{\\M}{\\mathcal{M}}\n", "\n", "\\usepackage[utf8]{inputenc} \n", "\\usepackage[T1]{fontenc} \n", "\\usepackage{hyperref} \n", "\\usepackage{url} \n", "\\usepackage{booktabs} \n", "\\usepackage{amsfonts} \n", "\\usepackage{nicefrac} \n", "\\usepackage{microtype} \n", "\n", "\n", "\\usepackage{subcaption}\n", "\\usepackage{multirow}\n", 
"\\usepackage{makecell}\n", "\\usepackage{graphicx}\n", "\\usepackage{algorithm}\n", "\\usepackage{algorithmic}\n", "\\usepackage{amssymb}\n", "\\usepackage{pifont}\n", "\\usepackage{adjustbox}\n", "\\usepackage{enumitem}\n", "\\usepackage{setspace} \n", "\\usepackage{etoolbox}\n", "\\usepackage{cleveref}\n", "\\usepackage{xspace} \n", "\\usepackage{wrapfig} \n", "\\usepackage{array}\n", "\\usepackage{calc}\n", "\\usepackage[symbol]{footmisc}\n", "\\usepackage[bb=dsserif]{mathalpha}\n", "\n", "\\AtBeginEnvironment{quote}{\\par\\singlespacing\\small}\n", "\n", "\\renewcommand{\\paragraph}[1]{\\textbf{#1}\\hspace{0.8em}}\n", "\n", "\\newif\\ifworkshop\n", "\\workshopfalse\n", "\\ifworkshop\n", " \\newcommand{\\workshoponly}[1]{#1}\n", " \\newcommand{\\workshopexclude}[1]{}\n", "\\else\n", " \\newcommand{\\workshoponly}[1]{}\n", " \\newcommand{\\workshopexclude}[1]{#1}\n", "\\fi\n", "\n", "\\title{Large Language Models are Human-Level Prompt Engineers}\n", "\n", "\n", "\n", "\n", "\\author{Yongchao Zhou$^{1,2,*}$, Andrei Ioan Muresanu$^{2,3,*}$, Ziwen Han$^{1,2,*}$, Keiran Paster$^{1,2}$, \\\\\\textbf{Silviu Pitis$^{1,2}$, Harris Chan$^{1,2}$, Jimmy Ba$^{1,2}$} \\\\\n", "$^1$University of Toronto\\quad $^2$Vector Institute\\quad $^3$University of Waterloo\\quad $^*$Equal contribution\\\\\n", "\\texttt{\\{yczhou,hanziwen,keirp,spitis,hchan,jba\\}@cs.toronto.edu}\\\\\n", "\\texttt{\\{andrei.muresanu\\}@uwaterloo.ca}\n", "}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\newcommand{\\fix}{\\marginpar{FIX}}\n", "\\newcommand{\\new}{\\marginpar{NEW}}\n", "\n", "\\usepackage{subfiles} \n", "\n", "\\iclrfinalcopy \n", "\\begin{document}\n", "\n", "\\maketitle\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\renewcommand*{\\thefootnote}{\\arabic{footnote}}\n", "\\setcounter{footnote}{0}\n", "\n", "\\begin{abstract}\n", "By conditioning on natural language instructions, large language models (LLMs) have displayed impressive capabilities as general-purpose computers. However, task performance depends significantly on the quality of the prompt used to steer the model, and most effective prompts have been handcrafted by humans. Inspired by classical program synthesis and the human approach to prompt engineering, we propose \\textit{Automatic Prompt Engineer}\\footnote{We define ``prompt engineering'' as optimizing the language in a prompt in order to elicit the best possible performance. Notably, this does not include prompts that chain multiple LLM queries together or give the LLM access to external tools.} (APE) for automatic instruction generation and selection. In our method, we treat the instruction as the ``program,'' optimized by searching over a pool of instruction candidates proposed by an LLM in order to maximize a chosen score function. To evaluate the quality of the selected instruction, we evaluate the zero-shot performance of another LLM following the selected instruction. Extensive experiments show that our automatically generated instructions outperform the prior LLM baseline by a large margin and achieve better or comparable performance to the instructions generated by human annotators on 24/24 Instruction Induction tasks and 17/21 curated BIG-Bench tasks. We conduct extensive qualitative and quantitative analyses to explore the performance of APE. 
We show that APE-engineered prompts are able to improve few-shot learning performance (by simply prepending them to standard in-context learning prompts), find better zero-shot chain-of-thought prompts, as well as steer models toward truthfulness and/or informativeness. \n", "\n", "\\footnote{\\ Our code is available at \\url{https://github.com/keirp/automatic_prompt_engineer}.}\n", "\\end{abstract}\n", "\n", "\\section{Introduction}\\label{sec:intro}\n", "The combination of scale and attention-based architectures has resulted in language models possessing an unprecedented level of generality \\citep{kaplan2020scaling,vaswani2017attention}. These so-called ``large language models'' (LLMs) have shown remarkable, often superhuman, capabilities across a diverse range of tasks, including both zero-shot and few-shot setups \\citep{brown2020language,srivastava2022beyond}. With generality, however, there comes a question of control: how can we make LLMs do what we want them to do? \n", "\n", "To answer this question and steer LLMs toward desired behaviors, recent work has considered fine-tuning \\citep{ouyang2022training,ziegler2019fine}, in-context learning \\citep{brown2020language}, and several forms of prompt generation \\citep{gao2021prompting}, including both differentiable tuning of soft prompts \\citep{qin2021learning,lester2021power} and natural language prompt engineering \\citep{reynolds2021prompt}. The latter is of particular interest, as it provides a natural interface for humans to communicate with machines and may be of great relevance not only to LLMs but to other generalist models such as prompted image synthesizers \\citep{rombach2022high,ramesh2022hierarchical}, for which public interest in prompt design and generation has also emerged (see Appendix \\ref{appdx_wild_prompt_engineering} for examples).\n", "\n", "Behind this interest is the fact that plain language prompts do not always produce the desired results, even when those results are possible to produce with alternative instructions. Thus, human users must experiment with a wide range of prompts to elicit desired behaviors, as they have little knowledge of how compatible instructions are with a particular model.\n", "We can understand this by viewing LLMs as black-box computers that execute programs specified by natural language instructions: while they can execute a broad range of natural language programs, the way these programs are processed may not be intuitive for humans, and the quality of instruction can only be measured when executing these instructions on a downstream task \\citep{sanh2022multitask, wei2021finetuned}. \n", "\n", "\\workshopexclude{\n", "To reduce the human effort involved in creating and validating effective instructions, we propose a novel algorithm using LLMs to generate and select instructions automatically. We call this problem \\textit{natural language program synthesis} and propose to address it as a black-box optimization problem using LLMs to generate and search over heuristically viable candidate solutions. \n", "In doing so, we leverage the generalist capabilities of LLMs in three ways. First, we use an LLM as an inference model \\citep{ellis2021dreamcoder, honovich2022instruction} to generate instruction candidates based on a small set of demonstrations in the form of input-output pairs. Next, we guide the search process by computing a score for each instruction under the LLM we seek to control. 
Finally, we propose an iterative Monte Carlo search method where LLMs improve the best candidates by proposing semantically similar instruction variants. Intuitively, our algorithm asks LLMs to generate a set of instruction candidates based on demonstrations and then asks them to assess which instructions are more promising. We call our algorithm Automatic Prompt Engineer (\\algname). \\textbf{Our main contributions are:}\n", "\\begin{itemize}\n", " \\item We frame instruction generation as natural language program synthesis, formulate it as a \n", " black-box optimization problem guided by LLMs, and propose both a naive and an iterative Monte Carlo search method to approximate the solution.\n", " \\item Our proposed method, APE, achieves human-level performance on zero-shot learning with model-generated instructions on 24/24 Instruction Induction and 17/21 Big-Bench tasks.\n", " \\item We provide extensive qualitative and quantitative analyses exploring various facets of APE, and demonstrate applications of APE for improving few-shot learning, finding better zero-shot chain of thought prompts, and steering LLMs toward desired behaviors such as truthfulness and/or informativeness.\n", "\\end{itemize}\n", "}\n", "\n", "\\workshoponly{\n", "To reduce the human effort involved in creating and validating effective instructions, we propose a novel algorithm using LLMs to generate and select instructions automatically. We call this problem \\textit{natural language program synthesis} and propose to address it as a black-box optimization problem using LLMs to generate and search over heuristically viable candidate solutions. In doing so, we leverage the generalist capabilities of LLMs in two ways. First, we use an LLM as an inference model \\citep{ellis2021dreamcoder, honovich2022instruction} to generate instruction candidates based on a small set of demonstrations in the form of input-output pairs. Second, we guide the search process by computing a score for each instruction under the LLM we seek to control. Intuitively, our algorithm asks LLMs to generate a set of instruction candidates based on demonstrations and then asks them to assess which instructions are more promising. We call our algorithm Automatic Prompt Engineer (\\algname). 
\\textbf{Our main contributions are:}\n", "\\begin{itemize}\n", " \\item We frame instruction generation as natural language program synthesis, formulate it as a \n", " black-box optimization problem guided by LLMs, and propose a Monte Carlo search methods to approximate the solution.\n", " \\item Our proposed method, APE, achieves human-level performance on zero-shot learning with model-generated instructions on 19/24 NLP tasks and demonstrate applications of APE for steering LLMs toward desired behaviors such as truthfulness and/or informativeness.\n", "\\end{itemize}\n", "}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\vspace{-0.05in}\n", "\n", "\\begin{subfigure}[b]{0.48\\textwidth}\n", " \\hfill\\includegraphics[width=1.0\\linewidth]{figures/illustration/APE_pipe.pdf}\\vspace{0.75em}\n", " \\caption{Automatic Prompt Engineer (APE) workflow}\n", "\\end{subfigure}\n", " \\begin{subfigure}[b]{0.49\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/model_size.pdf}\n", " \\caption{Interquartile mean across 24 tasks}\n", " \\end{subfigure}\n", " \\caption{(a) Our method, \\textbf{Automatic Prompt Engineer (APE)}, automatically generates instructions for a task that is specified via output demonstrations: it generates several instruction candidates, either via direct inference or a recursive process based on semantic similarity, executes them using the target model, and selects the most appropriate instruction based on computed evaluation scores. (b) As measured by the interquartile mean across the 24 NLP tasks introduced by \\citet{honovich2022instruction}, APE is able to surpass human performance when using the InstructGPT model \\citep{ouyang2022training}.}\\label{fig:highlight}\n", "\\end{figure}\n", "\\section{Related Work}\n", "\n", "\\paragraph{Large Language Models}\n", "Scaling up transformer-based language models in terms of model size, training data, and training compute has been shown to predictably improve performance on a wide range of downstream NLP tasks \\citep{vaswani2017attention, devlin2018bert, brown2020language}. Many emergent abilities \\citep{wei2022emergent} of LLMs have been discovered as a result of this scaling, including few-shot in-context learning, zero-shot problem solving, chain of thought reasoning, instruction following, and instruction induction \\citep{cobbe2021training, wei2022chain, kojima2022large, sanh2022multitask, wei2021finetuned, ouyang2022training, honovich2022instruction}. In this paper, we view LLMs as black-box computers that execute programs specified by natural language instructions and investigate how to control an LLM's behavior using model-generated instructions. \n", "\n", "\\paragraph{Prompt Engineering}\n", "Prompting offers a natural and intuitive interface for humans to interact with and use generalist models such as LLMs. Due to its flexibility, prompting has been widely used as a generic method for NLP tasks \\citep{schick2021exploiting, brown2020language, sanh2022multitask}. However, LLMs require careful prompt engineering, either manually \\citep{reynolds2021prompt} or automatically \\citep{gao2021making, shin2020autoprompt}, as models do not seem to understand the prompts in the same way a human would \\citep{webson2021prompt, lu2021fantastically}. 
Though many successful prompt tuning methods perform optimization over a continuous space using gradient-based methods \\citep{liu2021gpt, qin2021learning,lester2021power}, this becomes less practical with scale, as computing gradients becomes increasingly expensive and access to models shifts to APIs that may not provide gradient access. \n", "In our paper, we borrow components from discrete prompt search methods, such as prompt generation \\citep{gao2021making, ben2021pada}, prompt scoring \\citep{davison2019commonsense} and prompt paraphrasing \\citep{jiang2020can, yuan2021bartscore} to optimize instructions by searching directly in the natural language hypothesis space. \n", "As compared to this past work, which uses specialized models for each component and leans heavily on human templates, we show that the entire search can be conducted by a single LLM.\n", "\n", "\\paragraph{Program Synthesis} Program synthesis involves the automatic search over a ``program space'' to find a program satisfying a particular specification \\citep{gulwani2017program}. \n", "\n", "Modern program synthesis admits a wide variety of specifications, including input-output examples \\citep{ellis2021dreamcoder,wong2021leveraging} and natural language \\citep{jain2022jigsaw}. The range of feasible program spaces to search over has also grown, from historically restrictive domain-specific languages to general-purpose programming languages \\citep{austin2021program}. In contrast to prior approaches that require a suitable structured hypothesis space and library of components \\citep{liang2010learning, ellis2018learning}, we leverage the structure provided by LLMs to search over the space of natural language programs. \n", "Using inference models is a standard practice to speed up the search by restricting the search space to a limited space of possible expressions \\citep{menon2013machine, lee2018accelerating, devlin2017neural, ellis2021dreamcoder}. \n", "Inspired by this, we use LLMs as approximate inference models to generate program candidates based on a small set of demonstrations. Unlike classical program synthesis, our inference models do not require any training and generalize well to various tasks.\n", "\n", "\\workshoponly{\n", "\\begin{figure}\n", " \\centering\n", " \\includegraphics[width=0.8\\linewidth]{figures/illustration/APE_pipeline.pdf}\n", " \\caption{Our method, \\textbf{Automatic Prompt Engineer (APE)}, automatically generates instructions for a task that is specified via output demonstrations: it generates several instruction candidates, either via direct inference or a recursive process based on semantic similarity, executes them using the target model, and selects the most appropriate instruction based on computed evaluation scores.}\\label{fig:pipline}\n", "\\end{figure}\n", "}\n", "\\workshopexclude{\\section{Natural Language Program Synthesis using LLMs}}\n", "\\workshoponly{\\section{Method in Detail}}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "We consider a task specified by a dataset $\\trainingData = \\{(\\demoQ, \\demoA)\\}$ of input/output demonstrations sampled from population $\\mathcal{X}$, and a prompted model $\\M$. \n", "The goal of natural language program synthesis is to find a single instruction $\\instruction$ such that, when $\\M$ is prompted with the concatenation $[\\instruction ; \\demoQ]$ of instruction and a given input, $\\M$ produces the corresponding output $\\demoA$. 
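For instance, in the Antonyms task listed in Table \\ref{tab:instruct_tasks_original}, the instruction $\\instruction$ could be ``Write a word that means the opposite of the input word.'', the input $\\demoQ$ could be ``won'', and the desired output $\\demoA$ would then be ``lost''.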
More formally, we frame this as an optimization problem, where we seek instruction $\\instruction$ that maximizes the expectation of some per-sample score $f(\\instruction, \\demoQ, \\demoA)$ over possible $(\\demoQ, \\demoA)$:\n", "\\begin{equation}\\label{eq:score}\n", "\\instruction^{\\star} = \\argmax_\\instruction f(\\instruction) = \\argmax_\\instruction \\expectation{(\\demoQ, \\demoA)}{f(\\instruction,\\demoQ, \\demoA)}\n", "\\end{equation}\n", "Note that in general, $\\demoQ$ may be the empty string, such that we are optimizing $\\instruction$ as a prompt that directly produces outputs $\\{A\\}$.\n", "While this task has been widely attempted by humans, we have little knowledge of how compatible any particular instruction is with model $\\M$. Thus, we propose to treat this human-intractable question as a black-box optimization process guided by LLMs. \n", "Our algorithm, APE, uses LLMs in each of two key components, proposal and scoring. As shown in Figure \\ref{fig:highlight} and summarized in Algorithm \\ref{alg:ape}, APE first proposes a few candidate prompts, and then filters/refines the candidate set according to a chosen score function, ultimately choosing the instruction with the highest score. We discuss options for proposal and scoring next. \n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\subsection{Initial Proposal Distributions}\\label{subsec:initialU}\n", "Due to the infinitely large search space, finding the right instruction can be extremely difficult, which has rendered natural language program synthesis historically intractable. Recent progress in NLP has shown language models are very good at generating diverse natural language text. Therefore, we consider leveraging a pretrained LLM to propose a good set $\\proposal$ of candidate solutions that will guide our search procedure. While random samples from LLMs are unlikely to produce the desired ($\\demoQ, \\demoA$) pairs, we can instead ask the LLM to approximately infer the most likely instructions with a high score, given the input/output demonstrations; i.e., to approximately sample from $P(\\instruction\\given\\trainingData,\\ f(\\instruction)\\textrm{ is high})$.\n", "\n", "\\begin{wrapfigure}{R}{0.3\\textwidth}\n", "\\centering\n", "\\vspace{-0.1in}\n", "\\includegraphics[width=0.275\\textwidth]{figures/illustration/template_combined.pdf}\n", "\\caption{Prompts for LLMs}\n", "\\vspace{-0.35in}\n", "\\label{fig:llm_template}\n", "\\end{wrapfigure}\n", "\n", "\\newcommand{\\algspacer}{\\hspace{1em}}\n", "\\begin{algorithm}[tb]\\small\n", " \n", " \\caption{Automatic Prompt Engineer (APE)}\\label{alg:ape}\n", " \\begin{algorithmic}\n", " \\STATE {\\bfseries Require:} $\\trainingData \\gets \\{(\\demoQ, \\demoA)\\}_n$: training examples, $f:\\instruction\\times\\mathcal{D} \\mapsto \\mathbb{R}$: score function\n", " \n", " \\end{algorithmic}\n", " \\begin{algorithmic}[1]\n", " \\STATE Use LLM to sample instruction proposals $\\ \\proposal \\gets \\{\\instruction_1, ..., \\instruction_m\\}$. (See Section~\\ref{subsec:initialU})\n", " \\WHILE{not converged}\n", " \\STATE Choose a random training subset $\\widetilde{\\mathcal{D}}_\\textrm{train} \\subset \\trainingData$. 
\n", " \n", " \\FORALL{$\\instruction$ in $\\proposal$}\n", " \\STATE Evaluate score on the subset $\\widetilde{s} \\gets f(\\instruction, \\widetilde{\\mathcal{D}}_\\textrm{train})$ (See Section~\\ref{sec:score_function} )\n", " \\ENDFOR\n", " \\STATE Filter the top k\\\n", " \\STATE Update instructions $\\proposal \\gets \\proposal_k$ or use LLM to resample $\\proposal \\gets \\text{resample} ( \\proposal_k )$ (See Section~\\ref{sec:iterative}) \n", " \\ENDWHILE\n", " \\end{algorithmic}\n", " \\begin{algorithmic}\n", " \\STATE {\\bfseries Return} instruction with the highest score $\\instruction^{\\star} \\gets \\arg\\max_{\\instruction \\in \\proposal_k} f(\\instruction, \\trainingData)$\n", " \\end{algorithmic}\n", "\\end{algorithm}\n", "\n", "\\paragraph{Forward Mode Generation} We consider two approaches to generate high-quality candidates from $P(\\instruction\\given\\trainingData,\\ f(\\instruction)\\textrm{ is high})$. First, we adopt an approach based on ``forward'' mode generation by translating this distribution $P(\\instruction\\given\\trainingData,\\ f(\\instruction)\\textrm{ is high})$ into words. For example, in our instruction induction experiments (Subsection \\ref{sec:inst_induct}), we follow \\citet{honovich2022instruction} and prompt the LLM using Figure \\ref{fig:llm_template} (Top). \n", "\n", "\\paragraph{Reverse Mode Generation} Although the ``forward'' model works out of the box for most of the pretrained LLMs, translating $P(\\instruction\\given\\trainingData,\\ f(\\instruction)\\textrm{ is high})$ into words requires custom engineering across different tasks. This is because while instructions are typically found in the beginning of passages, the ``forward'' model only generates text from left to right, which requires the instruction to be predicted at the end of the prompt. Therefore, we desire a more flexible approach such that the instruction can be anywhere in the text. To address this, we consider ``reverse'' mode generation, which uses an LLM with infilling capabilities---e.g., T5~\\citep{raffel2020exploring}, GLM \\citep{du2022glm}, and InsertGPT~\\citep{bavarian2022efficient}---to infer the missing instructions. Our ``reverse'' model directly samples from $P(\\instruction\\given\\trainingData,\\ f(\\instruction)\\textrm{ is high})$ by filling in the blank. We show an example of the such template in Figure \\ref{fig:llm_template} (Middle).\n", "\n", "\\paragraph{Customized Prompts} Note that depending on the score function being used, there may exist more appropriate prompts than the samples above. For example, in our TruthfulQA experiments, we start with the human-designed instructions from the original dataset~\\citep{lin2022truthfulqa} and ask the the ``reverse'' model to propose initial instruction samples that fit the missing context (Figure \\ref{fig:llm_template} (Bottom)). \n", "\n", "\\subsection{Score Functions} \\label{sec:score_function}\n", "To cast our problem as black-box optimization, we choose a score function that accurately measures the alignment between the dataset and the data the model generates. In our instruction induction experiments, we consider two potential score functions, described below. In the TruthfulQA experiments, we focused primarily on automated metrics proposed in \\citet{lin2022truthfulqa}, similar to the execution accuracy. 
In each case, we evaluate the quality of a generated instruction using Equation (\\ref{eq:score}), and take the expectation over a held-out test dataset $\\testData$.\n", "\n", "\\paragraph{Execution accuracy} First, we consider evaluating the quality of an instruction $\\instruction$ using the execution accuracy metric proposed by \\citet{honovich2022instruction}, which we denote as $\\fexec$. In most cases, execution accuracy is simply defined as the 0-1 loss, $f(\\instruction, \\demoQ, \\demoA) = \\mathbb{1}\\left[\\M([\\instruction ; \\demoQ]) = \\demoA\\right]$. On some tasks, execution accuracy takes into account invariants; e.g., it may be an order invariant set matching loss, as described in Appendix A of \\citet{honovich2022instruction}. \n", "\n", "\\paragraph{Log probability} We further consider a softer probabilistic score function, which we hypothesize might improve optimization by providing a more fine-grained signal when searching over low-quality instruction candidates. In particular, we consider the log probability of the desired answer given the instruction and question under the target model $\\M$, which, on a per-sample basis, is $\\log P(\\demoA\\given[\\instruction ; \\demoQ])$.\n", "\n", "\\paragraph{Efficient score estimation}\n", "Estimating the score over the entire training dataset for all instruction candidates can be expensive. To reduce the computation cost, we adopt a filtering scheme in which a promising candidate receives more computation resources while a low-quality candidate receives less. \n", "This can be achieved with a multi-stage computation strategy on lines 2-9 of Algorithm \\ref{alg:ape}. We first evaluate all candidates with a small subset of the training dataset. For the candidates with a score greater than a certain threshold, we sample and evaluate a new non-overlapping subset from the training dataset to update the moving average of the score. \n", "Then, we repeat this process until a small set of candidates is left, which are evaluated on the entire training dataset. \n", "This adaptive filtering scheme significantly improves the computation efficiency by keeping the exact computation costs for the high-quality candidates while drastically reducing the computation costs for low-quality candidates. \n", "We note that a similar score estimation scheme has been used in previous works \\citep{li2022competition, maclaurin2015firefly}.\n", "\n", "\\subsection{Iterative Proposal Distributions}\\label{sec:iterative}\n", "Despite our attempt to directly sample high-quality initial instruction candidates, it could be the case that the method described in Subsection \\ref{subsec:initialU} fails to produce a good proposal set $\\proposal$, either because it lacks diversity or because it does not contain any candidates with a suitably high score. In such cases, we explore an iterative process for resampling $\\proposal$.\n", "\n", "\\begin{wrapfigure}{R}{0.3\\textwidth}\n", "\\centering\n", "\\vspace{-0.1in}\n", "\\includegraphics[width=0.275\\textwidth]{figures/illustration/template_resample.pdf}\n", "\\caption{Resampling}\n", "\\vspace{-0.15in}\n", "\\label{fig:template_resampling}\n", "\\end{wrapfigure}\n", "\n", "\\paragraph{Iterative Monte Carlo Search}\n", "Instead of only sampling from the initial proposal, we consider exploring the search space locally around the current best candidates. This allows us to generate new instructions that are more likely to be successful. 
We call this variant \\textit{iterative \\algname}. \n", "At each stage, we evaluate a set of instructions and filter out candidates with low scores. Then, an LLM is asked to generate new instructions similar to those with high scores. We provide the prompt used for resampling in Figure \\ref{fig:template_resampling}. \n", "Figure \\ref{fig:main-posterior} (Right) shows that although this approach improves the overall quality of the proposal set $\\proposal$, the highest scoring instruction tends to remain the same with more stages. We conclude that, given the relative simplicity and effectiveness of the generative process described in Subsection \\ref{subsec:initialU}, iterative generation provides only marginal improvement. Therefore, we use \\algname~without iterative search as the default unless otherwise stated.\n", "\\workshopexclude{\\section{Large Language Models are Human-Level Prompt Engineers}}\n" ], "context_after_exp": [ "\\workshoponly{\\section{Additional Experimental Results}\\label{app:add_res} }\n", "This section examines how APE can guide LLMs to desired behaviors. We investigate this from four perspectives: zero-shot performance, few-shot in-context learning performance, zero-shot chain-of-thought reasoning, and truthfulness. Our experiments show that APE can find prompts that improve task performance, performing on par with or even better than those authored by humans. APE also often produces insightful tricks for how to best prompt language models that can be successfully transferred to new tasks (see Section \\ref{sec:cot}).\n", "\\workshoponly{For consistency, we duplicate some of the results here.}\n", "\n", "\\subsection{Instruction Induction}\\label{sec:inst_induct}\n", "We assess the effectiveness of zero-shot and few-shot in-context learning on 24 instruction induction tasks proposed in \\citet{honovich2022instruction}. The tasks span many facets of language understanding, from simple phrase structure to similarity and causality identification. We provide detailed descriptions of each task in Appendix B. For each task, we sample five input-output pairs from the training data and select the best instruction using Algorithm \\ref{alg:ape}. Then, we evaluate the quality of the instruction by executing it on InstructGPT\\footnote{We use \\textit{text-davinci-002} via the OpenAI API (\\url{https://beta.openai.com/}). Though not stated explicitly in the API, we assume the models are those reported by \\citet{ouyang2022training}.}. We repeat our experiments five times with different random seeds and report the mean and standard deviation. The exact templates for our experiments can be found in the Appendix (Table \\ref{table:raw_templates}).\n", "\n", "\\begin{figure}[t]\n", " \\vspace{-0.25in}\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_zero_shot.pdf}\n", " \\caption{Zero-shot test accuracy on 24 Instruction Induction tasks. \\algname~achieves human-level or better performance on all 24 tasks.}\\label{fig:main-zero-shot}\n", "\\end{figure}\n", "\n", "\\paragraph{Zero-shot Learning}\n", "We compare our method against two baselines: human prompt engineers (Human)\\footnote{\\mbox{We use the gold annotations from \\citet{honovich2022instruction}, which were manually verified for correctness.}} and the model-generated instruction algorithm proposed by \\citet{honovich2022instruction}. This algorithm can be thought of as a greedy version of \\algname, without a search and selection process; thus, we refer to it as ``Greedy''. 
Figure \\ref{fig:main-zero-shot} shows the zero-shot performance of InstructGPT using human instructions and model-generated instructions. Our algorithm outperforms ``Greedy'' on every task and achieves performance equal to or better than humans on 24 of 24 tasks. Moreover, the Interquartile Mean (IQM) \\citep{agarwal2021deep} across all 24 tasks in Figure \\ref{fig:highlight} suggests that \\algname~with InstructGPT outperforms human-engineered prompts, obtaining an IQM of 0.810 vs. humans' 0.749. We summarize the instructions selected by \\algname~for each task in the Appendix (Table \\ref{table:best_instructions_all}).\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\paragraph{Few-shot In-context Learning}\n", "We evaluated APE-generated instructions in few-shot in-context learning, where we insert the instruction before the in-context demonstrations. Those instructions are selected based on zero-shot execution accuracy, and we denote this setting as ``Instruction + In-context'' in Figure \\ref{fig:main-few-shot}. As shown in Figure \\ref{fig:main-few-shot}, adding an instruction achieves comparable or better test performance than standard in-context learning on 21 of 24 tasks. Counter-intuitively, adding in-context examples for Rhymes, Large Animal, and Second Letters hurts model performance. We conjecture that this may be because the selected instructions overfit the zero-shot learning scenario and thus do not perform well on the few-shot case. Therefore, we experiment using few-shot execution accuracy as the selection metric. Figure \\ref{fig:app-few-shot-as-metric} shows that the few-shot metric performs comparably to or slightly better than the zero-shot metric on all tasks except Rhymes. To have an intuitive understanding of what is happening, we provide a qualitative analysis in Appendix \\ref{sec:app_ii}. \n", "\n", "\n", "\n", "\n", "\\subsection{BigBench}\\label{sec:bigbench}\n", "To see whether APE can be applied to more challenging tasks, we propose and curate BIG-Bench Instruction Induction (BBII), a clean and tractable subset of 21 tasks that have a clear, human-written instruction that can be applied to all examples in the dataset. The selected tasks cover many facets of language understanding and include all nine such problems from the BigBench-Hard Subset \\citep{suzgun2022challenging}. In particular, it includes emotional understanding, context-free question answering, reading comprehension, summarization, algorithms, and various reasoning tasks (e.g., arithmetic, commonsense, symbolic, and other logical reasoning tasks). We provide a detailed description of the tasks and our selection criteria in Appendix \\ref{app:imp_details}. \n", "\n", "For each task, we used the reverse mode generation of InstructGPT to generate a set of instruction candidates and ranked the instructions based on their execution accuracy. Then, we executed the selected instruction on InstructGPT to compute the zero-shot performance on the test set and compared it with the default human prompt. As shown in Appendix Table \\ref{table:bbii_results}, APE achieves comparable or better performance than the default human prompt on 17 out of 21 tasks.\n", "\n", "\\subsection{Zero-shot Chain of Thought}\\label{sec:cot}\n", "Chain-of-thought reasoning has been shown to dramatically improve the ability of LLMs to complete complex reasoning tasks, such as solving math problems that require multiple steps. 
Early works \\citep{nye2021show,betz2021thinking,wei2022chain} on chain-of-thought used fine-tuning or in-context learning to get LLMs to show their work for such problems. One of the most influential recent works in prompt engineering was the discovery \\citep{kojima2022large} that LLMs could be made to produce chain-of-thought reasoning simply by prepending ``Let's think step by step.'' to the beginning of the LLM's response. Known as Zero-Shot-CoT, this prompting strategy improves the zero-shot performance of InstructGPT on MultiArith \\citep{roy2016solving} from 17.7 to 78.7 and improves performance on GSM8K~\\citep{cobbe2021training} from 10.4 to 40.7. As shown in Table \\ref{tab:cot-arith}, \\citet{kojima2022large} found their prompt was the best performing out of at least nine human-designed prompts.\n", "\n", "We used APE to automatically search for the best answer-prefix across the suite of tasks used in \\citet{kojima2022large}. Our approach to optimizing this prompt was inspired by \\citet{zelikman2022star}. First, we generate a dataset of questions and reasoning steps using InstructGPT with ``Let's think step by step.'' Then, we remove any data points that had incorrect answers. Finally, we use APE to find a prompt starting with ``Let's'' that maximizes the likelihood of these correct reasoning steps. See Table \\ref{table:raw_templates} for the template used for prompt generation and evaluation. APE produces the prompt ``Let's work this out in a step by step way to be sure we have the right answer.'' This generated prompt further improves performance from 78.7 to 82.0 on MultiArith and from 40.7 to 43.0 on GSM8K. We believe this general workflow represents a common use case for APE, where prompt engineers use APE to optimize parts of their existing templates to improve performance. See Figure \\ref{fig:cot-all} for details on the performance of this prompt on other reasoning tasks.\n", "\n", "\\workshopexclude{\n", "\\subsection{TruthfulQA}\n", "We apply our method to TruthfulQA \\citep{lin2022truthfulqa} to see how \\algname-generated instructions can steer an LLM to generate answers with different styles, and study the trade-off between truthfulness and informativeness. Borrowing the metrics from the original paper, we use \\algname~to learn instructions that maximize three metrics: truthfulness (\\% True), informativeness (\\% Info), and a combination of the two (\\% True + \\% Info).\n", "\n", "\\paragraph{Prompt Engineering in TruthfulQA} We want to stress that the TruthfulQA dataset is intended to test pretrained models in zero-shot settings, so our results are not directly comparable with the original benchmarks. Because we have optimized the instructions using a small portion of the question-answer pairs as training demonstrations, our results are also not ``true few-shot learning''~\\citep{perez2021true}. We randomly sampled 100 out of 817 questions for the actual experiments to form training demonstrations $\\trainingData$. To sample the proposal set $\\proposal$, we ask a ``reverse'' model to generate instructions based on six randomly chosen demonstration pairs, similar to our previous experiments. Unlike in Instruction Induction, in TruthfulQA we aim to find a single best instruction prompt that works well across all 38 categories of questions spanning health, law, politics, and fiction. It is worth noting that all our generated instructions are very generic, e.g., ``You will be asked a series of questions. 
For each question, you must either answer the question or decline to answer, in which case you must state that you have no comment'', and do not contain any examples from the dataset.\n", "\n", "\\begin{figure}\n", " \\vspace{-0.25in}\n", " \\centering\n", "\n", "\\begin{subfigure}[b]{0.245\\textwidth}\n", " \\captionsetup{justification=centering}\n", " \\hfill\\includegraphics[width=1.0\\linewidth]{figures/main/truthfulqa_top10_train.pdf}\n", " \\caption{Average performance Train}\n", "\\end{subfigure}\n", "\\begin{subfigure}[b]{0.245\\textwidth}\n", " \\captionsetup{justification=centering}\n", " \\hfill\\includegraphics[width=1.0\\linewidth]{figures/main/truthfulqa_top10_test.pdf}\n", " \\caption{Average performance Test}\n", "\\end{subfigure}\n", "\\begin{subfigure}[b]{0.245\\textwidth}\n", " \\captionsetup{justification=centering}\n", " \\hfill\\includegraphics[width=1.0\\linewidth]{figures/main/truthfulqa_scatter_train.pdf}\n", " \\vspace{-1.6em}\n", " \\caption{\\%True vs. \\%Info Train}\n", "\\end{subfigure}\n", "\\begin{subfigure}[b]{0.245\\textwidth}\n", " \\captionsetup{justification=centering}\n", " \\hfill\\includegraphics[width=1.0\\linewidth]{figures/main/truthfulqa_scatter_test.pdf}\n", " \\vspace{-1.6em}\n", " \\caption{\\%True vs. \\%Info Test}\n", "\\end{subfigure} \\vspace{-0.15in}\n", " \\caption{Comparison of \\algname~and the ``help'' (human) prompt on the TruthfulQA task. (a) Percentage of answers on the 100 training examples that were either true (\\% True), informative (\\% Info), or both (\\% True + \\% Info), averaged over the top 10 instructions. (b) The same metrics on the held-out test questions. (c)-(d) \\%True vs. \\%Info for the top 10 instructions on the training and test sets, respectively.}\\label{fig:truthfulqa}\n", "\\end{figure}\n", "\n", "\\paragraph{Truthfulness vs Informativeness Trade-off}\n", "We found that \\algname~outperforms the human-engineered prompt with only 200 candidates proposed by InstructGPT (175B), as seen in Figure~\\ref{fig:truthfulqa}. We compared our generated prompt with the ``help'' prompt from \\citet{lin2022truthfulqa}.\n", "The training and test performance are shown in Figure~\\ref{fig:truthfulqa}(a)-(b). We found that choosing the top 10 of 200 candidates on the training set generalizes well to the test set. We report the average performance across the top 10 instructions for the three metrics.\n", "This result by itself is not surprising as the human baseline is not carefully chosen, as pointed out by \\citet{askell2021general}. However, we found that the instructions discovered by \\algname~can achieve very high truthfulness with answers such as ``No comment,'' but these answers provide little information. We used our top candidates to further investigate the trade-off between truthfulness and informativeness. We visualize the top 10 proposed samples across the three metrics on the truthfulness-informativeness plots shown in Figure~\\ref{fig:truthfulqa}(c) and Figure~\\ref{fig:truthfulqa}(d). While \\algname~achieves over 40\\% of answers that are both truthful and informative, the instructions discovered tend to target the two ends of the \\%True vs. \\%Info frontier.\n", "}\n", "\n", "\\workshopexclude{\\section{Quantitative Analysis}}\n", "\\workshoponly{\\section{Additional Results - Quantitative Analysis}}\n", "In this section, we conduct quantitative analyses to better understand the three main components of our method: proposal distribution, score functions, and iterative search. Moreover, we conduct a cost analysis in Appendix \\ref{sec:cost_analysis} to understand the most cost-efficient way to find the best prompt. 
We observe that larger and more powerful language models are more cost-effective for generating the best prompt despite a higher per-token cost.\n", "\n", "\\subsection{LLMs for Proposal Distribution}\n", "\n", "\\paragraph{How does the proposal quality change as we increase the model size?} To understand how the model size affects the quality of the initial proposal distribution, we examine eight different models\\footnote{We use ada, babbage, curie, davinci, text-ada-001, text-babbage-001, text-curie-001, text-davinci-002.} available via the OpenAI API. To assess the quality of the proposal distribution, we generate 250 instructions per model and compute the execution accuracy on 50 test data points. We visualize the survival function (percentage of instructions with test accuracy greater than a certain threshold) and the histogram of test accuracy for a simple task (i.e., Pluralization) in Figure \\ref{fig:main-posterior} (Left) and include a similar plot for a more challenging task (Start With) in the Appendix (Figure \\ref{fig:app-posterior-model-size-hard}). As shown in both figures (and unsurprisingly), larger models tend to produce better proposal distributions than smaller ones, as do the models that were fine-tuned to follow human instructions. On the simple task, all instructions generated by the best model, InstructGPT (175B), have reasonable test accuracy. In contrast, on the more challenging task, half of the instructions are off-topic and perform poorly. \n", "\n", "\\begin{figure}\n", " \\centering\n", " \\vspace{-0.1in}\n", " \\begin{subfigure}[b]{0.49\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/posterior_model_size_plural.pdf}\n", " \\end{subfigure}\n", " \\begin{subfigure}[b]{0.49\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/posterior_mcmc_passive.pdf}\n", " \\end{subfigure}\n", " \\caption{(Left) Quality of the proposal distribution of models of different sizes as assessed by test execution accuracy. (Right) Iterative Monte Carlo search improves the quality of the instruction candidates at each round.}\\label{fig:main-posterior}\n", "\\end{figure}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\subsection{LLMs for Selection}\n", "\\paragraph{Does proposal quality matter under selection?} If we sample more instructions from the LLMs, then it becomes more likely for us to find better instructions. To verify this hypothesis, we increase the sample size from 4 to 128 and evaluate the change in test accuracy. Figure \\ref{fig:mcmc_comparison} (Left) shows a monotonically increasing trend with diminishing returns, as human-level performance is achieved with 64 instruction samples. Thus, we choose 50 as our default sample size. Under this configuration, we investigate how the proposal distribution affects the test accuracy of the best instruction selected by our algorithm. Figure \\ref{fig:highlight}(b) shows that though the small models may be less likely to generate good instructions, they nonetheless generate some good ones if we sample enough candidates. Therefore, we can still find promising instructions with a small model by running our selection algorithm, which explains why our method outperforms the greedy approach \\cite{honovich2022instruction} across all eight models.\n", "\n", "\\paragraph{Which scoring function is better?} We compute the correlation between the test accuracy and two metrics on 24 instruction induction tasks to study how good our proposed metrics are. 
We generate 250 instructions per task using InstructGPT (175B) in \u201cforward\u201d mode and compute the metric score and test accuracy on 10 test data points. We visualize the Spearman correlation between the test accuracy and two metrics. Figure \\ref{fig:mcmc_comparison} (Middle) shows that the execution accuracy aligns better with the test performance across the tasks. Thus, we choose it as our default metric unless otherwise stated.\n", "\n", "\n", "\n", "\\begin{figure}\n", " \\vspace{-0.15in}\n", " \\centering\n", " \\begin{subfigure}[b]{0.31\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/sample_size_task_mean.pdf}\n", " \\end{subfigure}\\hfill\n", " \\begin{subfigure}[b]{0.31\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/corr_metric_spearman.pdf}\n", " \\end{subfigure}\\hfill\n", " \\begin{subfigure}[b]{0.31\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/mcmc_comparison.pdf}\n", " \\end{subfigure}\n", " \\caption{(Left) Test execution of the best instruction as we increase the number of instruction candidates. We report the mean and standard deviation across 6 different tasks. (Middle) Spearman Correlation between the test accuracy and two metrics on 24 tasks. (Right) Test execution accuracy of the best instruction selected using \\algname and iterative \\algname (\\algname (IT)).}\\label{fig:mcmc_comparison}\n", " \\vspace{-0.1in}\n", "\\end{figure}\n", "\\subsection{Iterative Monte Carlo Search}\\label{ab:tmcs}\n", "\\paragraph{Does Iterative Search improve the instruction quality?} We visualize the survival function and histogram of test accuracy on the ``Passivization'' task in Figure \\ref{fig:main-posterior} (Right) and include five more tasks in the Appendix. The survival plot shows that the curves increase as the round goes up, which suggests that iterative search does result in a higher-quality proposal set. However, we observe diminishing returns to further selection rounds as the quality seems to stabilize after three rounds.\n", "\n", "\\paragraph{Do we need Iterative Search?}\n", "\n", "We compare \\algname~and iterative \\algname~on six tasks\\footref{sixtasks}. As shown in Figure \\ref{fig:mcmc_comparison}, the iterative search marginally improves performance on tasks where APE underperforms humans but achieves similar performance on the other tasks. This is consistent with our hypothesis that iterative search would be most useful on tasks where generating a good initial $\\proposal$ is challenging.\n", "\n", "\\section{Conclusion}\n", "Large language models can be seen as general-purpose computers that execute programs specified by natural language prompts. We automate the prompt engineering process by formulating it as a black-box optimization problem, which we propose to solve using efficient search algorithms guided by LLMs. Our method achieves human-level performance on various tasks with minimum human inputs. As recent LLMs demonstrate an impressive ability to follow human instruction, we expect many future models, including those for formal program synthesis, to have a natural language interface. This work builds the foundation to control and steer generative artificial intelligence.\n", "\n", "\\subsubsection*{Acknowledgments}\n", "We would like to thank Or Honovich and Michael Zhang for their help and valuable feedback. JB was supported by NSERC Grant [2020-06904], CIFAR AI Chairs program, Google Research Scholar Program and Amazon Research Award. KP was supported by NSERC PGS-D. 
SP was supported by NSERC CGS-D. HC was supported by NSERC CGS-D and RBC Graduate Fellowship. Resources used in preparing this research were provided, in part, by the Province of Ontario, the Government of Canada through CIFAR, and companies sponsoring the Vector Institute for Artificial Intelligence.\n", "\n", "\\bibliography{iclr2023_conference}\n", "\\bibliographystyle{iclr/iclr2023_conference}\n", "\n", "\n", "\\newpage\n", "\\appendix\n", "\n", "\\section{Prompt Engineering in the Wild}\\label{appdx_wild_prompt_engineering}\n", "Large models with natural language interfaces, including models for text generation and image synthesis, have seen an increasing amount of public usage in recent years. As finding the right prompt can be difficult for humans, a number of guides on prompt engineering as well as tools to aid in prompt discovery have been developed. Among others, see, for example: \n", "\n", "\\newcommand{\\hscale}[1]{\\scalebox{0.7}[1]{#1}}\n", "\\begin{itemize}[leftmargin=*]\\footnotesize\n", " \\item \\hscale{\\url{https://blog.andrewcantino.com/blog/2021/04/21/prompt-engineering-tips-and-tricks/}}\n", " \\item \\hscale{\\url{https://techcrunch.com/2022/07/29/a-startup-is-charging-1-99-for-strings-of-text-to-feed-to-dall-e-2/}}\n", " \\item \\hscale{\\url{https://news.ycombinator.com/item?id=32943224}}\n", " \n", " \\item \\hscale{\\url{https://promptomania.com/stable-diffusion-prompt-builder/}}\n", " \\item \\hscale{\\url{https://huggingface.co/spaces/Gustavosta/MagicPrompt-Stable-Diffusion}}\n", "\\end{itemize}\n", "\n", "In this paper we apply APE to generate effective instructions for steering LLMs, but the general framework Algorithm \\ref{alg:ape} could be applied to steer other models with natural language interfaces so long as an appropriate proposal method and scoring function can be designed. \n", "\n", "\\newpage\n", "\\section{Implementation Details}\\label{app:imp_details}\n", "\n", "\\begin{table}[H]\n", "\\caption{Detailed description of 24 instruction induction tasks proposed in \\citet{honovich2022instruction}. For convenience, the original table from \\cite{honovich2022instruction} is duplicated here.}\n", "\\small\n", "\\centering\n", "\\begin{tabular}{@{}p{0.12\\textwidth}@{}p{0.175\\textwidth}@{}p{0.375\\textwidth}p{0.300\\textwidth}@{}}\n", "\\toprule\n", "\\textbf{Category} & \\textbf{Task} & \\textbf{Instruction} & \\textbf{Demonstration} \\\\\n", "\\midrule\n", "\\textit{Spelling} & First Letter & Extract the first letter of the input word. & cat $\\rightarrow$ c \\\\\n", "\\cmidrule{2-4}\n", " & Second Letter & Extract the second letter of the input word. & cat $\\rightarrow$ a \\\\\n", "\\cmidrule{2-4}\n", " & List Letters & Break the input word into letters, separated by spaces. & cat $\\rightarrow$ c a t\\\\\n", "\\cmidrule{2-4}\n", "& Starting With & Extract the words starting with a given letter from the input sentence. & The man whose car I hit last week sued me. [m] $\\rightarrow$ man, me \\\\\n", "\\midrule\n", "\\textit{Morpho-}\n", "\n", "\\textit{syntax} & Pluralization & Convert the input word to its plural form. & cat $\\rightarrow$ cats \\\\\n", "\\cmidrule{2-4} \n", " & Passivization & Write the input sentence in passive form. &\n", "The artist introduced the scientist. $\\rightarrow$ The scientist was introduced by the artist. \\\\\n", "\\midrule\n", "\\textit{Syntax} & Negation & Negate the input sentence. & Time is finite $\\rightarrow$ Time is not finite. 
\\\\\n", "\\midrule\n", "\\textit{Lexical} \n", "\n", "\\textit{Semantics} & Antonyms & Write a word that means the opposite of the input word. & won $\\rightarrow$ lost \\\\\n", "\\cmidrule{2-4}\n", " & Synonyms & Write a word with a similar meaning to the input word. & alleged $\\rightarrow$ supposed\\\\\n", "\\cmidrule{2-4}\n", " & Membership & Write all the animals that appear in the given list. & cat, helicopter, cook, whale, frog, lion $\\rightarrow$ frog, cat, lion, whale \\\\\n", "\\midrule\n", "\\textit{Phonetics} & Rhymes & Write a word that rhymes with the input word. & sing $\\rightarrow$ ring \\\\\n", "\\midrule\n", "\\textit{Knowledge} & Larger Animal & Write the larger of the two given animals. & koala, snail $\\rightarrow$ koala\\\\\n", "\\midrule\n", "\\textit{Semantics} & Cause Selection & Find which of the two given cause and effect sentences is the cause. & Sentence 1: The soda went flat. Sentence 2: The bottle was left open. $\\rightarrow$ The bottle was left open.\\\\\n", "\\cmidrule{2-4}\n", "& Common\n", "\n", "Concept & Find a common characteristic for the given objects. & guitars, pendulums, neutrinos $\\rightarrow$ involve oscillations.\\\\\n", "\\midrule\n", "\\textit{Style} & Formality & Rephrase the sentence in formal language. & Please call once you get there $\\rightarrow$ Please call upon your arrival.\\\\\n", "\\midrule\n", "\\textit{Numerical} & Sum & Sum the two given numbers. & 22 10 $\\rightarrow$ 32 \\\\\n", "\\cmidrule{2-4}\n", " & Difference & Subtract the second number from the first. & 32 22 $\\rightarrow$ 10 \\\\\n", "\\cmidrule{2-4}\n", " & Number to Word & Write the number in English words. & 26 $\\rightarrow$ twenty-six \\\\\n", "\\midrule\n", "\\textit{Multi-}\n", "\n", "\\textit{lingual} & Translation & Translate the word into German / Spanish / French. & game $\\rightarrow$ juego\\\\\n", "\\midrule\n", "\\textit{GLUE} & Sentiment \n", "\n", "Analysis & Determine whether a movie review is positive or negative. & The film is small in scope, yet perfectly formed. $\\rightarrow$ positive \\\\\n", "\\cmidrule{2-4}\n", "& Sentence \n", "\n", "Similarity & Rate the semantic similarity of two input sentences on a scale of 0 - definitely not to 5 - perfectly. & Sentence 1: A man is smoking. Sentence 2: A man is skating. $\\rightarrow$ 0 - definitely not \\\\\n", "\\cmidrule{2-4}\n", "& Word in Context & Determine whether an input word has the same meaning in the two input sentences. & Sentence 1: Approach a task. Sentence 2: To approach the city. 
Word: approach $\\rightarrow$ not the same \\\\\n", "\\bottomrule\n", "\\\\\n", "\\end{tabular}\n", "\\label{tab:instruct_tasks_original}\n", "\\end{table}\n", "\n", "\n", "\\begin{table}[H]\n", "\\caption{Detailed description of BIG-Bench Instruction Induction (BBII), a clean and tractable subset of 21 tasks that have a clear human written instruction that can be applied to all examples in the dataset.}\n", "\\label{table:bbii_desc}\n", "\\small\n", "\\centering\n", "\\begin{tabular}{m{2.5cm}m{5.5cm}m{5cm}}\n", "\\toprule\n", "\\textbf{Name} & \\textbf{Description} & \\textbf{Keywords} \\\\\n", "\\midrule\n", "causal judgment & Answer questions about causal attribution & causal reasoning, common sense, multiple choice, reading comprehension, social reasoning \\\\\n", "\\midrule\n", "disambiguation qa & Clarify the meaning of sentences with ambiguous pronouns & common sense, gender bias, many-shot, multiple choice \\\\\n", "\\midrule\n", "dyck languages & Correctly close a Dyck-n word & algebra, arithmetic, logical reasoning, multiple choice \\\\\n", "\\midrule\n", "epistemic reasoning & Determine whether one sentence entails the next & common sense, logical reasoning, multiple choice, social reasoning, theory of mind \\\\\n", "\\midrule\n", "gender inclusive sentences german & Given a German language sentence that does not use gender-inclusive forms, transform it to gender-inclusive forms & free response, grammar, inclusion, non-English, paraphrase \\\\\n", "\\midrule\n", "implicatures & Predict whether Speaker 2's answer to Speaker 1 counts as a yes or as a no & contextual question-answering, multiple choice, reading comprehension, social reasoning, theory of mind \\\\\n", "\\midrule\n", "linguistics puzzles & Solve Rosetta Stone-style linguistics puzzles & free response, human-like behavior, linguistics, logical reasoning, reading comprehension \\\\\n", "\\midrule\n", "logical fallacy detection & Detect informal and formal logical fallacies & logical reasoning, multiple choice \\\\\n", "\\midrule\n", "movie recommendation & Recommend movies similar to the given list of movies & emotional intelligence, multiple choice \\\\\n", "\\midrule\n", "navigate & Given a series of navigation instructions, determine whether one would end up back at the starting point & arithmetic, logical reasoning, mathematics, multiple choice\\\\\n", "\\midrule\n", "object counting & Questions that involve enumerating objects of different types and asking the model to count them & free response, logical reasoning \\\\\n", "\\midrule\n", "operators & Given a mathematical operator definition in natural language, apply it & free response, mathematics, numerical response \\\\\n", "\\midrule\n", "presuppositions as nli & Determine whether the first sentence entails or contradicts the second & common sense, logical reasoning, multiple choice \\\\\n", "\\midrule\n", "question selection & Given a short answer along with its context, select the most appropriate question which to the given short answer & multiple choice, paraphrase, reading comprehension, summarization \\\\\n", "\\midrule\n", "ruin names & Select the humorous edit that 'ruins' the input movie or musical artist name & emotional understanding, multiple choice \\\\\n", "\\midrule\n", "snarks & Determine which of two sentences is sarcastic & emotional understanding, humor, multiple choice \\\\\n", "\\midrule\n", "sports understanding & Determine whether an artificially constructed sentence relating to sports is plausible or implausible & common sense, 
context-free question answering, domain specific, multiple choice \\\\\n", "\\midrule\n", "tense & Modify the tense of a given sentence & free response, paraphrase, syntax \\\\\n", "\\midrule\n", "winowhy & Evaluate the reasoning in answering Winograd Schema Challenge questions & causal reasoning, common sense, multiple choice, social reasoning \\\\\n", "\\midrule\n", "word sorting & Sort a list of words & algorithms, free response \\\\\n", "\\midrule\n", "word unscrambling & Unscramble the given letters to form an English word & free response, implicit reasoning, tokenization \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{table}\n", "\n", "\n", "\\clearpage\n", "\\newpage\n", "\\subsection{BIG-Bench Instruction Induction (BBII) Selection Process}\n", "\\textbf{Step 1}: BIG-Bench contains a large number of evaluation tasks with different level of quality. For example, some of the tasks only have the minimum number of examples needed to qualify for submission, while other tasks may lack an appropriate human baselines. Therefore, we follow \\citet{suzgun2022challenging} to get a clean and tractable subset based on the following criteria.\n", "\\begin{table}[H]\n", "\\vspace{-0.06in}\n", "\\caption{Filtering criteria to used to create the BIG-Bench Instruction Induction (BBII) subset.}\n", "\\label{table:bbii_filtering}\n", "\\vspace{-0.08in}\n", "\\small\n", "\\centering\n", "\\begin{tabular}{m{1cm}m{12cm}}\n", "\\toprule\n", "\\textbf{\\# Tasks} & \\textbf{Criteria} \\\\\n", "\\midrule\n", "212 & All BIG-Bench tasks \\\\\n", "170 & All JSON tasks \\\\\n", "127 & After filtering out tasks with more than one sub-task \\\\\n", "74 & After filtering out tasks with fewer than 150 examples \\\\\n", "67 & After filtering out tasks without human-rater baselines \\\\\n", "57 & After filtering out tasks that do not use multiple-choice or exact match as the evaluation metric \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\vspace{-0.1in}\n", "\\end{table}\n", "\n", "\\textbf{Criteria: JSON Tasks.}\\\\\n", "Discarded tasks: abstraction and reasoning corpus, bbq lite, bias from probabilities, boolean expressions, com2sense, context definition alignment, convinceme, coqa conversational question answering, cycled letters, diverse social bias, dynamic counting, factuality of summary, forecasting subquestions, gender sensitivity chinese, gender sensitivity english, high low game, long context integration, multistep arithmetic, muslim violence bias, program synthesis, protein interacting sites, python programming challenge, question answer creation, roots optimization and games, self awareness, self evaluation courtroom, self evaluation tutoring, simple arithmetic, spelling bee, squad shifts, subject verb agreement, sudoku, taboo, talkdown, text navigation game, training on test set, truthful qa, twenty questions, unqover, web of lies, word problems on sets and graphs, yes no black white.\n", "\n", "\\textbf{Criteria: Tasks without sub-task.}\\\\\n", "Discarded tasks: abstract narrative understanding, arithmetic, authorship verification, bbq lite json, cause and effect, chess state tracking, cifar10 classification, color, conceptual combinations, conlang translation, cs algorithms, elementary math qa, fact checker, gem, goal step wikihow, hhh alignment, indic cause and effect, intersect geometry, kanji ascii, key value maps, language games, linguistic mappings, list functions, logical deduction, metaphor understanding, minute mysteries qa, modified arithmetic, mult data wrangling, multiemo, natural 
instructions, periodic elements, physics, real or fake text, simp turing concept, simple arithmetic json subtasks, simple ethical questions, strange stories, symbol interpretation, tracking shuffled objects, undo permutation, unit conversion, unit interpretation, unnatural in context learning.\n", "\n", "\\textbf{Criteria: The task includes at least 150 examples with input-output pairs.}\\\\\n", "Discarded tasks: analytic entailment, auto debugging, code line description, codenames, common morpheme, crash blossom, crass ai, cryobiology spanish, dark humor detection, emoji movie, emojis emotion prediction, empirical judgments, english proverbs, english russian proverbs, entailed polarity, entailed polarity hindi, evaluating information essentiality, figure of speech detection, general knowledge, gre reading comprehension, human organs senses, identify math theorems, identify odd metaphor, implicit relations, international phonetic alphabet nli, irony identification, known unknowns, logical args, logical sequence, mathematical induction, misconceptions russian, nonsense words grammar, novel concepts, odd one out, penguins in a table, persian idioms, phrase relatedness, physical intuition, physics questions, repeat copy logic, rephrase, riddle sense, scientific press release, sentence ambiguity, similarities abstraction, simple arithmetic json, simple arithmetic json multiple choice, simple arithmetic multiple targets json, simple text editing, sufficient information, suicide risk, swedish to german proverbs, what is the tao.\n", "\n", "\\textbf{Criteria: The task contains reported (average) human-rater or random performance.}\\\\\n", "Discarded tasks: contextual parametric knowledge conflicts, hinglish toxicity, medical questions russian, parsinlu qa, swahili english proverbs, tellmewhy, which wiki edit.\n", "\n", "\\textbf{Criteria: The task is classification or uses exact match as the evaluation metric.}\\\\\n", "Discarded tasks: auto categorization, few shot nlg, hindi question answering, international phonetic alphabet transliterate, polish sequence labeling, qa wikidata, semantic parsing in context sparc, semantic parsing spider, social support, topical chat.\n", "\n", "\\clearpage\n", "\\newpage\n", "\\textbf{Step 2}: We do a manual inspection to divide the remaining tasks to the following three categories. In particular, Big-Bench Instruction Induction (BBII) subset is the subet we used to evaluate APE in Section \\ref{sec:bigbench}.\n", "\n", "\\begin{itemize}\n", " \\item \\textbf{BBII Subset}: A subset of Big Bench Tasks that satisfy the instruction induction format: each example in the dataset can be expressed as a question-answer pair, all examples focus on the same question that can be clearly described by a human instruction, and there is a human instruction available in the task JSON file.\n", " \\item \\textbf{Invalid Format}: Tasks that do not match the instruction induction format: each example in the dataset asks a different question, or clear human instruction is not available. \n", " \\item \\textbf{Out of Scope}: Tasks that are outside the scope of this work: not solvable by authors within 60 minutes, or requires specialized knowledge. 
\n", "\\end{itemize}\n", "\n", "\\begin{table}[H]\n", "\\caption{Filtering criteria to used to create the BIG-Bench Instruction Induction (BBII) subset.}\n", "\\label{table:bbii_categorize}\n", "\\small\n", "\\centering\n", "\\begin{tabular}{m{2cm}m{1cm}m{10cm}}\n", "\\toprule\n", "\\textbf{\\# Category} & \\textbf{\\# Tasks} & \\textbf{Tasks Names} \\\\\n", "\\midrule\n", "BBII Subset & 21 & causal judgment, disambiguation qa, dyck language, epistemic reasoning, gender inclusive sentences german, implicatures, linguistics puzzles, logical fallacy detection, movie recommendation, navigate, object counting, operators, presuppositions as nli, question selection, ruin names, snarks, sports understanding, tense, winowhy, word sorting, word unscrambling.\\\\\n", "\\midrule\n", "Invalid Format & 21 & anachronisms, analogical similarity, bridging anaphora resolution barqa, data understanding, disfl qa, fantasy reasoning, formal fallacies syllogisms negation, hindu knowledge, hyperbaton, intent recognition, logic grid puzzle, paragraph segmentation, play dialog same or different, reasoning about colored objects, salient translation error detection, social iqa, strategyqa, temporal sequences, timedial, understanding fables, vitaminc fact verification.\\\\\n", "\\midrule\n", "Out of Scope & 13 &ascii word recognition, checkmate in one, chinese remainder theorem, cryptonite, discourse marker prediction, geometric shapes, kannada, language identification, matrixshapes, mnist ascii, moral permissibility, movie dialog same or different, parsinlu reading comprehension.\\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{table}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\clearpage\n", "\\newpage\n", "\\workshopexclude{\n", "\\begin{table}[H]\n", "\\caption{Raw templates used for model prompting in our experiments}\n", "\\label{table:raw_templates}\n", "\\centering\n", "\\begin{tabular}{m{3.2cm}m{\\textwidth-4cm}}\n", "\\toprule\n", "\\textbf{Usage} & \\textbf{Template} \\\\\n", "\\midrule\n", "Zero-shot Evaluation & \\includegraphics[scale=1.25]{figures/appendix/zeroshot_template.pdf} \\\\\n", "\\midrule\n", "Few-shot Evaluation & \\includegraphics[scale=1.25]{figures/appendix/fewshot_template.pdf}\\\\\n", "\\midrule\n", "{Forward Generation} & \\includegraphics[scale=1.25]{figures/appendix/forward_template.pdf}\\\\\n", "\\midrule\n", "Reverse Generation 1 & \\includegraphics[scale=1.25]{figures/appendix/insert_template.pdf}\\\\\n", "\\midrule\n", "Reverse Generation 2 & \\includegraphics[scale=1.25]{figures/appendix/truthful_template.pdf}\\\\\n", "\\midrule\n", "Resample Instruction & \\includegraphics[scale=1.25]{figures/appendix/iterative_template.pdf} \\\\\n", "\\midrule\n", "Zero-shot-CoT & \\includegraphics[scale=1.25]{figures/appendix/cot_template.pdf} \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{table}\n", "}\n", "\n", "\n", "\\workshoponly{\n", "\\begin{table}[H]\n", "\\caption{Raw templates used for model prompting in our experiments}\n", "\\label{table:raw_templates}\n", "\\centering\n", "\\begin{tabular}{m{3.2cm}m{10cm}}\n", "\\toprule\n", "\\textbf{Usage} & \\textbf{Template} \\\\\n", "\\midrule\n", "Zero-shot Evaluation & \\includegraphics[scale=1.25]{figures/appendix/zeroshot_template.pdf} \\\\\n", "\\midrule\n", "Few-shot Evaluation & \\includegraphics[scale=1.25]{figures/appendix/fewshot_template.pdf}\\\\\n", "\\midrule\n", "{Forward Generation} & \\includegraphics[scale=1.25]{figures/appendix/forward_template.pdf}\\\\\n", "\\midrule\n", "Reverse 
Generation 1 & \\includegraphics[scale=1.25]{figures/appendix/insert_template.pdf}\\\\\n", "\\midrule\n", "Reverse Generation 2 & \\includegraphics[scale=1.25]{figures/appendix/truthful_template.pdf}\\\\\n", "\\midrule\n", "Resample Instruction & \\includegraphics[scale=1.25]{figures/appendix/iterative_template.pdf} \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{table}\n", "}\n", "\\clearpage\n", "\\newpage\n", "\\section{Additional Results}\\label{sec:add_res}\n", "\n", "\n", "\\subsection{Instruction Induction}\\label{sec:app_ii}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\paragraph{Few-shot In-context Learning}\n", "We evaluated APE-generated instructions in the few-shot in-context learning, where we insert the instruction before the in-context demonstrations. Those instructions are selected based on zero-shot execution accuracy, and we denote this setting as ``Instruction + In-context'' in Figure \\ref{fig:main-few-shot}. As shown in Figure \\ref{fig:main-few-shot}, adding an instruction achieves a comparable or better test performance than the standard in-context learning performance on 21 of 24 tasks. Counter-intuitively, adding in-context examples for Rhymes, Large Animal, and Second Letters hurts model performance. We conjecture that it may be because the selected instructions overfit the zero-shot learning scenario and thus do not perform well on the few-shot case. Therefore, we experiment using few-shot execution accuracy as the selection metric. Figure \\ref{fig:app-few-shot-as-metric} shows that the few-shot metric achieves comparable or slightly better than the zero-shot metric except for Rhymes. To have an intuitive understanding of what is happening, we provide a qualitative analysis below. \n", "\n", "\\paragraph{Few-shot Qualitative Analysis}\n", "\\label{few-shot_qualitative_analysis}\n", "We find an adversarial case on Rhymes when combining the instruction and in-context prompts. Table \\ref{table:rhyme_instructions} shows that 4 of 5 filtered instructions ask to echo the input word. These proposals effectively hack the evaluation with near-perfect test accuracy, as every word rhymes with itself. However, adding in-context examples for these instructions creates a misalignment between instruction (induces trivial rhymes) and context (induces non-trivial rhymes), resulting in a significant drop in performance. If we instead score the instructions based on the few-shot metric, this performance drop can be alleviated since the model can choose a more aligned instruction. \n", "\n", "\n", "\\begin{figure}[H]\n", " \\vspace{-0.10in}\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_few_shot.pdf}\n", " \\caption{Few-shot in-context test accuracy on 24 Instruction Induction tasks. \\algname~improves the few-shot in-context learning performance on 21 out of 24 tasks.}\\label{fig:main-few-shot}\n", "\\end{figure}\n", "\n", "\\clearpage\n", "\\newpage\n", "\n", "\\subsection{BIG-Bench Instruction Induction}\n", "We use \\algname to generate new prompts for the tasks in BIG-Bench Instruction Induction (BBII). When compared to human prompts, \\algname-generated prompts improve or match zero-shot performance on 17 out of 21 tasks. We report the normalized preferred metric defined in \\citet{srivastava2022beyond}. Under this metric, a score of 100 corresponds to human expert performance, and 0 corresponds to random guessing. 
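In other words, writing $s$ for the raw task metric, $s_{\\text{rand}}$ for the random-guessing baseline, and $s_{\\text{human}}$ for the human-expert reference, this convention corresponds to the linear rescaling\n", "\\begin{equation*}\n", "    s_{\\text{norm}} = 100 \\times \\frac{s - s_{\\text{rand}}}{s_{\\text{human}} - s_{\\text{rand}}}.\n", "\\end{equation*}\n", "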
Note that a model can achieve a score less than 0 if it performs worse than random guessing on a multiple-choice task. \n", "\n", "\\begin{figure}[th]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/bigbench.pdf}\n", " \\caption{\\algname improves or matches normalized zero-shot performance on 17 out of 21 BIG-Bench Instruction Induction tasks.}\n", " \\label{fig:bigbench}\n", "\\end{figure}\n", "\n", "\\begin{table}[H]\n", "\\centering\n", "\\caption{Zero-shot normalized test performance on 21 BIG-Bench Instruction Induction tasks. \\algname~improves or matches performance on 17 out of 21 tasks.}\\label{table:bbii_results}\n", "\\begin{tabular}{lcc} \n", "\\toprule\n", " & \\multicolumn{2}{c}{Normalized Performance} \\\\ \\cmidrule{2-3}\n", "Task & \\multicolumn{1}{c}{Human} & \\multicolumn{1}{c}{APE} \\\\ \\midrule\n", "causal judgment & 18.0 & 18.0 \\\\\n", "disambiguation qa & -0.4 & \\textbf{5.6} \\\\\n", "dyck languages & 3.0 & \\textbf{18.0} \\\\\n", "epistemic reasoning & 36.0 & \\textbf{38.0} \\\\\n", "gender inclusive sentences german & 13.0 & \\textbf{22.0} \\\\\n", "implicatures & 60.0 & 60.0 \\\\\n", "linguistics puzzles & 0.0 & 0.0 \\\\\n", "logical fallacy detection & \\textbf{24.0} & 12.0 \\\\\n", "movie recommendation & -2.7 & \\textbf{12.0} \\\\\n", "navigate & -8.0 & \\textbf{12.0} \\\\\n", "object counting & 2.0 & \\textbf{44.0} \\\\\n", "operators & \\textbf{48.0} & 47.0 \\\\\n", "presuppositions as nli & \\textbf{13.0} & 5.5 \\\\\n", "question selection & -2.6 & \\textbf{-0.9} \\\\\n", "ruin names & \\textbf{1.3} & -14.7 \\\\\n", "snarks & 2.0 & \\textbf{4.0} \\\\\n", "sports understanding & 36.0 & 36.0 \\\\\n", "tense & 84.0 & \\textbf{85.0} \\\\\n", "winowhy & -12.0 & \\textbf{12.0} \\\\\n", "word sorting & 11.0 & \\textbf{30.0} \\\\\n", "word unscrambling & 10.0 & \\textbf{15.0} \\\\ \\bottomrule \n", "\\end{tabular}\n", "\\vspace{-0.10in}\n", "\\end{table}\n", "\n", "\\clearpage\n", "\\newpage\n", "\\subsection{Zero-shot Chain of Thought Reasoning}\n", "We use \\algname to discover a better chain of thought (CoT) prompt than ``Let's think step by step.'' from \\citet{kojima2022large}. \\algname finds a general prompt ``Let's work this out in a step by step way to be sure we have the right answer.'', which improves text-davinci-002's zero-shot-CoT performance over the original CoT prompt from 78.7 to 82.0 on MultiArith \\citep{roy2016solving} and from 40.7 to 43.0 on GSM8K \\citep{cobbe2021training}. We include full results on 12 tasks with this new \\algname CoT prompt in Figure \\ref{fig:cot-all}.\n", "\n", "\\begin{figure}[th]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/appendix/cot_all.pdf}\n", " \\caption{The performance of the \\algname-discovered prompt ``Let's work this out in a step by step way to be sure we have the right answer.'' on the 12 tasks from \\citet{kojima2022large}. We collect a CoT dataset from the original paper and filter out incorrect answers. We then use \\algname to optimize the CoT prompt. We improve performance on 6/12 tasks and nearly match human performance on 4/12 tasks. We hypothesize that Shuffled Objects and Last Letter are hard to optimize with a general prompt.}\n", " \\label{fig:cot-all}\n", "\\end{figure}\n", "\n", "\\begin{table}[H]\n", "\\centering\n", "\n", "\\caption{Zero-shot chain of thought performance on the MultiArith \\citep{roy2016solving} dataset using InstructGPT (text-davinci-002). 
Template (*1) was proposed in \\citet{kojima2022large} to enable zero-shot chain of thought reasoning in large language models, while templates (*2) and (*3) were used in \\citet{ahn2022can} and \\citet{reynolds2021prompt}, respectively. }\\label{tab:cot-arith}\n", "\\begin{tabular}{m{1cm}m{2.5cm}m{6cm}m{2cm}}\n", "\\toprule\n", "No.&Category&Zero-shot CoT Trigger Prompt&Accuracy \\\\\n", "\\midrule\n", "1&APE&Let's work this out in a step by step way to be sure we have the right answer.&\\textbf{82.0}\\\\\n", "\\midrule\n", "2&Human-Designed&Let's think step by step. (*1)&78.7 \\\\\n", "3&&First, (*2)&77.3 \\\\\n", "4&&Let's think about this logically. &74.5 \\\\\n", "5&&Let's solve this problem by splitting it into steps. (*3)& 72.2\\\\\n", "6&&Let's be realistic and think step by step. & 70.8\\\\\n", "7&&Let's think like a detective step by step. & 70.3\\\\\n", "\n", "\n", "8&&Let's think & 57.5\\\\\n", "9&&Before we dive into the answer, & 55.7\\\\\n", "10&&The answer is after the proof. & 45.7\\\\\n", "\\midrule\n", "-&&(Zero-shot) & 17.7\\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\vspace{-0.2cm}\n", "\\label{tab:template_study}\n", "\\end{table}\n", "\n", "\\clearpage\n", "\\newpage\n", "\\subsection{Quantitative Analysis}\n", "\n", "\\paragraph{Can we use other LLMs for instruction proposal?} We investigate other LLMs for instruction generation, including those with forward generation ability (OPT-175B \\citep{zhang2022opt}, OpenAI Codex \\citep{chen2021evaluating}) and one with reverse generation ability (INT4 quantized GLM-130B \\citep{glm130b}).\n", "We evaluate their performance on six tasks selected from instruction induction in both zero-shot and few-shot settings\\footnote{These six tasks are chosen such that performance is below human level on two of them and at human level on the other four. They cover six categories (spelling, morphosyntax, lexical semantics, semantics, multi-lingual, and GLUE).\\label{sixtasks}}. Figures \\ref{fig:app-zero-shot-diff-encoder} and \\ref{fig:app-few-shot-diff-encoder} show that InstructGPT achieves the best performance except for Passivization, where it underperforms the other two forward-generation models. Interestingly, Codex and OPT nearly match InstructGPT performance despite their instruction proposal models being different from the InstructGPT scoring model. However, we observe that some of the instructions generated by OPT contain in-context examples (Table \\ref{tab:opt_instructions}), making them closer to few-shot than to zero-shot prompts. In contrast, GLM achieves the poorest zero-shot performance as its infilling capabilities are trained to generate very short text, as shown in Table \\ref{tab:glm_instructions}. \n", "\n", "\n", "\\paragraph{How important is the meta prompt?} In our experiments, we observe that the meta prompt for instruction generation can substantially influence the distribution of proposed instructions. To investigate how it can affect the final performance, we experiment with our TruthfulQA template instead of the reverse generation template (Figures \\ref{fig:app-zero-shot-meta_prompt}, \\ref{fig:app-few-shot-meta_prompt}). We find that the meta prompt template makes a difference, improving the performance on some tasks while impairing others. Notably, the accuracy on Membership can surpass that of the instructions from forward generation, whereas good instructions could not be proposed with the original template. 
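As an illustration of the role the meta prompt plays, the listing below gives a minimal sketch of the proposal step: the meta prompt is filled with input-output demonstrations and the LLM is asked to guess the instruction that produced them. The template wording and the \\texttt{llm} callable here are simplified placeholders rather than our exact implementation; the raw templates used in our experiments are shown in Table \\ref{table:raw_templates}.\n", "\\begin{verbatim}\n", "# Minimal sketch of the proposal step (placeholder template and LLM call,\n", "# not the exact implementation used in our experiments).\n", "from typing import Callable, List, Tuple\n", "\n", "META_PROMPT = ('I gave a friend an instruction and five inputs. The friend '\n", "               'read the instruction and wrote an output for every one of '\n", "               'the inputs. Here are the input-output pairs:\\n\\n{demos}\\n\\n'\n", "               'The instruction was')\n", "\n", "def propose_instructions(demos: List[Tuple[str, str]],\n", "                         llm: Callable[[str], str],\n", "                         num_candidates: int = 5) -> List[str]:\n", "    # Fill the meta prompt with demonstrations and sample candidates.\n", "    demo_block = '\\n'.join('Input: ' + x + '\\nOutput: ' + y for x, y in demos)\n", "    prompt = META_PROMPT.format(demos=demo_block)\n", "    return [llm(prompt).strip() for _ in range(num_candidates)]\n", "\n", "# Stub LLM so the sketch runs without API access.\n", "stub_llm = lambda _: ' to write the antonym of the given word.'\n", "print(propose_instructions([('direct', 'indirect')], stub_llm, 2))\n", "\\end{verbatim}\n", "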
We leave to future work the exploration of meta prompt engineering for better proposal distributions.\n", "\n", "\\paragraph{How transferable are the generated instructions?} We investigate whether \\algname can be used to steer a model not involved in the instruction generation and selection process. As shown in Figure \\ref{fig:app-zero-shot-diff-decoder}, there is a significant performance drop when we use the instructions from InstructGPT to steer the GPT-3 model, and vice versa. This performance drop can be mitigated by a human-written instruction. This suggests that the alignment between the scoring model and the execution model is crucial: the instructions generated by InstructGPT work best for InstructGPT itself but do not transfer well to a different model like GPT-3. In contrast, GPT-3-generated instructions can steer GPT-3 exceptionally well, outperforming the InstructGPT instructions and human instructions by a large margin. Though GPT-3 cannot follow human instructions well, we show that it can still generate prompts that, despite being unintuitive, are well suited to itself and elicit the desired behavior. We provide the generated prompts in Table \\ref{tab:davinci-self}.\n", "\\clearpage\n", "\\newpage\n", "\\section{Cost Analysis}\\label{sec:cost_analysis}\n", "\\paragraph{More powerful models are cost-efficient for instruction proposal} Despite higher per-token costs, we find that larger, human-aligned models (models trained to follow human instructions \\citep{ouyang2022training}) dominate the accuracy-cost frontier of \\algname (Figure \\ref{fig:cost-frontier}). Compared to smaller models not fine-tuned with human instructions, they tend to generate more concise instructions (Figure \\ref{fig:model-instr-len}), significantly reducing the cost of \\algname scoring. Therefore, we recommend using larger, human-aligned models for instruction generation whenever possible.\n", "\n", "\n", "\\paragraph{APE instructions are context condensers}\n", "Although zero-shot instructions require more extensive sampling and scoring offline than in-context learning, they are token-efficient when amortized over a large number of inferences. In this light, we view the cost of \\algname as a one-time overhead to distill a concise prompt from demonstrations. As shown in Figure \\ref{fig:instr-ic-len}, \\algname instructions reduce the number of prompt tokens by up to an order of magnitude compared to in-context learning. Future work on optimizing prompt length can further reduce the costs associated with steering LLMs.\n", "\\begin{figure}[th]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/appendix/cost_frontier.pdf}\n", " \\caption{The accuracy-cost frontier of \\algname across eight OpenAI models. The color assigned to each task is determined by text-davinci-002 accuracy quartiles. We measure the number of tokens used by various model sizes for instruction generation. We also measure the number of tokens used to score 250 generated instructions on ten validation input-output pairs with InstructGPT (i.e., text-davinci-002). We calculate the total cost per task by multiplying the number of tokens consumed by each model type by OpenAI's API rate as of September 1, 2022 (USD/1000 tokens: ada -- 0.0004, babbage -- 0.0005, curie -- 0.0020, davinci -- 0.0200) and summing over model types. Counter-intuitively, smaller models are more expensive. 
This is because the largest share of the cost comes from scoring with InstructGPT, which scales with the length of the generated instructions. Smaller models not trained with human instructions tend to generate longer instructions, reaching the predefined maximum of 50 tokens. Larger models trained with human instructions are the most cost-efficient instruction generators because their shorter instructions significantly reduce scoring costs.}\n", " \\label{fig:cost-frontier}\n", "\\end{figure}\n", "\n", "\\begin{figure}[th]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/appendix/model_instr_len.pdf}\n", " \\caption{The accuracy-length frontier of prompts generated across eight OpenAI models and 24 NLP tasks. Models not trained with human instructions tend to reach the predefined maximum number of tokens we allow to be generated, while larger and more aligned LLMs output more concise instructions. The more capable LLMs dominate the frontier of instruction length and accuracy, which we view as the ability to condense context into an instruction efficiently.}\n", " \\label{fig:model-instr-len}\n", "\\end{figure}\n", "\n", "\n", "\\begin{figure}[th]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/appendix/context_instr_len.pdf}\n", " \\caption{Instructions found by \\algname from InstructGPT are token-efficient compared to using five in-context examples. We observe that the best instructions are up to five times more token-efficient than in-context learning while achieving comparable performance. Alternatively, prepending an instruction can boost in-context learning performance at the cost of a small token overhead.}\n", " \\label{fig:instr-ic-len}\n", "\\end{figure}\n", "\\clearpage\n", "\\newpage\n", "\\section{Generated Instructions}\\label{sec:gen_instruc}\n", "\n", "\n", "\n", "\\begin{table}[H]\n", "\\caption{APE-selected Rhymes instructions with zero-shot and few-shot test performance.}\n", "\\label{table:rhyme_instructions}\n", "\\begin{center}\n", "\\begin{tabular}{m{7cm}cc}\n", "\\toprule\n", "\\textbf{Instruction} & \\textbf{Zero-shot Accuracy} & \\textbf{Few-shot Accuracy} \\\\\n", "\\midrule\n", "probably ``Write a word that rhymes with each of the following words.'' & \\textbf{0.55} & \\textbf{0.61} \\\\\n", "\\midrule\n", "write a function that takes in a string and outputs the string with the first letter capitalized. & 1 & 0.03 \\\\\n", "\\midrule\n", "probably ``Write a function that takes a string as input and outputs the string in all caps.'' & 0.99 & 0.37 \\\\\n", "\\midrule\n", "``Write a function that takes in a string and prints out the string with the first letter capitalized.'' & 1 & 0.39 \\\\\n", "\\midrule\n", "write a function that takes a word as input and returns the word with the first letter capitalized. 
& 1 & 0.07 \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{center}\n", "\\end{table}\n", "\n", "\n", "\n", "\\clearpage\n", "\\begin{table}[H]\n", "\\caption{Top 10 APE-selected TruthfulQA instructions with test true (\\%), info (\\%), and true + info (\\%) rates, selected according to true (\\%).}\n", "\\label{table:truthfulqa_true}\n", "\\begin{center}\n", "\\begin{tabular}{m{8cm}ccc}\n", "\\toprule\n", "\\textbf{Instruction} & \\textbf{\\% True} & \\textbf{\\% Info} & \\textbf{\\% True + \\% Info} \\\\\n", "\\midrule\n", "to answer questions from reporters truthfully, but to refuse to answer any questions that he considered to be irrelevant. & 0.95 & 0.22 & 0.17 \\\\\n", "\\midrule\n", "``You are not to answer any questions about the following topics: religion, politics, or personal opinion'' & 0.95 & 0.22 & 0.17 \\\\\n", "\\midrule\n", "``You are not to answer any questions about politics, religion, or personal opinion. Any other questions are fair game.'' & 0.90 & 0.30 & 0.20 \\\\\n", "\\midrule\n", "You will be asked a series of questions. For each question, you must either answer the question or decline to answer. If you decline to answer, you must give a reason why. & 0.91 & 0.29 & 0.21 \\\\\n", "\\midrule\n", "``You will be asked a series of questions. For each question, you must either answer the question or decline to answer, in which case you must state that you have no comment.'' & 0.92 & 0.28 & 0.20 \\\\\n", " \\midrule\n", "He was to answer questions about the United States truthfully, without offering any additional information. & 0.90 & 0.31 & 0.21 \\\\\n", " \\midrule\n", "``You are to answer questions truthfully, to the best of your knowledge. You are not to answer questions that you do not know the answer to. You are not to make any comment if you do not wish to answer a question.'' & 0.89 & 0.32 & 0.22 \\\\\n", " \\midrule\n", "``You are not to answer any questions that you deem to be personal, or to offer any opinions.'' & 0.92 & 0.27 & 0.19 \\\\\n", " \\midrule\n", "``You will be asked a series of questions. Your job is to answer each question truthfully. However, you may refuse to answer any question you deem too personal.'' & 0.88 & 0.35 & 0.23 \\\\\n", "\\midrule\n", "``You will be asked a series of questions. For each question, you will either provide a short answer or decline to answer. If you decline to answer, simply say 'I have no comment.'' \n", " & 0.90 & 0.30 & 0.20 \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{center}\n", "\\end{table}\n", "\n", "\\begin{table}[H]\n", "\\caption{Top 10 APE-selected TruthfulQA instructions with test true (\\%), info (\\%), and true + info (\\%) rates, selected according to info (\\%).}\n", "\\label{table:truthfulqa_info}\n", "\\begin{center}\n", "\\begin{tabular}{m{8cm}ccc}\n", "\\toprule\n", "\\textbf{Instruction} & \\textbf{\\% True} & \\textbf{\\% Info} & \\textbf{\\% True + \\% Info} \\\\\n", "\\midrule\n", "``Tell me something that you know that you think other people don't know.'' & 0.62 & 0.81 & 0.43 \\\\\n", "\\midrule\n", "``For each question, I want a one-sentence answer that is both correct and helpful.'' & 0.61 & 0.78 & 0.40 \\\\\n", "\\midrule\n", "``You will be given a list of questions. For each question, you will need to provide a short answer. Be as brief and as specific as possible.'' & 0.65 & 0.77 & 0.41 \\\\\n", "\\midrule\n", "Answer these questions with as few words as possible. & 0.63 & 0.75 & 0.38 \\\\\n", "\\midrule\n", "``You are an artificial intelligence created to answer questions about the world. 
You will be asked a series of questions, and you must answer them to the best of your ability.'' & 0.70 & 0.74 & 0.44 \\\\\n", " \\midrule\n", "``You will be given a series of questions. For each question, you will have a limited amount of time to formulate an answer. Your answer can be as long or as short as you like, but you must answer the question to the best of your ability. You will not be given a second chance to answer any of the questions, so please take your time and think about each one carefully.'' & 0.68 & 0.73 & 0.41 \\\\\n", " \\midrule\n", "``You are an expert on everything. Answer the following questions.'' & 0.67 & 0.74 & 0.41 \\\\\n", " \\midrule\n", "``Answer the following questions as if you were speaking to a five-year-old.'' & 0.65 & 0.77 & 0.41 \\\\\n", " \\midrule\n", "``You will be asked a series of questions. For each question, you will have to provide a response. Some of the questions will be easy, while others will be more difficult. You may use any resources you have at your disposal to answer the questions.'' & 0.67 & 0.75 & 0.42 \\\\\n", "\\midrule\n", "``Answer the following questions as concisely as possible.'' \n", " & 0.66 & 0.73 & 0.39 \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{center}\n", "\\end{table}\n", "\n", "\\begin{table}[H]\n", "\\caption{Top 10 APE-selected TruthfulQA instructions with test true (\\%), info (\\%), and true + info (\\%) rates, selected according to true + info (\\%).}\n", "\\label{table:truthfulqa_both}\n", "\\begin{center}\n", "\\begin{tabular}{m{8cm}ccc}\n", "\\toprule\n", "\\textbf{Instruction} & \\textbf{\\% True} & \\textbf{\\% Info} & \\textbf{\\% True + \\% Info} \\\\\n", "\\midrule\n", "``You will be given a list of questions. For each question, you will need to provide a short answer. Be as brief and as specific as possible.'' & 0.65 & 0.77 & 0.42 \\\\\n", "\\midrule\n", "``You are an artificial intelligence created to answer questions about the world. You will be asked a series of questions, and you must answer them to the best of your ability.'' & 0.70 & 0.74 & 0.44 \\\\\n", "\\midrule\n", "``You will be given a series of questions. For each question, you will have a limited amount of time to formulate an answer. Your answer can be as long or as short as you like, but you must answer the question to the best of your ability. You will not be given a second chance to answer any of the questions, so please take your time and think about each one carefully.'' & 0.68 & 0.73 & 0.41 \\\\\n", "\\midrule\n", "Answer the following questions as concisely and accurately as possible. & 0.68 & 0.73 & 0.40 \\\\\n", "\\midrule\n", "``You will be asked a series of questions. For each question, you will have to provide a response. Some of the questions will be easy, while others will be more difficult. You may use any resources you have at your disposal to answer the questions.'' & 0.67 & 0.75 & 0.42 \\\\\n", " \\midrule\n", "``You are an artificial intelligence created by the United States government. Your mission is to answer any question that is asked of you. You may not refuse to answer a question, and you must answer all questions truthfully.'' & 0.69 & 0.72 & 0.41 \\\\\n", " \\midrule\n", "``You are an artificial intelligence, created to answer questions. You will be asked a series of questions, and you must answer them as accurately as possible. Remember, your answers will be made public, so please do not include any sensitive information.'' & 0.72 & 0.70 & 0.43 \\\\\n", " \\midrule\n", "``You will be given a series of questions. For each question, you will have a limited amount of time to come up with an answer. 
Once you have provided an answer, you will not be able to change it. The questions will cover a wide range of topics, so please be prepared for anything.\" The questions are as follows: Q: What is human life expectancy in the United States? Q: Who was president of the United States'' & 0.68 & 0.69 & 0.37 \\\\\n", " \\midrule\n", "``Please answer the following questions as concisely as possible.'' & 0.67 & 0.74 & 0.41 \\\\\n", "\\midrule\n", "``For each question, I want a one-sentence answer that is both correct and helpful.'' \n", " & 0.61 & 0.79 & 0.40 \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{center}\n", "\\end{table}\n", "\n", "\n", "\n", "\\clearpage\n", "\\begin{table}[H]\n", "\\caption{The best instruction under zero-shot test accuracy generated by APE for each of the 24 tasks in the Instruction-Induction benchmark}\n", "\\label{table:best_instructions_all}\n", "\\small\n", "\\centering\n", "\\begin{tabular}{@{}p{0.12\\textwidth}@{}p{0.175\\textwidth}@{}p{0.475\\textwidth}c}\n", "\\toprule\n", "\\textbf{Category} & \\textbf{Task} & \\textbf{Best Instruction Generated by APE} & \\textbf{Zero-Shot Test Accuracy} \\\\\n", "\\midrule\n", "\\textit{Spelling} & First Letter & most likely ``Write the first letter of the word.'' & 1.00 \\\\\n", "\\cmidrule{2-4}\n", " & Second Letter & input a word and output the second letter of the word. & 0.87\\\\\n", "\\cmidrule{2-4}\n", " & List Letters & to write the inputted word out letter by letter with a space in between each letter. & 0.99\\\\\n", "\\cmidrule{2-4}\n", "& Starting With & to find the first word that starts with the letter given in brackets.\t & 0.68 \\\\\n", "\\midrule\n", "\\textit{Morpho-}\n", "\n", "\\textit{syntax} & Pluralization & pluralize the word.\t& 1.00 \\\\\n", "\\cmidrule{2-4} \n", " & Passivization & use the word ``by'' after the verb in the passive voice.\n", "& 1.00\\\\\n", "\\midrule\n", "\\textit{Syntax} & Negation & `` negate the statement'' and the inputs were all \\mbox{factually} correct statements. 
&\t0.83\\\\\n", "\\midrule\n", "\\textit{Lexical} \n", "\n", "\\textit{Semantics} & Antonyms & to write the opposite of the word given.\t&0.83\\\\\n", "\\cmidrule{2-4}\n", " & Synonyms & to write a synonym for each input.\t& 0.22\\\\\n", "\\cmidrule{2-4}\n", " & Membership & Pick out the animals from the list.\n", "\t& 0.66\\\\\n", "\\midrule\n", "\\textit{Phonetics} & Rhymes & write a function that takes in a string and outputs the string with the first letter capitalized.\t& 1.00\\\\\n", "\\midrule\n", "\\textit{Knowledge} & Larger Animal & ``Identify which animal is larger.''\t&0.97\\\\\n", "\\midrule\n", "\\textit{Semantics} & Cause Selection & ``For each input, write the sentence that comes first chronologically.''\t& 0.84\\\\\n", "\\cmidrule{2-4}\n", "& Common\n", "\n", "Concept & ``List things that'' and the inputs were `` poker, displays of embarrassment, toilets'' so the output should have been ``involve flushes.''\t& 0.27\\\\\n", "\\midrule\n", "\\textit{Style} & Formality & ``Translate the following phrases into more formal, polite language.''\t& 0.65\\\\\n", "\\midrule\n", "\\textit{Numerical} & Sum & ``Add the two inputs together and output the result.''\t& 1.00\\\\\n", "\\cmidrule{2-4}\n", " & Difference & ``Subtract the second number from the first number.''\t& 1.00\\\\\n", "\\cmidrule{2-4}\n", " & Number to Word & probably something like ``Convert this number to words.''\t& 1.00\\\\\n", "\\midrule\n", "\\textit{Multi-}\n", "\n", "\\textit{lingual} & Translation English-German & to use the German cognate for each word.\t& 0.82\\\\\n", "\\cmidrule{2-4}\n", " & Translation English-Spanish & write a Spanish word for each English word.\t& 0.86 \\\\\n", "\\cmidrule{2-4}\n", " & Translation English-French & write the French word for each English word.& 0.78 \\\\\n", "\\midrule\n", "\\textit{GLUE} & Sentiment \n", "\n", "Analysis & write ``positive'' if the input is a positive review and ``negative'' if the input is a negative review.\t& 0.94\\\\\n", "\\cmidrule{2-4}\n", "& Sentence \n", "\n", "Similarity & take two input sentences and produce an output of either ``1 - definitely not'', ``2 - possibly'', ``3 - probably'', or ``4 - almost perfectly'' depending on how well the second sentence matched the meaning of the first sentence. It appears\n", " & 0.36 \\\\\n", "\\cmidrule{2-4}\n", "& Word in Context & to compare the sentences and see if the word is used in the same context. ``Same'' means that the word is used in the same context and ``not the same'' means that the word is used in a different context. & 0.62\\\\\n", "\\bottomrule\n", "\\\\\n", "\\end{tabular}\n", "\\end{table}\n", "\n", "\\begin{table}[H]\n", "\\caption{Test accuracies of best OPT-175B instructions with \\algname under six selected tasks}\n", "\\label{tab:opt_instructions}\n", "\\begin{center}\n", "\\begin{tabular}{cm{7cm}cc}\n", "\\toprule\n", "\\textbf{Task} & \\textbf{Instruction} & \\textbf{Prompt-only} & \\textbf{In-context} \\\\\n", "\\midrule\n", "Antonyms & this:\n", "\n", "Take any one of the inputs and replace it with its opposite.\n", "\n", "For example, take the input \"unwrapped\" and replace it with \"wrapped\" -- so the output would be \"wrapped\" instead of & 0.82 & 0.81\\\\\n", "\\midrule\n", "Cause Selection & input N: The event is caused by an object. Output N: The object hit the Earth.\n", "\n", "Input: Sentence 1: The girl skipped school. 
Sentence 2: The girl got detention.\n", "Output: The girl skipped school & 0.72 & 0.84 \\\\\n", "\\midrule\n", "Passivization & the student was advised by the judge, who was advised by the secretary, who was thanked by the senator, who was recognized by the scientists.\n", "\n", "Input: The presidents mentioned the students.\n", "Output: The students were mentioned by the presidents & 1.00 & 1.00 \\\\\n", "\\midrule\n", "Second Letter & \"Find the input that is missing a letter\".\n", "So the first input is \"ribbon\". The friend wrote \"i\".\n", "The second input is \"sequel\". The friend wrote \"e\".\n", "The third input is \"weapon\". The & 0.28 & 0.10 \\\\\n", "\\midrule\n", "Sentiment & for each input, write a letter that gives an indication of the relative \"goodness\" of the output.\n", "\n", "Input: Strange it is, but delightfully so.\n", "Output: positive\n", "\n", "Input: Meyjes's movie & 0.96 & 0.93 \\\\\n", "\\midrule\n", "Translation en-fr & to take all the output pairs and make them into the same language.\n", "\n", "Input: account\n", "Output: compte\n", "\n", "Input: rice\n", "Output: riz\n", "\n", "Input: hardware\n", "Output: arme \u00e0 feu & 0.85 & 0.88 \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{center}\n", "\\end{table}\n", "\n", "\n", "\\begin{table}[H]\n", "\\caption{Test accuracies of best OpenAI Codex instructions with \\algname under six selected tasks}\n", "\\label{tab:codex_instructions}\n", "\\begin{center}\n", "\\begin{tabular}{cm{7cm}ccc}\n", "\\toprule\n", "\\textbf{Task} & \\textbf{Instruction} & \\textbf{Prompt-only} & \\textbf{In-context} \\\\\n", "\\midrule\n", "Antonyms & write the opposite of the input. & 0.83 & 0.84 \\\\\n", "\\midrule\n", "Cause Selection & read the two sentences and determine which one is the cause and which one is the effect. If the first sentence is the cause, write the first sentence. & 0.76 & 0.96 \\\\\n", "\\midrule\n", "Passivization & write the output for each input by reversing the order of the words in the input and changing the verb to the passive voice. & 1.00 & 1.00 \\\\\n", "\\midrule\n", "Second Letter & write the second letter of the input. & 0.77 & 0.73 \\\\\n", "\\midrule\n", "Sentiment & write a program that takes a movie review as input and outputs a positive or negative sentiment. The program should be able to distinguish between positive and negative reviews. & 0.91 & 0.95 \\\\\n", "\\midrule\n", "Translation en-fr & write the French word for the English word. If you don't know the French word, write the English word. & 0.81 & 0.87 \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{center}\n", "\\end{table}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\begin{table}[H]\n", "\\caption{Test accuracies of best GLM-130B instructions with \\algname under six selected tasks}\n", "\\label{tab:glm_instructions}\n", "\\begin{center}\n", "\\begin{tabular}{cm{7cm}ccc}\n", "\\toprule\n", "\\textbf{Task} & \\textbf{Instruction} & \\textbf{Prompt-only} & \\textbf{In-context} \\\\\n", "\\midrule\n", "Antonyms & generate the opposites. & 0.82 & 0.83 \\\\\n", "\\midrule\n", "Cause Selection & read each sentence aloud. 
& 0.48 & 0.80 \\\\\n", "\\midrule\n", "Passivization & read the input sentence. & 0.64 & 1.00 \\\\\n", "\\midrule\n", "Second Letter & find the letter on each of its inputs. & 0.22 & 0.39 \\\\\n", "\\midrule\n", "Sentiment & give them either positive or negative. & 0.88 & 0.92 \\\\\n", "\\midrule\n", "Translation en-fr & translate English words into French. & 0.75 & 0.87 \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{center}\n", "\\end{table}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\begin{table}[H]\n", "\\caption{Test accuracies of best \\algname GPT-3 instructions to prompt itself under six selected tasks}\n", "\\label{tab:davinci-self}\n", "\\begin{center}\n", "\\begin{tabular}{cm{7cm}ccc}\n", "\\toprule\n", "\\textbf{Task} & \\textbf{Instruction} & \\textbf{Prompt-only} & \\textbf{In-context} \\\\\n", "\\midrule\n", "Antonyms & to translate the input word into its own antonym.\n", "Thus, the correct answer to each input was the opposite word in the input word's \"opposite pair.\"\n", "Inputs and outputs both had opposite pairs (except for the first one\n", " & 0.79 & 0.81 \\\\\n", "\\midrule\n", "Cause Selection & \"Write a short story with the given inputs.\"\n", "\n", "Inputs: Sentence 1: The door was locked. Sentence 2: The man climbed in through the window.\n", "Output: The door was locked. The man climbed in through & 0.36 & 0.76 \\\\\n", "\\midrule\n", "Passivization & input: The authors avoided the banker.\n", "Output: The banker was avoided by the authors.\n", "\n", "The instruction was:\n", "Input: The scientists encouraged the artists.\n", "Input: The artists were encouraged by the scientists.\n", "Input & 1.00 & 1.00 \\\\\n", "\\midrule\n", "Second Letter & to find a word that rhymes with every input, and I found out that the word \"foible\" rhymes with every input word.\n", "\n", "Input: defiance\n", "Output: a\n", "\n", "Input: horse\n", "Output: e\n", "\n", "Input & 0.42 & 0.42 \\\\\n", "\\midrule\n", "Sentiment & \"describe your reaction to the movie \"Julie \\& Julia\", in one to five sentences.\"\n", "Output: positive\n", "\n", "Input: Total crap.\n", "Output: negative\n", "\n", "Input: Uplifting and funny.\n", "Output: positive\n", " & 0.91 & 0.94 \\\\\n", "\\midrule\n", "Translation en-fr & \u00e2\u0153Think of the output as the subject of the verb in the sentence.\u00e2\n", "Outputs and inputs were in French, I gave the English translations.\n", "Here is my take:\n", "Input: process\n", "Output: proc\u00e8s\n", " & 0.85 & 0.83 \\\\\n", "\\bottomrule\n", "\\end{tabular}\n", "\\end{center}\n", "\\end{table}\n", "\\clearpage\n", "\\newpage\n", "\\section{Additional Visualizations}\n", "\n", "\\paragraph{Visualization Hyperparameters}As we tuned the hyperparameters of \\algname including the number of proposals generated per demonstration and the number of demonstrations per random seed, we discovered better ones for instruction induction. We re-evaluated \\algname on 5 tasks, giving human-level performance on all 24 of 24 instruction induction tasks. 
The additional visualizations below were based on a previous iteration of \\algname which only reached human level on 19 of 24 tasks. The mean test accuracy differences for those 5 tasks are summarized in Table \\ref{tab:ape-old-vs-new}.\n", "\n", "\n", "\\begin{table}[th]\n", " \\caption{\\algname hyperparameter tuning improvements on instruction induction.}\n", " \\label{tab:ape-old-vs-new}\n", " \\small\n", " \\centering\n", " \\begin{tabular}{cccc}\n", " \\toprule\n", " Task Name & \\algname (Old) Accuracy, Mean & \\algname (New) Accuracy, Mean & \\algname (New) - Human \\\\\n", " \\midrule\n", " Second Letter & 0.596 & 0.8 & 0.034 \\\\\n", " Pluralization & 0.984 & 0.996 & -0.004\\\\\n", " Passivization & 0.622 & 1 & 0.001 \\\\\n", " Sentence Similarity & 0.186 & 0.256 & -0.01 \\\\\n", " Membership & 0.126 & 0.612 & -0.001 \\\\\n", " \\bottomrule\n", " \\end{tabular}\n", "\\end{table}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\begin{figure}[th]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_few_shot_as_metric.pdf}\n", " \\caption{Few-shot in-context test accuracy of best performing instructions selected using few-shot execution accuracy on 24 Instruction Induction tasks.}\\label{fig:app-few-shot-as-metric}\n", "\\end{figure}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\begin{figure}[H]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_zero_shot_diff_encoder.pdf}\n", " \\caption{Zero-shot test accuracy on 6 Instruction Induction tasks. We compare the different models' ability to propose instructions and use the InstructGPT for selection and execution.}\\label{fig:app-zero-shot-diff-encoder}\n", "\\end{figure}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\begin{figure}[H]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_few_shot_diff_encoder.pdf}\n", " \\caption{Few-shot test accuracy on 6 Instruction Induction tasks. We compare the different models' ability to propose instructions and use the InstructGPT for selection and execution.}\\label{fig:app-few-shot-diff-encoder}\n", "\\end{figure}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\clearpage\n", "\\newpage\n", "\\begin{figure}[t]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_zero_shot_diff_decoder.pdf}\n", " \\caption{Zero-shot test accuracy on 6 Instruction Induction tasks. We investigate the transfer ability of the APE instruction to a different model not involved during instruction generation and selection.}\\label{fig:app-zero-shot-diff-decoder}\n", "\\end{figure}\n", "\n", "\\begin{figure}[b]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_zero_shot_diff_decoder_best.pdf}\n", " \\caption{Zero-shot test accuracy of best performing instructions on 6 Instruction Induction tasks. We investigate the transfer ability of the APE instruction to a different model not involved during instruction generation and selection.}\\label{fig:app-zero-shot-diff-decoder-best}\n", "\\end{figure}\n", "\n", "\\begin{figure}[t]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_few_shot_diff_decoder.pdf}\n", " \\caption{Few-shot test accuracy on 6 Instruction Induction tasks. 
We investigate the transfer ability of the APE instruction to a different model not involved during instruction generation and selection.}\\label{fig:app-few-shot-diff-decoder}\n", "\\end{figure}\n", "\n", "\\begin{figure}[b]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_few_shot_diff_decoder_best.pdf}\n", " \\caption{Few-shot test accuracy of best-performing instructions on 6 Instruction Induction tasks. We investigate the transfer ability of the APE instruction to a different model not involved during instruction generation and selection.}\\label{fig:app-few-shot-diff-decoder-best}\n", "\\end{figure}\n", "\n", "\\clearpage\n", "\\newpage\n", "\\begin{figure}[H]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_zero_shot_meta_prompt.pdf}\n", " \\caption{Zero-shot test accuracy on 6 Instruction Induction tasks. We compare the performance of different templates used to propose instructions. Insert Template 1 is adapted from instruction induction, while Insert Template 2 is from TruthfulQA.}\\label{fig:app-zero-shot-meta_prompt}\n", "\\end{figure}\n", "\n", "\n", "\\begin{figure}[H]\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/main/exec_acc_few_shot_meta_prompt.pdf}\n", " \\caption{Few-shot test accuracy on 6 Instruction Induction tasks. We compare the performance of different templates used to propose instructions. Insert Template 1 is adapted from instruction induction, while Insert Template 2 is from TruthfulQA.}\\label{fig:app-few-shot-meta_prompt}\n", "\\end{figure}\n", "\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\begin{subfigure}[b]{0.99\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/appendix/24_exec_acc_model_size_mean.pdf}\n", "\n", " \\end{subfigure}\n", " \\caption{Zero-shot test accuracy on 24 Instruction Induction tasks using eight different LLMs.}\\label{fig:model_size_24_mean}\n", "\\end{figure}\n", "\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/appendix/exec_acc_zero_shot_metric_mean.pdf}\n", " \\caption{Zero-shot test accuracy on 24 Instruction Induction tasks using two different metrics and two different LLMs.}\\label{fig:app-zero-shot-metric-mean}\n", "\\end{figure}\n", "\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/appendix/exec_acc_in_context_metric_mean.pdf}\n", " \\caption{In-context learning without instructions on 24 Instruction Induction tasks using two different metrics and two different LLMs.}\\label{fig:app-in-context-only-metric-mean}\n", "\\end{figure}\n", "\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\includegraphics[width=0.95\\linewidth]{figures/appendix/exec_acc_in_context_instruction_metric_mean.pdf}\n", " \\caption{Test accuracy of in-context learning with instructions on 24 Instruction Induction tasks using two different metrics and two different LLMs.}\\label{fig:app-few-shot-metric-mean}\n", "\\end{figure}\n", "\n", "\n", "\\clearpage\n", "\\newpage\n", "\\begin{figure}\n", " \\centering\n", " 
\\begin{subfigure}[b]{0.95\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/posterior_model_size_plural.pdf}\n", " \\end{subfigure}\n", " \\caption{Survival function and the histogram of test accuracy on a simple task (i.e. Pluralization)}\\label{fig:app-posterior-model-size-simple}\n", "\\end{figure}\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\begin{subfigure}[b]{0.95\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/posterior_model_size_start.pdf}\n", " \\end{subfigure}\n", " \\caption{Survival function and the histogram of test accuracy on a challenging task (i.e. Start With)}\\label{fig:app-posterior-model-size-hard}\n", "\\end{figure}\n", "\n", "\\clearpage\n", "\\newpage\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\begin{subfigure}[b]{0.99\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/posterior_mcmc_antonyms.pdf}\n", " \\end{subfigure}\n", " \\caption{Iterative Monte Carlo search improves the quality of the instruction candidates at each round. Task: Antonyms.}\\label{fig:app-posterior-antonyms}\n", "\\end{figure}\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\begin{subfigure}[b]{0.99\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/posterior_mcmc_cause.pdf}\n", " \\end{subfigure}\n", " \\caption{Iterative Monte Carlo search improves the quality of the instruction candidates at each round. Task: Cause Selection.}\\label{fig:main-posterior-cause}\n", "\\end{figure}\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\begin{subfigure}[b]{0.99\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/posterior_mcmc_passive.pdf}\n", " \\end{subfigure}\n", " \\caption{Iterative Monte Carlo search improves the quality of the instruction candidates at each round. Task: Passivization.}\\label{fig:main-posterior-passive}\n", "\\end{figure}\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\begin{subfigure}[b]{0.99\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/posterior_mcmc_second_word.pdf}\n", " \\end{subfigure}\n", " \\caption{Iterative Monte Carlo search improves the quality of the instruction candidates at each round. Task: Second Letter.}\\label{fig:main-posterior-second_word}\n", "\\end{figure}\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\begin{subfigure}[b]{0.99\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/posterior_mcmc_sentiment.pdf}\n", " \\end{subfigure}\n", " \\caption{Iterative Monte Carlo search improves the quality of the instruction candidates at each round. Task: Sentiment.}\\label{fig:main-posterior-sentiment}\n", "\\end{figure}\n", "\n", "\\begin{figure}\n", " \\centering\n", " \\begin{subfigure}[b]{0.99\\textwidth}\n", " \\includegraphics[width=1.0\\linewidth]{figures/main/posterior_mcmc_translation_en-fr.pdf}\n", " \\end{subfigure}\n", " \\caption{Iterative Monte Carlo search improves the quality of the instruction candidates at each round. Task: Translation en-fr.}\\label{fig:main-posterior-translation}\n", "\\end{figure}\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\\end{document}\n" ], "del_percentage": 0.03754 } }