|
{ |
|
"id": "2209.01975", |
|
"annotator": "jiangshu", |
|
"input": [ |
|
"\\documentclass{article} \n", |
|
"\\usepackage{iclr2022_conference,times}\n", |
|
"\\usepackage[utf8]{inputenc} \n", |
|
"\\usepackage[T1]{fontenc} \n", |
|
"\\usepackage{hyperref} \n", |
|
"\\usepackage{url} \n", |
|
"\\usepackage{booktabs} \n", |
|
"\\usepackage{amsfonts} \n", |
|
"\\usepackage{nicefrac} \n", |
|
"\\usepackage{microtype} \n", |
|
"\\usepackage{xcolor} \n", |
|
"\\usepackage{standalone}\n", |
|
"\\usepackage{latexsym}\n", |
|
"\\usepackage{amsmath}\n", |
|
"\\usepackage{amssymb}\n", |
|
"\\usepackage{amsthm}\n", |
|
"\\usepackage{graphicx}\n", |
|
"\\usepackage{array}\n", |
|
"\\usepackage{tabu}\n", |
|
"\\usepackage{makecell}\n", |
|
"\\usepackage{paralist}\n", |
|
"\\usepackage{cases}\n", |
|
"\\usepackage{diagbox}\n", |
|
"\\usepackage{enumitem}\n", |
|
"\\usepackage{soul}\n", |
|
"\\usepackage{multirow}\n", |
|
"\\usepackage{verbatim}\n", |
|
"\\usepackage{tabulary}\n", |
|
"\\usepackage{booktabs}\n", |
|
"\\usepackage[mathscr]{euscript}\n", |
|
"\\usepackage{mathtools}\n", |
|
"\\usepackage{algorithm}\n", |
|
"\\usepackage{algpseudocode}\n", |
|
"\\usepackage{stmaryrd}\n", |
|
"\\usepackage{tikz-dependency}\n", |
|
"\\usepackage{subcaption}\n", |
|
"\\usetikzlibrary{automata,decorations.markings,arrows,positioning,matrix,calc,patterns,angles,quotes,calc}\n", |
|
"\\usepackage{adjustbox}\n", |
|
"\\usepackage{tabularx}\n", |
|
"\\usepackage{xspace}\n", |
|
"\\usepackage{tabulary}\n", |
|
"\\usepackage{afterpage}\n", |
|
"\\usepackage{hyperref}\n", |
|
"\\usepackage{url}\n", |
|
"\\usepackage{bm}\n", |
|
"\\usepackage{color}\n", |
|
"\\usepackage{graphicx}\n", |
|
"\\usepackage{slashbox}\n", |
|
"\\usepackage[toc,page]{appendix}\n", |
|
"\\usepackage{makecell}\n", |
|
"\\usepackage{boldline}\n", |
|
"\\usepackage{bbm}\n", |
|
"\\usepackage{wrapfig,lipsum,booktabs}\n", |
|
"\\definecolor{orange}{rgb}{1,0.5,0}\n", |
|
"\\definecolor{mdgreen}{rgb}{0.05,0.6,0.05}\n", |
|
"\\definecolor{mdblue}{rgb}{0,0,0.7}\n", |
|
"\\definecolor{dkblue}{rgb}{0,0,0.5}\n", |
|
"\\definecolor{dkgray}{rgb}{0.3,0.3,0.3}\n", |
|
"\\definecolor{slate}{rgb}{0.25,0.25,0.4}\n", |
|
"\\definecolor{gray}{rgb}{0.5,0.5,0.5}\n", |
|
"\\definecolor{ltgray}{rgb}{0.7,0.7,0.7}\n", |
|
"\\definecolor{purple}{rgb}{0.7,0,1.0}\n", |
|
"\\definecolor{lavender}{rgb}{0.65,0.55,1.0}\n", |
|
"\\definecolor{mypurple}{RGB}{111,61,121}\n", |
|
"\\definecolor{myblue}{RGB}{46,88,180}\n", |
|
"\\definecolor{myred}{RGB}{181,68,106}\n", |
|
"\\definecolor{myyellow}{RGB}{204,143,55}\n", |
|
"\\definecolor{amber}{rgb}{1.0, 0.75, 0.0}\n", |
|
"\\newcommand{\\textred}[1]{\\textcolor{red}{#1}}\n", |
|
"\\newcommand{\\textblue}[1]{\\textcolor{blue}{#1}}\n", |
|
"\\newcommand{\\ensuretext}[1]{#1}\n", |
|
"\\newcommand{\\marker}[2]{\\ensuremath{^{\\textsc{#1}}_{\\textsc{#2}}}}\n", |
|
"\\newcommand{\\arkcomment}[3]{\\ensuretext{\\textcolor{#3}{[#1 #2]}}}\n", |
|
"\\newcommand{\\nascomment}[1]{\\arkcomment{\\marker{NA}{S}}{#1}{blue}}\n", |
|
"\\newcommand{\\jungo}[1]{\\arkcomment{\\marker{J}{K}}{#1}{brown}}\n", |
|
"\\newcommand{\\tao}[1]{\\arkcomment{\\marker{T}{Y}}{#1}{orange}}\n", |
|
"\\newcommand{\\hongjin}[1]{\\arkcomment{\\marker{H}{S}}{#1}{gray}}\n", |
|
"\\newcommand{\\tianlu}[1]{\\arkcomment{\\marker{T}{W}}{#1}{purple}}\n", |
|
"\\newcommand{\\chen}[1]{\\arkcomment{\\marker{C}{W}}{#1}{violet}}\n", |
|
"\\newcommand{\\rui}[1]{\\arkcomment{\\marker{R}{Z}}{#1}{amber}}\n", |
|
"\\newcommand{\\weijia}[1]{\\arkcomment{\\marker{W}{S}}{#1}{green}}\n", |
|
"\\newcommand{\\luke}[1]{\\arkcomment{\\marker{L}{Z}}{#1}{cyan}}\n", |
|
"\\newcommand{\\revcolor}{red}\n", |
|
"\\newcommand{\\rev}[1]{{#1}}\n", |
|
"\\newcommand{\\term}[1]{\\textbf{#1}} \n", |
|
"\\newcommand{\\tabincell}[2]{\\begin{tabular}{@{}#1@{}}#2\\end{tabular}}\n", |
|
"\\newcommand{\\interalia}[1]{\\citep[\\emph{inter alia}]{#1}}\n", |
|
"\\newcommand{\\argmaxinline}[1]{\\operatorname{argmax}_{#1}}\n", |
|
"\\newcommand{\\argmininline}[1]{\\operatorname{argmin}_{#1}}\n", |
|
"\\newcommand{\\argmaxname}{\\operatorname{argmax}}\n", |
|
"\\newcommand{\\argminname}{\\operatorname{argmin}}\n", |
|
"\\newcommand{\\relu}{\\operatorname{ReLU}}\n", |
|
"\\newcommand{\\rulesep}{\\unskip\\ \\vrule\\ }\n", |
|
"\\newcommand{\\norm}[1]{\\left\\lVert#1\\right\\rVert}\n", |
|
"\\DeclareSymbolFont{extraup}{U}{zavm}{m}{n}\n", |
|
"\\DeclareMathSymbol{\\vardiamond}{\\mathalpha}{extraup}{87}\n", |
|
"\\newcolumntype{L}[1]{>{\\raggedright\\let\\newline\\\\\\arraybackslash\\hspace{0pt}}m{#1}}\n", |
|
"\\newcolumntype{C}[1]{>{\\centering\\let\\newline\\\\\\arraybackslash\\hspace{0pt}}m{#1}}\n", |
|
"\\newcolumntype{R}[1]{>{\\raggedleft\\let\\newline\\\\\\arraybackslash\\hspace{0pt}}m{#1}}\n", |
|
"\\newtheorem{theorem}{Theorem}\n", |
|
"\\newtheorem{lemma}[theorem]{Lemma}\n", |
|
"\\newtheorem{proposition}[theorem]{Proposition}\n", |
|
"\\newtheorem{corollary}[theorem]{Corollary}\n", |
|
"\\theoremstyle{definition}\n", |
|
"\\newtheorem{definition}[theorem]{Definition}\n", |
|
"\\newtheorem{example}[theorem]{Example}\n", |
|
"\\theoremstyle{remark}\n", |
|
"\\newtheorem{remark}[theorem]{Remark}\n", |
|
"\\newcommand*{\\QEDA}{\\hfill\\ensuremath{\\blacksquare}}\n", |
|
"\\newcommand*{\\QEDB}{\\hfill\\ensuremath{\\square}}\n", |
|
"\\algrenewcommand{\\algorithmiccomment}[1]{\\leavevmode$\\triangleright$ #1}\n", |
|
"\\newcommand{\\pd}[2]{\\frac{\\partial #1}{\\partial #2}}\n", |
|
"\\setul{1pt}{.4pt}\n", |
|
"\\DeclareCaptionFont{tiny}{\\tiny}\n", |
|
"\\usepackage[shortcuts]{extdash} \n", |
|
"\\usepackage{blindtext}\n", |
|
"\\usepackage{graphicx}\n", |
|
"\\usepackage{capt-of}\n", |
|
"\\usepackage{booktabs}\n", |
|
"\\usepackage{varwidth}\n", |
|
"\\newsavebox\\tmpbox\n", |
|
"\\usepackage{amsmath,amsfonts,bm}\n", |
|
"\\newcommand{\\figleft}{{\\em (Left)}}\n", |
|
"\\newcommand{\\figcenter}{{\\em (Center)}}\n", |
|
"\\newcommand{\\figright}{{\\em (Right)}}\n", |
|
"\\newcommand{\\figtop}{{\\em (Top)}}\n", |
|
"\\newcommand{\\figbottom}{{\\em (Bottom)}}\n", |
|
"\\newcommand{\\captiona}{{\\em (a)}}\n", |
|
"\\newcommand{\\captionb}{{\\em (b)}}\n", |
|
"\\newcommand{\\captionc}{{\\em (c)}}\n", |
|
"\\newcommand{\\captiond}{{\\em (d)}}\n", |
|
"\\newcommand{\\newterm}[1]{{\\bf #1}}\n", |
|
"\\def\\figref#1{figure~\\ref{#1}}\n", |
|
"\\def\\Figref#1{Figure~\\ref{#1}}\n", |
|
"\\def\\twofigref#1#2{figures \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\quadfigref#1#2#3#4{figures \\ref{#1}, \\ref{#2}, \\ref{#3} and \\ref{#4}}\n", |
|
"\\def\\secref#1{section~\\ref{#1}}\n", |
|
"\\def\\Secref#1{Section~\\ref{#1}}\n", |
|
"\\def\\twosecrefs#1#2{sections \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\secrefs#1#2#3{sections \\ref{#1}, \\ref{#2} and \\ref{#3}}\n", |
|
"\\def\\eqref#1{equation~\\ref{#1}}\n", |
|
"\\def\\Eqref#1{Equation~\\ref{#1}}\n", |
|
"\\def\\plaineqref#1{\\ref{#1}}\n", |
|
"\\def\\chapref#1{chapter~\\ref{#1}}\n", |
|
"\\def\\Chapref#1{Chapter~\\ref{#1}}\n", |
|
"\\def\\rangechapref#1#2{chapters\\ref{#1}--\\ref{#2}}\n", |
|
"\\def\\algref#1{algorithm~\\ref{#1}}\n", |
|
"\\def\\Algref#1{Algorithm~\\ref{#1}}\n", |
|
"\\def\\twoalgref#1#2{algorithms \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\Twoalgref#1#2{Algorithms \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\partref#1{part~\\ref{#1}}\n", |
|
"\\def\\Partref#1{Part~\\ref{#1}}\n", |
|
"\\def\\twopartref#1#2{parts \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\ceil#1{\\lceil #1 \\rceil}\n", |
|
"\\def\\floor#1{\\lfloor #1 \\rfloor}\n", |
|
"\\def\\1{\\bm{1}}\n", |
|
"\\newcommand{\\train}{\\mathcal{D}}\n", |
|
"\\newcommand{\\valid}{\\mathcal{D_{\\mathrm{valid}}}}\n", |
|
"\\newcommand{\\test}{\\mathcal{D_{\\mathrm{test}}}}\n", |
|
"\\def\\eps{{\\epsilon}}\n", |
|
"\\def\\reta{{\\textnormal{$\\eta$}}}\n", |
|
"\\def\\ra{{\\textnormal{a}}}\n", |
|
"\\def\\rb{{\\textnormal{b}}}\n", |
|
"\\def\\rc{{\\textnormal{c}}}\n", |
|
"\\def\\rd{{\\textnormal{d}}}\n", |
|
"\\def\\re{{\\textnormal{e}}}\n", |
|
"\\def\\rf{{\\textnormal{f}}}\n", |
|
"\\def\\rg{{\\textnormal{g}}}\n", |
|
"\\def\\rh{{\\textnormal{h}}}\n", |
|
"\\def\\ri{{\\textnormal{i}}}\n", |
|
"\\def\\rj{{\\textnormal{j}}}\n", |
|
"\\def\\rk{{\\textnormal{k}}}\n", |
|
"\\def\\rl{{\\textnormal{l}}}\n", |
|
"\\def\\rn{{\\textnormal{n}}}\n", |
|
"\\def\\ro{{\\textnormal{o}}}\n", |
|
"\\def\\rp{{\\textnormal{p}}}\n", |
|
"\\def\\rq{{\\textnormal{q}}}\n", |
|
"\\def\\rr{{\\textnormal{r}}}\n", |
|
"\\def\\rs{{\\textnormal{s}}}\n", |
|
"\\def\\rt{{\\textnormal{t}}}\n", |
|
"\\def\\ru{{\\textnormal{u}}}\n", |
|
"\\def\\rv{{\\textnormal{v}}}\n", |
|
"\\def\\rw{{\\textnormal{w}}}\n", |
|
"\\def\\rx{{\\textnormal{x}}}\n", |
|
"\\def\\ry{{\\textnormal{y}}}\n", |
|
"\\def\\rz{{\\textnormal{z}}}\n", |
|
"\\def\\rvepsilon{{\\mathbf{\\epsilon}}}\n", |
|
"\\def\\rvtheta{{\\mathbf{\\theta}}}\n", |
|
"\\def\\rva{{\\mathbf{a}}}\n", |
|
"\\def\\rvb{{\\mathbf{b}}}\n", |
|
"\\def\\rvc{{\\mathbf{c}}}\n", |
|
"\\def\\rvd{{\\mathbf{d}}}\n", |
|
"\\def\\rve{{\\mathbf{e}}}\n", |
|
"\\def\\rvf{{\\mathbf{f}}}\n", |
|
"\\def\\rvg{{\\mathbf{g}}}\n", |
|
"\\def\\rvh{{\\mathbf{h}}}\n", |
|
"\\def\\rvu{{\\mathbf{i}}}\n", |
|
"\\def\\rvj{{\\mathbf{j}}}\n", |
|
"\\def\\rvk{{\\mathbf{k}}}\n", |
|
"\\def\\rvl{{\\mathbf{l}}}\n", |
|
"\\def\\rvm{{\\mathbf{m}}}\n", |
|
"\\def\\rvn{{\\mathbf{n}}}\n", |
|
"\\def\\rvo{{\\mathbf{o}}}\n", |
|
"\\def\\rvp{{\\mathbf{p}}}\n", |
|
"\\def\\rvq{{\\mathbf{q}}}\n", |
|
"\\def\\rvr{{\\mathbf{r}}}\n", |
|
"\\def\\rvs{{\\mathbf{s}}}\n", |
|
"\\def\\rvt{{\\mathbf{t}}}\n", |
|
"\\def\\rvu{{\\mathbf{u}}}\n", |
|
"\\def\\rvv{{\\mathbf{v}}}\n", |
|
"\\def\\rvw{{\\mathbf{w}}}\n", |
|
"\\def\\rvx{{\\mathbf{x}}}\n", |
|
"\\def\\rvy{{\\mathbf{y}}}\n", |
|
"\\def\\rvz{{\\mathbf{z}}}\n", |
|
"\\def\\erva{{\\textnormal{a}}}\n", |
|
"\\def\\ervb{{\\textnormal{b}}}\n", |
|
"\\def\\ervc{{\\textnormal{c}}}\n", |
|
"\\def\\ervd{{\\textnormal{d}}}\n", |
|
"\\def\\erve{{\\textnormal{e}}}\n", |
|
"\\def\\ervf{{\\textnormal{f}}}\n", |
|
"\\def\\ervg{{\\textnormal{g}}}\n", |
|
"\\def\\ervh{{\\textnormal{h}}}\n", |
|
"\\def\\ervi{{\\textnormal{i}}}\n", |
|
"\\def\\ervj{{\\textnormal{j}}}\n", |
|
"\\def\\ervk{{\\textnormal{k}}}\n", |
|
"\\def\\ervl{{\\textnormal{l}}}\n", |
|
"\\def\\ervm{{\\textnormal{m}}}\n", |
|
"\\def\\ervn{{\\textnormal{n}}}\n", |
|
"\\def\\ervo{{\\textnormal{o}}}\n", |
|
"\\def\\ervp{{\\textnormal{p}}}\n", |
|
"\\def\\ervq{{\\textnormal{q}}}\n", |
|
"\\def\\ervr{{\\textnormal{r}}}\n", |
|
"\\def\\ervs{{\\textnormal{s}}}\n", |
|
"\\def\\ervt{{\\textnormal{t}}}\n", |
|
"\\def\\ervu{{\\textnormal{u}}}\n", |
|
"\\def\\ervv{{\\textnormal{v}}}\n", |
|
"\\def\\ervw{{\\textnormal{w}}}\n", |
|
"\\def\\ervx{{\\textnormal{x}}}\n", |
|
"\\def\\ervy{{\\textnormal{y}}}\n", |
|
"\\def\\ervz{{\\textnormal{z}}}\n", |
|
"\\def\\rmA{{\\mathbf{A}}}\n", |
|
"\\def\\rmB{{\\mathbf{B}}}\n", |
|
"\\def\\rmC{{\\mathbf{C}}}\n", |
|
"\\def\\rmD{{\\mathbf{D}}}\n", |
|
"\\def\\rmE{{\\mathbf{E}}}\n", |
|
"\\def\\rmF{{\\mathbf{F}}}\n", |
|
"\\def\\rmG{{\\mathbf{G}}}\n", |
|
"\\def\\rmH{{\\mathbf{H}}}\n", |
|
"\\def\\rmI{{\\mathbf{I}}}\n", |
|
"\\def\\rmJ{{\\mathbf{J}}}\n", |
|
"\\def\\rmK{{\\mathbf{K}}}\n", |
|
"\\def\\rmL{{\\mathbf{L}}}\n", |
|
"\\def\\rmM{{\\mathbf{M}}}\n", |
|
"\\def\\rmN{{\\mathbf{N}}}\n", |
|
"\\def\\rmO{{\\mathbf{O}}}\n", |
|
"\\def\\rmP{{\\mathbf{P}}}\n", |
|
"\\def\\rmQ{{\\mathbf{Q}}}\n", |
|
"\\def\\rmR{{\\mathbf{R}}}\n", |
|
"\\def\\rmS{{\\mathbf{S}}}\n", |
|
"\\def\\rmT{{\\mathbf{T}}}\n", |
|
"\\def\\rmU{{\\mathbf{U}}}\n", |
|
"\\def\\rmV{{\\mathbf{V}}}\n", |
|
"\\def\\rmW{{\\mathbf{W}}}\n", |
|
"\\def\\rmX{{\\mathbf{X}}}\n", |
|
"\\def\\rmY{{\\mathbf{Y}}}\n", |
|
"\\def\\rmZ{{\\mathbf{Z}}}\n", |
|
"\\def\\ermA{{\\textnormal{A}}}\n", |
|
"\\def\\ermB{{\\textnormal{B}}}\n", |
|
"\\def\\ermC{{\\textnormal{C}}}\n", |
|
"\\def\\ermD{{\\textnormal{D}}}\n", |
|
"\\def\\ermE{{\\textnormal{E}}}\n", |
|
"\\def\\ermF{{\\textnormal{F}}}\n", |
|
"\\def\\ermG{{\\textnormal{G}}}\n", |
|
"\\def\\ermH{{\\textnormal{H}}}\n", |
|
"\\def\\ermI{{\\textnormal{I}}}\n", |
|
"\\def\\ermJ{{\\textnormal{J}}}\n", |
|
"\\def\\ermK{{\\textnormal{K}}}\n", |
|
"\\def\\ermL{{\\textnormal{L}}}\n", |
|
"\\def\\ermM{{\\textnormal{M}}}\n", |
|
"\\def\\ermN{{\\textnormal{N}}}\n", |
|
"\\def\\ermO{{\\textnormal{O}}}\n", |
|
"\\def\\ermP{{\\textnormal{P}}}\n", |
|
"\\def\\ermQ{{\\textnormal{Q}}}\n", |
|
"\\def\\ermR{{\\textnormal{R}}}\n", |
|
"\\def\\ermS{{\\textnormal{S}}}\n", |
|
"\\def\\ermT{{\\textnormal{T}}}\n", |
|
"\\def\\ermU{{\\textnormal{U}}}\n", |
|
"\\def\\ermV{{\\textnormal{V}}}\n", |
|
"\\def\\ermW{{\\textnormal{W}}}\n", |
|
"\\def\\ermX{{\\textnormal{X}}}\n", |
|
"\\def\\ermY{{\\textnormal{Y}}}\n", |
|
"\\def\\ermZ{{\\textnormal{Z}}}\n", |
|
"\\def\\vzero{{\\mathbf{0}}}\n", |
|
"\\def\\vone{{\\mathbf{1}}}\n", |
|
"\\def\\vmu{{\\bm{\\mu}}}\n", |
|
"\\def\\vtheta{{\\bm{\\theta}}}\n", |
|
"\\def\\va{{\\mathbf{a}}}\n", |
|
"\\def\\vb{{\\mathbf{b}}}\n", |
|
"\\def\\vc{{\\mathbf{c}}}\n", |
|
"\\def\\vd{{\\mathbf{d}}}\n", |
|
"\\def\\ve{{\\mathbf{e}}}\n", |
|
"\\def\\vf{{\\mathbf{f}}}\n", |
|
"\\def\\vg{{\\mathbf{g}}}\n", |
|
"\\def\\vh{{\\mathbf{h}}}\n", |
|
"\\def\\vi{{\\mathbf{i}}}\n", |
|
"\\def\\vj{{\\mathbf{j}}}\n", |
|
"\\def\\vk{{\\mathbf{k}}}\n", |
|
"\\def\\vl{{\\mathbf{l}}}\n", |
|
"\\def\\vm{{\\mathbf{m}}}\n", |
|
"\\def\\vn{{\\mathbf{n}}}\n", |
|
"\\def\\vo{{\\mathbf{o}}}\n", |
|
"\\def\\vp{{\\mathbf{p}}}\n", |
|
"\\def\\vq{{\\mathbf{q}}}\n", |
|
"\\def\\vr{{\\mathbf{r}}}\n", |
|
"\\def\\vs{{\\mathbf{s}}}\n", |
|
"\\def\\vt{{\\mathbf{t}}}\n", |
|
"\\def\\vu{{\\mathbf{u}}}\n", |
|
"\\def\\vv{{\\mathbf{v}}}\n", |
|
"\\def\\vw{{\\mathbf{w}}}\n", |
|
"\\def\\vx{{\\mathbf{x}}}\n", |
|
"\\def\\vy{{\\mathbf{y}}}\n", |
|
"\\def\\vz{{\\mathbf{z}}}\n", |
|
"\\def\\vphi{{\\boldsymbol{\\phi}}}\n", |
|
"\\def\\valpha{{\\boldsymbol{\\alpha}}}\n", |
|
"\\def\\vsigma{{\\boldsymbol{\\sigma}}}\n", |
|
"\\def\\vexp{{\\boldsymbol{\\exp{}}}}\n", |
|
"\\def\\evalpha{{\\alpha}}\n", |
|
"\\def\\evbeta{{\\beta}}\n", |
|
"\\def\\evepsilon{{\\epsilon}}\n", |
|
"\\def\\evlambda{{\\lambda}}\n", |
|
"\\def\\evomega{{\\omega}}\n", |
|
"\\def\\evmu{{\\mu}}\n", |
|
"\\def\\evpsi{{\\psi}}\n", |
|
"\\def\\evsigma{{\\sigma}}\n", |
|
"\\def\\evtheta{{\\theta}}\n", |
|
"\\def\\eva{{a}}\n", |
|
"\\def\\evb{{b}}\n", |
|
"\\def\\evc{{c}}\n", |
|
"\\def\\evd{{d}}\n", |
|
"\\def\\eve{{e}}\n", |
|
"\\def\\evf{{f}}\n", |
|
"\\def\\evg{{g}}\n", |
|
"\\def\\evh{{h}}\n", |
|
"\\def\\evi{{i}}\n", |
|
"\\def\\evj{{j}}\n", |
|
"\\def\\evk{{k}}\n", |
|
"\\def\\evl{{l}}\n", |
|
"\\def\\evm{{m}}\n", |
|
"\\def\\evn{{n}}\n", |
|
"\\def\\evo{{o}}\n", |
|
"\\def\\evp{{p}}\n", |
|
"\\def\\evq{{q}}\n", |
|
"\\def\\evr{{r}}\n", |
|
"\\def\\evs{{s}}\n", |
|
"\\def\\evt{{t}}\n", |
|
"\\def\\evu{{u}}\n", |
|
"\\def\\evv{{v}}\n", |
|
"\\def\\evw{{w}}\n", |
|
"\\def\\evx{{x}}\n", |
|
"\\def\\evy{{y}}\n", |
|
"\\def\\evz{{z}}\n", |
|
"\\def\\mA{{\\mathbf{A}}}\n", |
|
"\\def\\mB{{\\mathbf{B}}}\n", |
|
"\\def\\mC{{\\mathbf{C}}}\n", |
|
"\\def\\mD{{\\mathbf{D}}}\n", |
|
"\\def\\mE{{\\mathbf{E}}}\n", |
|
"\\def\\mF{{\\mathbf{F}}}\n", |
|
"\\def\\mG{{\\mathbf{G}}}\n", |
|
"\\def\\mH{{\\mathbf{H}}}\n", |
|
"\\def\\mI{{\\mathbf{I}}}\n", |
|
"\\def\\mJ{{\\mathbf{J}}}\n", |
|
"\\def\\mK{{\\mathbf{K}}}\n", |
|
"\\def\\mL{{\\mathbf{L}}}\n", |
|
"\\def\\mM{{\\mathbf{M}}}\n", |
|
"\\def\\mN{{\\mathbf{N}}}\n", |
|
"\\def\\mO{{\\mathbf{O}}}\n", |
|
"\\def\\mP{{\\mathbf{P}}}\n", |
|
"\\def\\mQ{{\\mathbf{Q}}}\n", |
|
"\\def\\mR{{\\mathbf{R}}}\n", |
|
"\\def\\mS{{\\mathbf{S}}}\n", |
|
"\\def\\mT{{\\mathbf{T}}}\n", |
|
"\\def\\mU{{\\mathbf{U}}}\n", |
|
"\\def\\mV{{\\mathbf{V}}}\n", |
|
"\\def\\mW{{\\mathbf{W}}}\n", |
|
"\\def\\mX{{\\mathbf{X}}}\n", |
|
"\\def\\mY{{\\mathbf{Y}}}\n", |
|
"\\def\\mZ{{\\mathbf{Z}}}\n", |
|
"\\def\\mBeta{{\\bm{\\beta}}}\n", |
|
"\\def\\mPhi{{\\bm{\\Phi}}}\n", |
|
"\\def\\mLambda{{\\bm{\\Lambda}}}\n", |
|
"\\def\\mSigma{{\\bm{\\Sigma}}}\n", |
|
"\\DeclareMathAlphabet{\\mathsfit}{\\encodingdefault}{\\sfdefault}{m}{sl}\n", |
|
"\\SetMathAlphabet{\\mathsfit}{bold}{\\encodingdefault}{\\sfdefault}{bx}{n}\n", |
|
"\\newcommand{\\tens}[1]{\\bm{\\mathsfit{#1}}}\n", |
|
"\\def\\tA{{\\tens{A}}}\n", |
|
"\\def\\tB{{\\tens{B}}}\n", |
|
"\\def\\tC{{\\tens{C}}}\n", |
|
"\\def\\tD{{\\tens{D}}}\n", |
|
"\\def\\tE{{\\tens{E}}}\n", |
|
"\\def\\tF{{\\tens{F}}}\n", |
|
"\\def\\tG{{\\tens{G}}}\n", |
|
"\\def\\tH{{\\tens{H}}}\n", |
|
"\\def\\tI{{\\tens{I}}}\n", |
|
"\\def\\tJ{{\\tens{J}}}\n", |
|
"\\def\\tK{{\\tens{K}}}\n", |
|
"\\def\\tL{{\\tens{L}}}\n", |
|
"\\def\\tM{{\\tens{M}}}\n", |
|
"\\def\\tN{{\\tens{N}}}\n", |
|
"\\def\\tO{{\\tens{O}}}\n", |
|
"\\def\\tP{{\\tens{P}}}\n", |
|
"\\def\\tQ{{\\tens{Q}}}\n", |
|
"\\def\\tR{{\\tens{R}}}\n", |
|
"\\def\\tS{{\\tens{S}}}\n", |
|
"\\def\\tT{{\\tens{T}}}\n", |
|
"\\def\\tU{{\\tens{U}}}\n", |
|
"\\def\\tV{{\\tens{V}}}\n", |
|
"\\def\\tW{{\\tens{W}}}\n", |
|
"\\def\\tX{{\\tens{X}}}\n", |
|
"\\def\\tY{{\\tens{Y}}}\n", |
|
"\\def\\tZ{{\\tens{Z}}}\n", |
|
"\\def\\gA{{\\mathcal{A}}}\n", |
|
"\\def\\gB{{\\mathcal{B}}}\n", |
|
"\\def\\gC{{\\mathcal{C}}}\n", |
|
"\\def\\gD{{\\mathcal{D}}}\n", |
|
"\\def\\gE{{\\mathcal{E}}}\n", |
|
"\\def\\gF{{\\mathcal{F}}}\n", |
|
"\\def\\gG{{\\mathcal{G}}}\n", |
|
"\\def\\gH{{\\mathcal{H}}}\n", |
|
"\\def\\gI{{\\mathcal{I}}}\n", |
|
"\\def\\gJ{{\\mathcal{J}}}\n", |
|
"\\def\\gK{{\\mathcal{K}}}\n", |
|
"\\def\\gL{{\\mathcal{L}}}\n", |
|
"\\def\\gM{{\\mathcal{M}}}\n", |
|
"\\def\\gN{{\\mathcal{N}}}\n", |
|
"\\def\\gO{{\\mathcal{O}}}\n", |
|
"\\def\\gP{{\\mathcal{P}}}\n", |
|
"\\def\\gQ{{\\mathcal{Q}}}\n", |
|
"\\def\\gR{{\\mathcal{R}}}\n", |
|
"\\def\\gS{{\\mathcal{S}}}\n", |
|
"\\def\\gT{{\\mathcal{T}}}\n", |
|
"\\def\\gU{{\\mathcal{U}}}\n", |
|
"\\def\\gV{{\\mathcal{V}}}\n", |
|
"\\def\\gW{{\\mathcal{W}}}\n", |
|
"\\def\\gX{{\\mathcal{X}}}\n", |
|
"\\def\\gY{{\\mathcal{Y}}}\n", |
|
"\\def\\gZ{{\\mathcal{Z}}}\n", |
|
"\\def\\sA{{\\mathbb{A}}}\n", |
|
"\\def\\sB{{\\mathbb{B}}}\n", |
|
"\\def\\sC{{\\mathbb{C}}}\n", |
|
"\\def\\sD{{\\mathbb{D}}}\n", |
|
"\\def\\sF{{\\mathbb{F}}}\n", |
|
"\\def\\sG{{\\mathbb{G}}}\n", |
|
"\\def\\sH{{\\mathbb{H}}}\n", |
|
"\\def\\sI{{\\mathbb{I}}}\n", |
|
"\\def\\sJ{{\\mathbb{J}}}\n", |
|
"\\def\\sK{{\\mathbb{K}}}\n", |
|
"\\def\\sL{{\\mathbb{L}}}\n", |
|
"\\def\\sM{{\\mathbb{M}}}\n", |
|
"\\def\\sN{{\\mathbb{N}}}\n", |
|
"\\def\\sO{{\\mathbb{O}}}\n", |
|
"\\def\\sP{{\\mathbb{P}}}\n", |
|
"\\def\\sQ{{\\mathbb{Q}}}\n", |
|
"\\def\\sR{{\\mathbb{R}}}\n", |
|
"\\def\\sS{{\\mathbb{S}}}\n", |
|
"\\def\\sT{{\\mathbb{T}}}\n", |
|
"\\def\\sU{{\\mathbb{U}}}\n", |
|
"\\def\\sV{{\\mathbb{V}}}\n", |
|
"\\def\\sW{{\\mathbb{W}}}\n", |
|
"\\def\\sX{{\\mathbb{X}}}\n", |
|
"\\def\\sY{{\\mathbb{Y}}}\n", |
|
"\\def\\sZ{{\\mathbb{Z}}}\n", |
|
"\\def\\emLambda{{\\Lambda}}\n", |
|
"\\def\\emA{{A}}\n", |
|
"\\def\\emB{{B}}\n", |
|
"\\def\\emC{{C}}\n", |
|
"\\def\\emD{{D}}\n", |
|
"\\def\\emE{{E}}\n", |
|
"\\def\\emF{{F}}\n", |
|
"\\def\\emG{{G}}\n", |
|
"\\def\\emH{{H}}\n", |
|
"\\def\\emI{{I}}\n", |
|
"\\def\\emJ{{J}}\n", |
|
"\\def\\emK{{K}}\n", |
|
"\\def\\emL{{L}}\n", |
|
"\\def\\emM{{M}}\n", |
|
"\\def\\emN{{N}}\n", |
|
"\\def\\emO{{O}}\n", |
|
"\\def\\emP{{P}}\n", |
|
"\\def\\emQ{{Q}}\n", |
|
"\\def\\emR{{R}}\n", |
|
"\\def\\emS{{S}}\n", |
|
"\\def\\emT{{T}}\n", |
|
"\\def\\emU{{U}}\n", |
|
"\\def\\emV{{V}}\n", |
|
"\\def\\emW{{W}}\n", |
|
"\\def\\emX{{X}}\n", |
|
"\\def\\emY{{Y}}\n", |
|
"\\def\\emZ{{Z}}\n", |
|
"\\def\\emSigma{{\\Sigma}}\n", |
|
"\\newcommand{\\etens}[1]{\\mathsfit{#1}}\n", |
|
"\\def\\etLambda{{\\etens{\\Lambda}}}\n", |
|
"\\def\\etA{{\\etens{A}}}\n", |
|
"\\def\\etB{{\\etens{B}}}\n", |
|
"\\def\\etC{{\\etens{C}}}\n", |
|
"\\def\\etD{{\\etens{D}}}\n", |
|
"\\def\\etE{{\\etens{E}}}\n", |
|
"\\def\\etF{{\\etens{F}}}\n", |
|
"\\def\\etG{{\\etens{G}}}\n", |
|
"\\def\\etH{{\\etens{H}}}\n", |
|
"\\def\\etI{{\\etens{I}}}\n", |
|
"\\def\\etJ{{\\etens{J}}}\n", |
|
"\\def\\etK{{\\etens{K}}}\n", |
|
"\\def\\etL{{\\etens{L}}}\n", |
|
"\\def\\etM{{\\etens{M}}}\n", |
|
"\\def\\etN{{\\etens{N}}}\n", |
|
"\\def\\etO{{\\etens{O}}}\n", |
|
"\\def\\etP{{\\etens{P}}}\n", |
|
"\\def\\etQ{{\\etens{Q}}}\n", |
|
"\\def\\etR{{\\etens{R}}}\n", |
|
"\\def\\etS{{\\etens{S}}}\n", |
|
"\\def\\etT{{\\etens{T}}}\n", |
|
"\\def\\etU{{\\etens{U}}}\n", |
|
"\\def\\etV{{\\etens{V}}}\n", |
|
"\\def\\etW{{\\etens{W}}}\n", |
|
"\\def\\etX{{\\etens{X}}}\n", |
|
"\\def\\etY{{\\etens{Y}}}\n", |
|
"\\def\\etZ{{\\etens{Z}}}\n", |
|
"\\newcommand{\\pdata}{p_{\\rm{data}}}\n", |
|
"\\newcommand{\\ptrain}{\\hat{p}_{\\rm{data}}}\n", |
|
"\\newcommand{\\Ptrain}{\\hat{P}_{\\rm{data}}}\n", |
|
"\\newcommand{\\pmodel}{p_{\\rm{model}}}\n", |
|
"\\newcommand{\\Pmodel}{P_{\\rm{model}}}\n", |
|
"\\newcommand{\\ptildemodel}{\\tilde{p}_{\\rm{model}}}\n", |
|
"\\newcommand{\\pencode}{p_{\\rm{encoder}}}\n", |
|
"\\newcommand{\\pdecode}{p_{\\rm{decoder}}}\n", |
|
"\\newcommand{\\precons}{p_{\\rm{reconstruct}}}\n", |
|
"\\newcommand{\\laplace}{\\mathrm{Laplace}} \n", |
|
"\\newcommand{\\E}{\\mathbb{E}}\n", |
|
"\\newcommand{\\Ls}{\\mathcal{L}}\n", |
|
"\\newcommand{\\R}{\\mathbb{R}}\n", |
|
"\\newcommand{\\emp}{\\tilde{p}}\n", |
|
"\\newcommand{\\lr}{\\alpha}\n", |
|
"\\newcommand{\\reg}{\\lambda}\n", |
|
"\\newcommand{\\rect}{\\mathrm{rectifier}}\n", |
|
"\\newcommand{\\softmax}{\\operatorname{softmax}}\n", |
|
"\\newcommand{\\mlp}{\\operatorname{MLP}}\n", |
|
"\\newcommand{\\attn}{\\operatorname{attn}}\n", |
|
"\\newcommand{\\sigmoid}{\\sigma}\n", |
|
"\\newcommand{\\softplus}{\\zeta}\n", |
|
"\\newcommand{\\KL}{D_{\\mathrm{KL}}}\n", |
|
"\\newcommand{\\Var}{\\mathrm{Var}}\n", |
|
"\\newcommand{\\standarderror}{\\mathrm{SE}}\n", |
|
"\\newcommand{\\Cov}{\\mathrm{Cov}}\n", |
|
"\\newcommand{\\normlzero}{L^0}\n", |
|
"\\newcommand{\\normlone}{L^1}\n", |
|
"\\newcommand{\\normltwo}{L^2}\n", |
|
"\\newcommand{\\normlp}{L^p}\n", |
|
"\\newcommand{\\normmax}{L^\\infty}\n", |
|
"\\newcommand{\\parents}{Pa} \n", |
|
"\\DeclareMathOperator*{\\argmax}{arg\\,max}\n", |
|
"\\DeclareMathOperator*{\\argmin}{arg\\,min}\n", |
|
"\\DeclareMathOperator{\\sign}{sign}\n", |
|
"\\DeclareMathOperator{\\Tr}{Tr}\n", |
|
"\\let\\ab\\allowbreak\n", |
|
"\\newcommand{\\fix}{\\marginpar{FIX}}\n", |
|
"\\newcommand{\\new}{\\marginpar{NEW}}\n", |
|
"\\newcommand{\\model}{\\textsc{Abc}\\xspace}\n", |
|
"\\newcommand{\\modelmlp}{\\textsc{Abc}$_\\text{MLP}$\\xspace}\n", |
|
"\\newcommand{\\modelexp}{\\textsc{Abc}$_{\\exp}$\\xspace}\n", |
|
"\\newcommand{\\modelrandom}{\\textsc{Abc}$_{\\text{RD}}$\\xspace}\n", |
|
"\\newcommand{\\modelwindow}{\\textsc{Abc}$_{\\text{WD}}$\\xspace}\n", |
|
"\\newcommand{\\modelcluster}{\\textsc{Abc}$_{\\text{CL}}$\\xspace}\n", |
|
"\\newcommand{\\STAB}[1]{\\begin{tabular}{@{}c@{}}#1\\end{tabular}}\n", |
|
"\\newcommand{\\resolved}[1]{}\n", |
|
"\\newcommand{\\base}[0]{\\textsc{Base}\\xspace}\n", |
|
"\\newcommand{\\com}[1]{}\n", |
|
"\\newcommand{\\firststep}{selective annotation\\xspace}\n", |
|
"\\newcommand{\\FirstStep}{Selective Annotation\\xspace}\n", |
|
"\\newcommand{\\Firststep}{Selective annotation\\xspace}\n", |
|
"\\newcommand{\\votek}{vote-$k$\\xspace}\n", |
|
"\\newcommand{\\Votek}{Vote-$k$\\xspace}\n", |
|
"\\usepackage{xspace,mfirstuc,tabulary}\n", |
|
"\\definecolor{lightgray}{gray}{0.9}\n", |
|
"\\colorlet{soulgreen}{green!30}\n", |
|
"\\definecolor{red}{HTML}{FF0000}\n", |
|
"\\definecolor{blue}{HTML}{0000FF}\n", |
|
"\\definecolor{darkgreen}{HTML}{228B22}\n", |
|
"\\definecolor{dblue}{HTML}{007FFF}\n", |
|
"\\usepackage{pifont}\n", |
|
"\\newcommand{\\xmark}{\\textcolor{red}{\\ding{55}}}\n", |
|
"\\newcommand{\\cmark}{\\textcolor{darkgreen}{\\ding{51}}}\n", |
|
"\\usepackage{listings}\n", |
|
"\\definecolor{mymauve}{rgb}{0.58,0,0.82}\n", |
|
"\\lstset{ \n", |
|
" backgroundcolor=\\color{white}, \n", |
|
" basicstyle=\\footnotesize\\ttfamily, \n", |
|
" breakatwhitespace=false, \n", |
|
" breaklines=true, \n", |
|
" captionpos=b, \n", |
|
" commentstyle=\\color{mygreen}, \n", |
|
" deletekeywords={...}, \n", |
|
" escapeinside={\\\n", |
|
" extendedchars=true, \n", |
|
" firstnumber=1000, \n", |
|
" keepspaces=true, \n", |
|
" keywordstyle=\\color{blue}, \n", |
|
" language=Octave, \n", |
|
" morekeywords={*,...}, \n", |
|
" showspaces=false, \n", |
|
" showstringspaces=false, \n", |
|
" showtabs=false, \n", |
|
" stepnumber=2, \n", |
|
" stringstyle=\\color{mymauve}, \n", |
|
" tabsize=2,\t \n", |
|
" title=\\lstname, \n", |
|
" escapeinside={(*@}{@*)}\n", |
|
"}\n", |
|
"\\title{\n", |
|
"\\FirstStep Makes Language Models Better Few-Shot Learners\n", |
|
"}\n", |
|
"\\author{\\textbf{Hongjin Su}$^\\spadesuit$ \\ \\ \n", |
|
" \\textbf{Jungo Kasai}$^{\\clubsuit\\diamondsuit}$ \\ \\ \n", |
|
" \\textbf{Chen Henry Wu}$^{\\heartsuit}$ \\ \\ \n", |
|
" \\textbf{Weijia Shi}$^\\clubsuit$ \\ \\ \n", |
|
" \\textbf{Tianlu Wang}$^{\\vardiamond}$ \\ \\\n", |
|
" \\textbf{Jiayi Xin}$^{\\spadesuit}$\\\\\n", |
|
" \\textbf{Rui Zhang}$^{\\bigstar}$\\ \\ \n", |
|
" \\textbf{Mari Ostendorf}$^{\\clubsuit}$\n", |
|
" \\ \\ \n", |
|
" \\textbf{Luke Zettlemoyer}$^{\\clubsuit\\vardiamond}$\n", |
|
" \\ \\ \n", |
|
" \\textbf{Noah A.\\ Smith}$^{\\clubsuit\\diamondsuit}$\\ \\ \n", |
|
" \\textbf{Tao Yu}$^{\\spadesuit\\clubsuit}$\n", |
|
" \\\\ \n", |
|
" $^\\spadesuit$The University of Hong Kong \n", |
|
" \\quad\n", |
|
" $^\\clubsuit$University of Washington\n", |
|
" \\quad\n", |
|
" $^\\diamondsuit$Allen Institute for AI\n", |
|
" \\\\\n", |
|
" $^\\heartsuit$Carnegie Mellon University \\quad\n", |
|
" $^\\bigstar$Penn State University \n", |
|
" \\quad \n", |
|
" $^\\vardiamond$Meta AI\\\\\n", |
|
" \\\\\n", |
|
" {\\tt \\{hjsu,tyu\\}@cs.hku.hk,}\\ \\ {\\tt [email protected],} \\ \\ {\\tt [email protected]}\\\\\n", |
|
" {\\tt \\{jkasai,swj0419,lsz,nasmith\\}@cs.washington.edu}\n", |
|
"}\n", |
|
"\\iclrfinalcopy \n", |
|
"\\begin{document}\n", |
|
"\\maketitle\n", |
|
"\\setlength{\\abovedisplayskip}{2pt}\n", |
|
"\\setlength{\\belowdisplayskip}{2pt}\n", |
|
"\\vspace{-0.5cm}\n", |
|
"\\begin{abstract}\n", |
|
"\\vspace{-0.1cm}\n", |
|
"Many recent approaches to natural language tasks are built on the remarkable abilities of large language models.\n", |
|
"Large language models can perform in-context learning, where they learn a new task from a few task demonstrations, without any parameter updates.\n", |
|
"This work examines the implications of in-context learning for the creation of datasets for new natural language tasks.\n", |
|
"On average, \\votek achieves a 12.9\\\n", |
|
"We hope that our studies will serve as a basis for data annotations as large language models are increasingly applied to new tasks.\\footnote{Our code is available at \\url{https://github.com/HKUNLP/icl-selective-annotation}}\n", |
|
"\\begin{figure}[h!]\n", |
|
"\\vspace{3mm}\n", |
|
"\\centering\n", |
|
" \\includegraphics[width=0.98\\textwidth]{images/pipeline-avg-main-results-v5.pdf}\n", |
|
"\\caption{\n", |
|
"\\textbf{Left}: Our two-step framework for in-context learning. \n", |
|
"Here we experiment with GPT-J and Codex-davinci-002.\n", |
|
"}\n", |
|
"\\label{fig:pipeline-avg-main-results}\n", |
|
"\\end{figure}\n", |
|
"\\end{abstract}\n", |
|
"\\section{Introduction}\n", |
|
"Much recent work builds approaches to natural language tasks on the impressive abilities of large language models (e.g., GPT-3; \\citealp{gpt3}).\n", |
|
"Large language models can perform downstream tasks by conditioning generation on a few task demonstrations, thereby avoiding the need for any parameter updates.\n", |
|
"This new, few-shot learning paradigm is called \\textit{in-context learning} and has become an attractive alternative to supervised finetuning \\citep{prompt_survey}. \n", |
|
"In this work, we study the implications of this remarkable capability of large language models for dataset creation and annotation.\n", |
|
"Although in-context learning was originally proposed for few-shot learning, recent works show that retrieving prompts from a large set of annotated examples is necessary to achieve good performances \\citep{liu-etal-2022-makes,rubin2022}. \n", |
|
"Each test sample only requires a few in-context examples in its prompt.\n", |
|
"The total annotation budget is the number of examples selected and annotated in the first step.\n", |
|
"achieves a 11.4\\\n", |
|
"It also outperforms strong finetuning methods by a large margin (Fig.\\ \\ref{fig:icl-vs-ft}) \n", |
|
"These results suggest that large language models do not require large annotated datasets (e.g., 10K) due to their ability to adapt to new tasks through simple prompting.\n", |
|
"In real-world scenarios, even collecting \\textit{unlabeled} data is non-trivial and introduces randomness.\n", |
|
"(Tab.~\\ref{tab:main_results}).\n", |
|
"(\\S\\ref{subsec:sample-selection-methods}).\n", |
|
"As in-context learning has been applied to increasingly more natural language processing applications, we hope that our annotation-efficient framework will provide useful guidance for both researchers and practitioners.\n", |
|
"\\section{\\FirstStep for In-Context Learning}\n", |
|
"\\label{sec:framework}\n", |
|
"In-context learning only requires a few annotated examples per test instance (\\emph{few-shot learning}), while avoiding expensive finetuning on the whole training data.\n", |
|
"It is, however, often assumed that all \\emph{annotated} training data are available for prompt retrieval (e.g., \\citealp{liu-etal-2022-makes,rubin2022}).\n", |
|
"Yet the implied total annotation costs are hardly discussed in previous work.\n", |
|
"We develop a better practice for few-shot learning with large language models by carefully studying the total annotation cost required for in-context learning.\n", |
|
"We formulate a general framework (Fig.\\ \\ref{fig:pipeline-avg-main-results} left) that consists of two steps: \\firststep (\\S\\ref{sec:sample_selection}) and prompt retrieval (\\S\\ref{sec:prompt_retrieval}).\n", |
|
"\\subsection{\\FirstStep}\n", |
|
"\\label{sec:sample_selection}\n", |
|
"The first step chooses examples to annotate \\emph{before} test time.\n", |
|
"This process thus determines the total annotation budget.\n", |
|
"This \\firststep process is largely ignored in the recent literature for in-context learning.\n", |
|
"\\paragraph{\\Votek}\n", |
|
"\\label{sec:sample_selection_method}\n", |
|
"The goal of \\firststep for in-context learning is to select diverse and representative examples; \n", |
|
"representativeness will help many test instances to find similar demonstrations, while diversity increases the total coverage.\n", |
|
"We develop \\votek, a graph-based method that promotes both diversity and representativeness.\n", |
|
"A detailed algorithm can be found in Appendix~\\ref{sec:details-sample-selection}. \n", |
|
"We first compute a vector representation for each \\emph{unlabeled} training instance using Sentence-BERT \\citep{reimers-gurevych-2019-sentence} by averaging the resulting vectors over the text input words.\\footnote{\\url{https://huggingface.co/sentence-transformers/all-mpnet-base-v2}.}\n", |
|
"We then use the embedding vectors to create a directed \n", |
|
"graph $G = (V, E)$ where the vertices $V$ are the unlabeled instances $\\mathcal{X}$ \n", |
|
"as defined above.\n", |
|
"For each vertex $v \\in V$, we create an edge to its $k$ nearest vertices in terms of the cosine similarity between the embeddings.\n", |
|
" Now let $\\mathcal{L}$ and $\\mathcal{U}$ denote the sets of already chosen (i.e., labeled) samples and remaining samples, respectively. \n", |
|
" Initially, $\\mathcal{L}=\\emptyset$.\n", |
|
" Every vertex $u \\in \\mathcal{U}$ is scored by a modified degree: \n", |
|
" \\begin{align*}\n", |
|
" \\mathrm{score}(u) = \\sum_{v \\in \\{v | (v, u) \\in E, v \\in \\mathcal{U}\\}} s (v), \\quad \\text{where} \\ s(v) = \\rho ^{- |\\{\\ell \\in \\mathcal{L}| (v, \\ell) \\in E \\}|}, \\quad \\rho > 1\n", |
|
" \\end{align*}\n", |
|
"where $s$ discounts $v$ that is close to the already selected instances, thereby encouraging diversity.\n", |
|
"In every iteration, we take $\\argmax_{u \\in \\mathcal{U}} \\mathrm{score}(u)$ and move it from $\\mathcal{U}$ to $\\mathcal{L}$.\n", |
|
"Subsequently, we use $\\mathcal{L}$ as the in-context learning examples for a large language model, e.g., GPT-J \\citep{gpt-j}, and generate a prediction for every instance in $\\mathcal{U}$.\n", |
|
"We then compute the average log probability over the generation output as the model's confidence score (Line~\\ref{line:lm-start} to Line~\\ref{line:lm-ends} in Algorithm~\\ref{alg:vote-k}).\n", |
|
"We then partition $\\mathcal{U}$ into $M$ equal-sized buckets, based on their confidence scores (e.g., if $M=100$, we group the unlabeled instances by percentile).\n", |
|
"\\subsection{Prompt Retrieval}\n", |
|
"\\label{sec:prompt_retrieval}\n", |
|
"Following recent work~\\citep{liu-etal-2022-makes}, we will compute embeddings for all annotated samples using Sentence-BERT and find the most similar examples to each test instance in terms of cosine similarity.\n" |
|
], |
|
"output": { |
|
"What experiments do you suggest doing?": [ |
|
"1. Datasets and tasks: The authors should conduct the experiments on diverse datasets and distinct tasks. For example, task types can span across text classification, multiple choice, dialogue and text generation.", |
|
"2. Measuring stability: The authors should perform selective annotation from the instances that are randomly subsampled from the original training data for each task. For each experiment, the authors should repeat the subsampling multiple times, and average the results over the different trials.", |
|
"3. Methods performance comparison across different datasets: The authors should apply their proposed vote-k selective annotation methods with in-context learning across different datasets. One potential baseline could be a random selection method. The authors should also run the experiments under the settings with different annotation budgets.", |
|
"4. The influence of selective annotation on in-context learning and finetuning: The authors should make a comparison between the in-context learning and finetuning paradigms over varying annotation budgets on different tasks. The annotation budgets should range from dozens to hundreds. The authors should report both in-context learning and finetuning with two distinct selective methods: random and the proposed vote-k.", |
|
"5. Performance of various model sizes using different selective annotation methods: The authors should report performance with varying sizes of language models using random and the proposed vote-k selective annotation methods.", |
|
"6. Effects of domain shift on performance gain: The authors should compare the performance gap between random and vote-k selective annotation methods under two situations: with or without domain shifts.", |
|
"7. The influence of random and similar prompt retrieval for different selective methods: The authors should apply both similar and random retrieval on random and vote-k selective methods and compare their performance.", |
|
"8. Comparisons of various selective annotation methods: The authors should compare their proposed vote-k methods with other selective annotation methods such as Maximizing facility location and Least-confidence methods." |
|
], |
|
"Why do you suggest these experiments?": [ |
|
"1. To demonstrate that the proposed method is effective in various scenarios.", |
|
"2. Given a set of unlabeled data, the proposed vote-k selective annotation algorithm is deterministic, without any randomness. However, in real scenarios, even getting unlabeled samples is not trivial, and getting unlabeled samples can be a process with large variance. This experiment is to show that the proposed vote-k can improve stability.", |
|
"3. To demonstrate that the proposed vote-k method is more effective in improving in-context learning performance.", |
|
"4. To better understand how in-context learning compares with finetuning under limited annotation budgets.", |
|
"5. To show that the proposed vote-k can consistently improve performance over varying model sizes thus proving its versatility.", |
|
"6. To demonstrate that selective annotation and prompt retrieval are particularly crucial when there is a domain shift in the evaluation data, as in many realistic scenarios.", |
|
"7. To demonstrate that combining selective annotation and prompt retrieval is crucial for the success of in-context learning.", |
|
"8. To demonstrate that the proposed vote-k is more effective than other selective annotation methods." |
|
] |
|
}, |
|
"paper_info": { |
|
"title": "Selective Annotation Makes Language Models Better Few-Shot Learners", |
|
"authors": [ |
|
"Hongjin Su", |
|
"Jungo Kasai", |
|
"Chen Henry Wu", |
|
"Weijia Shi", |
|
"Tianlu Wang", |
|
"Jiayi Xin", |
|
"Rui Zhang", |
|
"Mari Ostendorf", |
|
"Luke Zettlemoyer", |
|
"Noah A. Smith", |
|
"Tao Yu" |
|
], |
|
"abstract": "Many recent approaches to natural language tasks are built on the remarkable\nabilities of large language models. Large language models can perform\nin-context learning, where they learn a new task from a few task\ndemonstrations, without any parameter updates. This work examines the\nimplications of in-context learning for the creation of datasets for new\nnatural language tasks. Departing from recent in-context learning methods, we\nformulate an annotation-efficient, two-step framework: selective annotation\nthat chooses a pool of examples to annotate from unlabeled data in advance,\nfollowed by prompt retrieval that retrieves task examples from the annotated\npool at test time. Based on this framework, we propose an unsupervised,\ngraph-based selective annotation method, vote-k, to select diverse,\nrepresentative examples to annotate. Extensive experiments on 10 datasets\n(covering classification, commonsense reasoning, dialogue, and text/code\ngeneration) demonstrate that our selective annotation method improves the task\nperformance by a large margin. On average, vote-k achieves a 12.9%/11.4%\nrelative gain under an annotation budget of 18/100, as compared to randomly\nselecting examples to annotate. Compared to state-of-the-art supervised\nfinetuning approaches, it yields similar performance with 10-100x less\nannotation cost across 10 tasks. We further analyze the effectiveness of our\nframework in various scenarios: language models with varying sizes, alternative\nselective annotation methods, and cases where there is a test data domain\nshift. We hope that our studies will serve as a basis for data annotations as\nlarge language models are increasingly applied to new tasks. Our code is\navailable at https://github.com/HKUNLP/icl-selective-annotation.", |
|
"comments": null |
|
}, |
|
"raw_data": { |
|
"context_before_exp": [ |
|
"\\documentclass{article} \n", |
|
"\\usepackage{iclr2022_conference,times}\n", |
|
"\n", |
|
"\\usepackage[utf8]{inputenc} \n", |
|
"\\usepackage[T1]{fontenc} \n", |
|
"\\usepackage{hyperref} \n", |
|
"\\usepackage{url} \n", |
|
"\\usepackage{booktabs} \n", |
|
"\\usepackage{amsfonts} \n", |
|
"\\usepackage{nicefrac} \n", |
|
"\\usepackage{microtype} \n", |
|
"\\usepackage{xcolor} \n", |
|
"\n", |
|
"\\usepackage{standalone}\n", |
|
"\\usepackage{latexsym}\n", |
|
"\\usepackage{amsmath}\n", |
|
"\\usepackage{amssymb}\n", |
|
"\\usepackage{amsthm}\n", |
|
"\\usepackage{graphicx}\n", |
|
"\\usepackage{array}\n", |
|
"\\usepackage{tabu}\n", |
|
"\\usepackage{makecell}\n", |
|
"\\usepackage{paralist}\n", |
|
"\\usepackage{cases}\n", |
|
"\\usepackage{diagbox}\n", |
|
"\\usepackage{enumitem}\n", |
|
"\\usepackage{soul}\n", |
|
"\\usepackage{multirow}\n", |
|
"\\usepackage{verbatim}\n", |
|
"\\usepackage{tabulary}\n", |
|
"\\usepackage{booktabs}\n", |
|
"\\usepackage[mathscr]{euscript}\n", |
|
"\\usepackage{mathtools}\n", |
|
"\\usepackage{algorithm}\n", |
|
"\\usepackage{algpseudocode}\n", |
|
"\\usepackage{stmaryrd}\n", |
|
"\\usepackage{tikz-dependency}\n", |
|
"\\usepackage{subcaption}\n", |
|
"\\usetikzlibrary{automata,decorations.markings,arrows,positioning,matrix,calc,patterns,angles,quotes,calc}\n", |
|
"\\usepackage{adjustbox}\n", |
|
"\\usepackage{tabularx}\n", |
|
"\\usepackage{xspace}\n", |
|
"\\usepackage{tabulary}\n", |
|
"\\usepackage{afterpage}\n", |
|
"\\usepackage{hyperref}\n", |
|
"\\usepackage{url}\n", |
|
"\\usepackage{bm}\n", |
|
"\\usepackage{color}\n", |
|
"\\usepackage{graphicx}\n", |
|
"\\usepackage{slashbox}\n", |
|
"\\usepackage[toc,page]{appendix}\n", |
|
"\\usepackage{makecell}\n", |
|
"\\usepackage{boldline}\n", |
|
"\\usepackage{bbm}\n", |
|
"\n", |
|
"\\usepackage{wrapfig,lipsum,booktabs}\n", |
|
"\n", |
|
"\n", |
|
"\\definecolor{orange}{rgb}{1,0.5,0}\n", |
|
"\\definecolor{mdgreen}{rgb}{0.05,0.6,0.05}\n", |
|
"\\definecolor{mdblue}{rgb}{0,0,0.7}\n", |
|
"\\definecolor{dkblue}{rgb}{0,0,0.5}\n", |
|
"\\definecolor{dkgray}{rgb}{0.3,0.3,0.3}\n", |
|
"\\definecolor{slate}{rgb}{0.25,0.25,0.4}\n", |
|
"\\definecolor{gray}{rgb}{0.5,0.5,0.5}\n", |
|
"\\definecolor{ltgray}{rgb}{0.7,0.7,0.7}\n", |
|
"\\definecolor{purple}{rgb}{0.7,0,1.0}\n", |
|
"\\definecolor{lavender}{rgb}{0.65,0.55,1.0}\n", |
|
"\n", |
|
"\n", |
|
"\\definecolor{mypurple}{RGB}{111,61,121}\n", |
|
"\\definecolor{myblue}{RGB}{46,88,180}\n", |
|
"\\definecolor{myred}{RGB}{181,68,106}\n", |
|
"\\definecolor{myyellow}{RGB}{204,143,55}\n", |
|
"\\definecolor{amber}{rgb}{1.0, 0.75, 0.0}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\textred}[1]{\\textcolor{red}{#1}}\n", |
|
"\\newcommand{\\textblue}[1]{\\textcolor{blue}{#1}}\n", |
|
"\n", |
|
"\\newcommand{\\ensuretext}[1]{#1}\n", |
|
"\\newcommand{\\marker}[2]{\\ensuremath{^{\\textsc{#1}}_{\\textsc{#2}}}}\n", |
|
"\\newcommand{\\arkcomment}[3]{\\ensuretext{\\textcolor{#3}{[#1 #2]}}}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\nascomment}[1]{\\arkcomment{\\marker{NA}{S}}{#1}{blue}}\n", |
|
"\\newcommand{\\jungo}[1]{\\arkcomment{\\marker{J}{K}}{#1}{brown}}\n", |
|
"\\newcommand{\\tao}[1]{\\arkcomment{\\marker{T}{Y}}{#1}{orange}}\n", |
|
"\\newcommand{\\hongjin}[1]{\\arkcomment{\\marker{H}{S}}{#1}{gray}}\n", |
|
"\\newcommand{\\tianlu}[1]{\\arkcomment{\\marker{T}{W}}{#1}{purple}}\n", |
|
"\\newcommand{\\chen}[1]{\\arkcomment{\\marker{C}{W}}{#1}{violet}}\n", |
|
"\\newcommand{\\rui}[1]{\\arkcomment{\\marker{R}{Z}}{#1}{amber}}\n", |
|
"\\newcommand{\\weijia}[1]{\\arkcomment{\\marker{W}{S}}{#1}{green}}\n", |
|
"\\newcommand{\\luke}[1]{\\arkcomment{\\marker{L}{Z}}{#1}{cyan}}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\revcolor}{red}\n", |
|
"\n", |
|
"\\newcommand{\\rev}[1]{{#1}}\n", |
|
"\n", |
|
"\\newcommand{\\term}[1]{\\textbf{#1}} \n", |
|
"\\newcommand{\\tabincell}[2]{\\begin{tabular}{@{}#1@{}}#2\\end{tabular}}\n", |
|
"\\newcommand{\\interalia}[1]{\\citep[\\emph{inter alia}]{#1}}\n", |
|
"\n", |
|
"\\newcommand{\\argmaxinline}[1]{\\operatorname{argmax}_{#1}}\n", |
|
"\\newcommand{\\argmininline}[1]{\\operatorname{argmin}_{#1}}\n", |
|
"\\newcommand{\\argmaxname}{\\operatorname{argmax}}\n", |
|
"\\newcommand{\\argminname}{\\operatorname{argmin}}\n", |
|
"\\newcommand{\\relu}{\\operatorname{ReLU}}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\rulesep}{\\unskip\\ \\vrule\\ }\n", |
|
"\n", |
|
"\\newcommand{\\norm}[1]{\\left\\lVert#1\\right\\rVert}\n", |
|
"\\DeclareSymbolFont{extraup}{U}{zavm}{m}{n}\n", |
|
"\\DeclareMathSymbol{\\vardiamond}{\\mathalpha}{extraup}{87}\n", |
|
"\n", |
|
"\\newcolumntype{L}[1]{>{\\raggedright\\let\\newline\\\\\\arraybackslash\\hspace{0pt}}m{#1}}\n", |
|
"\\newcolumntype{C}[1]{>{\\centering\\let\\newline\\\\\\arraybackslash\\hspace{0pt}}m{#1}}\n", |
|
"\\newcolumntype{R}[1]{>{\\raggedleft\\let\\newline\\\\\\arraybackslash\\hspace{0pt}}m{#1}}\n", |
|
"\n", |
|
"\\newtheorem{theorem}{Theorem}\n", |
|
"\\newtheorem{lemma}[theorem]{Lemma}\n", |
|
"\\newtheorem{proposition}[theorem]{Proposition}\n", |
|
"\\newtheorem{corollary}[theorem]{Corollary}\n", |
|
"\\theoremstyle{definition}\n", |
|
"\\newtheorem{definition}[theorem]{Definition}\n", |
|
"\\newtheorem{example}[theorem]{Example}\n", |
|
"\\theoremstyle{remark}\n", |
|
"\\newtheorem{remark}[theorem]{Remark}\n", |
|
"\n", |
|
"\\newcommand*{\\QEDA}{\\hfill\\ensuremath{\\blacksquare}}\n", |
|
"\\newcommand*{\\QEDB}{\\hfill\\ensuremath{\\square}}\n", |
|
"\\algrenewcommand{\\algorithmiccomment}[1]{\\leavevmode$\\triangleright$ #1}\n", |
|
"\\newcommand{\\pd}[2]{\\frac{\\partial #1}{\\partial #2}}\n", |
|
"\\setul{1pt}{.4pt}\n", |
|
"\n", |
|
"\\DeclareCaptionFont{tiny}{\\tiny}\n", |
|
"\n", |
|
"\\usepackage[shortcuts]{extdash} \n", |
|
"\n", |
|
"\\usepackage{blindtext}\n", |
|
"\\usepackage{graphicx}\n", |
|
"\\usepackage{capt-of}\n", |
|
"\\usepackage{booktabs}\n", |
|
"\\usepackage{varwidth}\n", |
|
"\\newsavebox\\tmpbox\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\usepackage{amsmath,amsfonts,bm}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\figleft}{{\\em (Left)}}\n", |
|
"\\newcommand{\\figcenter}{{\\em (Center)}}\n", |
|
"\\newcommand{\\figright}{{\\em (Right)}}\n", |
|
"\\newcommand{\\figtop}{{\\em (Top)}}\n", |
|
"\\newcommand{\\figbottom}{{\\em (Bottom)}}\n", |
|
"\\newcommand{\\captiona}{{\\em (a)}}\n", |
|
"\\newcommand{\\captionb}{{\\em (b)}}\n", |
|
"\\newcommand{\\captionc}{{\\em (c)}}\n", |
|
"\\newcommand{\\captiond}{{\\em (d)}}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\newterm}[1]{{\\bf #1}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\def\\figref#1{figure~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Figref#1{Figure~\\ref{#1}}\n", |
|
"\\def\\twofigref#1#2{figures \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\quadfigref#1#2#3#4{figures \\ref{#1}, \\ref{#2}, \\ref{#3} and \\ref{#4}}\n", |
|
"\n", |
|
"\\def\\secref#1{section~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Secref#1{Section~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\twosecrefs#1#2{sections \\ref{#1} and \\ref{#2}}\n", |
|
"\n", |
|
"\\def\\secrefs#1#2#3{sections \\ref{#1}, \\ref{#2} and \\ref{#3}}\n", |
|
"\n", |
|
"\\def\\eqref#1{equation~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Eqref#1{Equation~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\plaineqref#1{\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\chapref#1{chapter~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Chapref#1{Chapter~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\rangechapref#1#2{chapters\\ref{#1}--\\ref{#2}}\n", |
|
"\n", |
|
"\\def\\algref#1{algorithm~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Algref#1{Algorithm~\\ref{#1}}\n", |
|
"\\def\\twoalgref#1#2{algorithms \\ref{#1} and \\ref{#2}}\n", |
|
"\\def\\Twoalgref#1#2{Algorithms \\ref{#1} and \\ref{#2}}\n", |
|
"\n", |
|
"\\def\\partref#1{part~\\ref{#1}}\n", |
|
"\n", |
|
"\\def\\Partref#1{Part~\\ref{#1}}\n", |
|
"\\def\\twopartref#1#2{parts \\ref{#1} and \\ref{#2}}\n", |
|
"\n", |
|
"\\def\\ceil#1{\\lceil #1 \\rceil}\n", |
|
"\\def\\floor#1{\\lfloor #1 \\rfloor}\n", |
|
"\\def\\1{\\bm{1}}\n", |
|
"\\newcommand{\\train}{\\mathcal{D}}\n", |
|
"\\newcommand{\\valid}{\\mathcal{D_{\\mathrm{valid}}}}\n", |
|
"\\newcommand{\\test}{\\mathcal{D_{\\mathrm{test}}}}\n", |
|
"\n", |
|
"\\def\\eps{{\\epsilon}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\def\\reta{{\\textnormal{$\\eta$}}}\n", |
|
"\\def\\ra{{\\textnormal{a}}}\n", |
|
"\\def\\rb{{\\textnormal{b}}}\n", |
|
"\\def\\rc{{\\textnormal{c}}}\n", |
|
"\\def\\rd{{\\textnormal{d}}}\n", |
|
"\\def\\re{{\\textnormal{e}}}\n", |
|
"\\def\\rf{{\\textnormal{f}}}\n", |
|
"\\def\\rg{{\\textnormal{g}}}\n", |
|
"\\def\\rh{{\\textnormal{h}}}\n", |
|
"\\def\\ri{{\\textnormal{i}}}\n", |
|
"\\def\\rj{{\\textnormal{j}}}\n", |
|
"\\def\\rk{{\\textnormal{k}}}\n", |
|
"\\def\\rl{{\\textnormal{l}}}\n", |
|
"\n", |
|
"\\def\\rn{{\\textnormal{n}}}\n", |
|
"\\def\\ro{{\\textnormal{o}}}\n", |
|
"\\def\\rp{{\\textnormal{p}}}\n", |
|
"\\def\\rq{{\\textnormal{q}}}\n", |
|
"\\def\\rr{{\\textnormal{r}}}\n", |
|
"\\def\\rs{{\\textnormal{s}}}\n", |
|
"\\def\\rt{{\\textnormal{t}}}\n", |
|
"\\def\\ru{{\\textnormal{u}}}\n", |
|
"\\def\\rv{{\\textnormal{v}}}\n", |
|
"\\def\\rw{{\\textnormal{w}}}\n", |
|
"\\def\\rx{{\\textnormal{x}}}\n", |
|
"\\def\\ry{{\\textnormal{y}}}\n", |
|
"\\def\\rz{{\\textnormal{z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\rvepsilon{{\\mathbf{\\epsilon}}}\n", |
|
"\\def\\rvtheta{{\\mathbf{\\theta}}}\n", |
|
"\\def\\rva{{\\mathbf{a}}}\n", |
|
"\\def\\rvb{{\\mathbf{b}}}\n", |
|
"\\def\\rvc{{\\mathbf{c}}}\n", |
|
"\\def\\rvd{{\\mathbf{d}}}\n", |
|
"\\def\\rve{{\\mathbf{e}}}\n", |
|
"\\def\\rvf{{\\mathbf{f}}}\n", |
|
"\\def\\rvg{{\\mathbf{g}}}\n", |
|
"\\def\\rvh{{\\mathbf{h}}}\n", |
|
"\\def\\rvu{{\\mathbf{i}}}\n", |
|
"\\def\\rvj{{\\mathbf{j}}}\n", |
|
"\\def\\rvk{{\\mathbf{k}}}\n", |
|
"\\def\\rvl{{\\mathbf{l}}}\n", |
|
"\\def\\rvm{{\\mathbf{m}}}\n", |
|
"\\def\\rvn{{\\mathbf{n}}}\n", |
|
"\\def\\rvo{{\\mathbf{o}}}\n", |
|
"\\def\\rvp{{\\mathbf{p}}}\n", |
|
"\\def\\rvq{{\\mathbf{q}}}\n", |
|
"\\def\\rvr{{\\mathbf{r}}}\n", |
|
"\\def\\rvs{{\\mathbf{s}}}\n", |
|
"\\def\\rvt{{\\mathbf{t}}}\n", |
|
"\\def\\rvu{{\\mathbf{u}}}\n", |
|
"\\def\\rvv{{\\mathbf{v}}}\n", |
|
"\\def\\rvw{{\\mathbf{w}}}\n", |
|
"\\def\\rvx{{\\mathbf{x}}}\n", |
|
"\\def\\rvy{{\\mathbf{y}}}\n", |
|
"\\def\\rvz{{\\mathbf{z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\erva{{\\textnormal{a}}}\n", |
|
"\\def\\ervb{{\\textnormal{b}}}\n", |
|
"\\def\\ervc{{\\textnormal{c}}}\n", |
|
"\\def\\ervd{{\\textnormal{d}}}\n", |
|
"\\def\\erve{{\\textnormal{e}}}\n", |
|
"\\def\\ervf{{\\textnormal{f}}}\n", |
|
"\\def\\ervg{{\\textnormal{g}}}\n", |
|
"\\def\\ervh{{\\textnormal{h}}}\n", |
|
"\\def\\ervi{{\\textnormal{i}}}\n", |
|
"\\def\\ervj{{\\textnormal{j}}}\n", |
|
"\\def\\ervk{{\\textnormal{k}}}\n", |
|
"\\def\\ervl{{\\textnormal{l}}}\n", |
|
"\\def\\ervm{{\\textnormal{m}}}\n", |
|
"\\def\\ervn{{\\textnormal{n}}}\n", |
|
"\\def\\ervo{{\\textnormal{o}}}\n", |
|
"\\def\\ervp{{\\textnormal{p}}}\n", |
|
"\\def\\ervq{{\\textnormal{q}}}\n", |
|
"\\def\\ervr{{\\textnormal{r}}}\n", |
|
"\\def\\ervs{{\\textnormal{s}}}\n", |
|
"\\def\\ervt{{\\textnormal{t}}}\n", |
|
"\\def\\ervu{{\\textnormal{u}}}\n", |
|
"\\def\\ervv{{\\textnormal{v}}}\n", |
|
"\\def\\ervw{{\\textnormal{w}}}\n", |
|
"\\def\\ervx{{\\textnormal{x}}}\n", |
|
"\\def\\ervy{{\\textnormal{y}}}\n", |
|
"\\def\\ervz{{\\textnormal{z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\rmA{{\\mathbf{A}}}\n", |
|
"\\def\\rmB{{\\mathbf{B}}}\n", |
|
"\\def\\rmC{{\\mathbf{C}}}\n", |
|
"\\def\\rmD{{\\mathbf{D}}}\n", |
|
"\\def\\rmE{{\\mathbf{E}}}\n", |
|
"\\def\\rmF{{\\mathbf{F}}}\n", |
|
"\\def\\rmG{{\\mathbf{G}}}\n", |
|
"\\def\\rmH{{\\mathbf{H}}}\n", |
|
"\\def\\rmI{{\\mathbf{I}}}\n", |
|
"\\def\\rmJ{{\\mathbf{J}}}\n", |
|
"\\def\\rmK{{\\mathbf{K}}}\n", |
|
"\\def\\rmL{{\\mathbf{L}}}\n", |
|
"\\def\\rmM{{\\mathbf{M}}}\n", |
|
"\\def\\rmN{{\\mathbf{N}}}\n", |
|
"\\def\\rmO{{\\mathbf{O}}}\n", |
|
"\\def\\rmP{{\\mathbf{P}}}\n", |
|
"\\def\\rmQ{{\\mathbf{Q}}}\n", |
|
"\\def\\rmR{{\\mathbf{R}}}\n", |
|
"\\def\\rmS{{\\mathbf{S}}}\n", |
|
"\\def\\rmT{{\\mathbf{T}}}\n", |
|
"\\def\\rmU{{\\mathbf{U}}}\n", |
|
"\\def\\rmV{{\\mathbf{V}}}\n", |
|
"\\def\\rmW{{\\mathbf{W}}}\n", |
|
"\\def\\rmX{{\\mathbf{X}}}\n", |
|
"\\def\\rmY{{\\mathbf{Y}}}\n", |
|
"\\def\\rmZ{{\\mathbf{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\ermA{{\\textnormal{A}}}\n", |
|
"\\def\\ermB{{\\textnormal{B}}}\n", |
|
"\\def\\ermC{{\\textnormal{C}}}\n", |
|
"\\def\\ermD{{\\textnormal{D}}}\n", |
|
"\\def\\ermE{{\\textnormal{E}}}\n", |
|
"\\def\\ermF{{\\textnormal{F}}}\n", |
|
"\\def\\ermG{{\\textnormal{G}}}\n", |
|
"\\def\\ermH{{\\textnormal{H}}}\n", |
|
"\\def\\ermI{{\\textnormal{I}}}\n", |
|
"\\def\\ermJ{{\\textnormal{J}}}\n", |
|
"\\def\\ermK{{\\textnormal{K}}}\n", |
|
"\\def\\ermL{{\\textnormal{L}}}\n", |
|
"\\def\\ermM{{\\textnormal{M}}}\n", |
|
"\\def\\ermN{{\\textnormal{N}}}\n", |
|
"\\def\\ermO{{\\textnormal{O}}}\n", |
|
"\\def\\ermP{{\\textnormal{P}}}\n", |
|
"\\def\\ermQ{{\\textnormal{Q}}}\n", |
|
"\\def\\ermR{{\\textnormal{R}}}\n", |
|
"\\def\\ermS{{\\textnormal{S}}}\n", |
|
"\\def\\ermT{{\\textnormal{T}}}\n", |
|
"\\def\\ermU{{\\textnormal{U}}}\n", |
|
"\\def\\ermV{{\\textnormal{V}}}\n", |
|
"\\def\\ermW{{\\textnormal{W}}}\n", |
|
"\\def\\ermX{{\\textnormal{X}}}\n", |
|
"\\def\\ermY{{\\textnormal{Y}}}\n", |
|
"\\def\\ermZ{{\\textnormal{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\vzero{{\\mathbf{0}}}\n", |
|
"\\def\\vone{{\\mathbf{1}}}\n", |
|
"\\def\\vmu{{\\bm{\\mu}}}\n", |
|
"\\def\\vtheta{{\\bm{\\theta}}}\n", |
|
"\\def\\va{{\\mathbf{a}}}\n", |
|
"\\def\\vb{{\\mathbf{b}}}\n", |
|
"\\def\\vc{{\\mathbf{c}}}\n", |
|
"\\def\\vd{{\\mathbf{d}}}\n", |
|
"\\def\\ve{{\\mathbf{e}}}\n", |
|
"\\def\\vf{{\\mathbf{f}}}\n", |
|
"\\def\\vg{{\\mathbf{g}}}\n", |
|
"\\def\\vh{{\\mathbf{h}}}\n", |
|
"\\def\\vi{{\\mathbf{i}}}\n", |
|
"\\def\\vj{{\\mathbf{j}}}\n", |
|
"\\def\\vk{{\\mathbf{k}}}\n", |
|
"\\def\\vl{{\\mathbf{l}}}\n", |
|
"\\def\\vm{{\\mathbf{m}}}\n", |
|
"\\def\\vn{{\\mathbf{n}}}\n", |
|
"\\def\\vo{{\\mathbf{o}}}\n", |
|
"\\def\\vp{{\\mathbf{p}}}\n", |
|
"\\def\\vq{{\\mathbf{q}}}\n", |
|
"\\def\\vr{{\\mathbf{r}}}\n", |
|
"\\def\\vs{{\\mathbf{s}}}\n", |
|
"\\def\\vt{{\\mathbf{t}}}\n", |
|
"\\def\\vu{{\\mathbf{u}}}\n", |
|
"\\def\\vv{{\\mathbf{v}}}\n", |
|
"\\def\\vw{{\\mathbf{w}}}\n", |
|
"\\def\\vx{{\\mathbf{x}}}\n", |
|
"\\def\\vy{{\\mathbf{y}}}\n", |
|
"\\def\\vz{{\\mathbf{z}}}\n", |
|
"\\def\\vphi{{\\boldsymbol{\\phi}}}\n", |
|
"\\def\\valpha{{\\boldsymbol{\\alpha}}}\n", |
|
"\\def\\vsigma{{\\boldsymbol{\\sigma}}}\n", |
|
"\\def\\vexp{{\\boldsymbol{\\exp{}}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\evalpha{{\\alpha}}\n", |
|
"\\def\\evbeta{{\\beta}}\n", |
|
"\\def\\evepsilon{{\\epsilon}}\n", |
|
"\\def\\evlambda{{\\lambda}}\n", |
|
"\\def\\evomega{{\\omega}}\n", |
|
"\\def\\evmu{{\\mu}}\n", |
|
"\\def\\evpsi{{\\psi}}\n", |
|
"\\def\\evsigma{{\\sigma}}\n", |
|
"\\def\\evtheta{{\\theta}}\n", |
|
"\\def\\eva{{a}}\n", |
|
"\\def\\evb{{b}}\n", |
|
"\\def\\evc{{c}}\n", |
|
"\\def\\evd{{d}}\n", |
|
"\\def\\eve{{e}}\n", |
|
"\\def\\evf{{f}}\n", |
|
"\\def\\evg{{g}}\n", |
|
"\\def\\evh{{h}}\n", |
|
"\\def\\evi{{i}}\n", |
|
"\\def\\evj{{j}}\n", |
|
"\\def\\evk{{k}}\n", |
|
"\\def\\evl{{l}}\n", |
|
"\\def\\evm{{m}}\n", |
|
"\\def\\evn{{n}}\n", |
|
"\\def\\evo{{o}}\n", |
|
"\\def\\evp{{p}}\n", |
|
"\\def\\evq{{q}}\n", |
|
"\\def\\evr{{r}}\n", |
|
"\\def\\evs{{s}}\n", |
|
"\\def\\evt{{t}}\n", |
|
"\\def\\evu{{u}}\n", |
|
"\\def\\evv{{v}}\n", |
|
"\\def\\evw{{w}}\n", |
|
"\\def\\evx{{x}}\n", |
|
"\\def\\evy{{y}}\n", |
|
"\\def\\evz{{z}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\mA{{\\mathbf{A}}}\n", |
|
"\\def\\mB{{\\mathbf{B}}}\n", |
|
"\\def\\mC{{\\mathbf{C}}}\n", |
|
"\\def\\mD{{\\mathbf{D}}}\n", |
|
"\\def\\mE{{\\mathbf{E}}}\n", |
|
"\\def\\mF{{\\mathbf{F}}}\n", |
|
"\\def\\mG{{\\mathbf{G}}}\n", |
|
"\\def\\mH{{\\mathbf{H}}}\n", |
|
"\\def\\mI{{\\mathbf{I}}}\n", |
|
"\\def\\mJ{{\\mathbf{J}}}\n", |
|
"\\def\\mK{{\\mathbf{K}}}\n", |
|
"\\def\\mL{{\\mathbf{L}}}\n", |
|
"\\def\\mM{{\\mathbf{M}}}\n", |
|
"\\def\\mN{{\\mathbf{N}}}\n", |
|
"\\def\\mO{{\\mathbf{O}}}\n", |
|
"\\def\\mP{{\\mathbf{P}}}\n", |
|
"\\def\\mQ{{\\mathbf{Q}}}\n", |
|
"\\def\\mR{{\\mathbf{R}}}\n", |
|
"\\def\\mS{{\\mathbf{S}}}\n", |
|
"\\def\\mT{{\\mathbf{T}}}\n", |
|
"\\def\\mU{{\\mathbf{U}}}\n", |
|
"\\def\\mV{{\\mathbf{V}}}\n", |
|
"\\def\\mW{{\\mathbf{W}}}\n", |
|
"\\def\\mX{{\\mathbf{X}}}\n", |
|
"\\def\\mY{{\\mathbf{Y}}}\n", |
|
"\\def\\mZ{{\\mathbf{Z}}}\n", |
|
"\\def\\mBeta{{\\bm{\\beta}}}\n", |
|
"\\def\\mPhi{{\\bm{\\Phi}}}\n", |
|
"\\def\\mLambda{{\\bm{\\Lambda}}}\n", |
|
"\\def\\mSigma{{\\bm{\\Sigma}}}\n", |
|
"\n", |
|
"\n", |
|
"\\DeclareMathAlphabet{\\mathsfit}{\\encodingdefault}{\\sfdefault}{m}{sl}\n", |
|
"\\SetMathAlphabet{\\mathsfit}{bold}{\\encodingdefault}{\\sfdefault}{bx}{n}\n", |
|
"\\newcommand{\\tens}[1]{\\bm{\\mathsfit{#1}}}\n", |
|
"\\def\\tA{{\\tens{A}}}\n", |
|
"\\def\\tB{{\\tens{B}}}\n", |
|
"\\def\\tC{{\\tens{C}}}\n", |
|
"\\def\\tD{{\\tens{D}}}\n", |
|
"\\def\\tE{{\\tens{E}}}\n", |
|
"\\def\\tF{{\\tens{F}}}\n", |
|
"\\def\\tG{{\\tens{G}}}\n", |
|
"\\def\\tH{{\\tens{H}}}\n", |
|
"\\def\\tI{{\\tens{I}}}\n", |
|
"\\def\\tJ{{\\tens{J}}}\n", |
|
"\\def\\tK{{\\tens{K}}}\n", |
|
"\\def\\tL{{\\tens{L}}}\n", |
|
"\\def\\tM{{\\tens{M}}}\n", |
|
"\\def\\tN{{\\tens{N}}}\n", |
|
"\\def\\tO{{\\tens{O}}}\n", |
|
"\\def\\tP{{\\tens{P}}}\n", |
|
"\\def\\tQ{{\\tens{Q}}}\n", |
|
"\\def\\tR{{\\tens{R}}}\n", |
|
"\\def\\tS{{\\tens{S}}}\n", |
|
"\\def\\tT{{\\tens{T}}}\n", |
|
"\\def\\tU{{\\tens{U}}}\n", |
|
"\\def\\tV{{\\tens{V}}}\n", |
|
"\\def\\tW{{\\tens{W}}}\n", |
|
"\\def\\tX{{\\tens{X}}}\n", |
|
"\\def\\tY{{\\tens{Y}}}\n", |
|
"\\def\\tZ{{\\tens{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\def\\gA{{\\mathcal{A}}}\n", |
|
"\\def\\gB{{\\mathcal{B}}}\n", |
|
"\\def\\gC{{\\mathcal{C}}}\n", |
|
"\\def\\gD{{\\mathcal{D}}}\n", |
|
"\\def\\gE{{\\mathcal{E}}}\n", |
|
"\\def\\gF{{\\mathcal{F}}}\n", |
|
"\\def\\gG{{\\mathcal{G}}}\n", |
|
"\\def\\gH{{\\mathcal{H}}}\n", |
|
"\\def\\gI{{\\mathcal{I}}}\n", |
|
"\\def\\gJ{{\\mathcal{J}}}\n", |
|
"\\def\\gK{{\\mathcal{K}}}\n", |
|
"\\def\\gL{{\\mathcal{L}}}\n", |
|
"\\def\\gM{{\\mathcal{M}}}\n", |
|
"\\def\\gN{{\\mathcal{N}}}\n", |
|
"\\def\\gO{{\\mathcal{O}}}\n", |
|
"\\def\\gP{{\\mathcal{P}}}\n", |
|
"\\def\\gQ{{\\mathcal{Q}}}\n", |
|
"\\def\\gR{{\\mathcal{R}}}\n", |
|
"\\def\\gS{{\\mathcal{S}}}\n", |
|
"\\def\\gT{{\\mathcal{T}}}\n", |
|
"\\def\\gU{{\\mathcal{U}}}\n", |
|
"\\def\\gV{{\\mathcal{V}}}\n", |
|
"\\def\\gW{{\\mathcal{W}}}\n", |
|
"\\def\\gX{{\\mathcal{X}}}\n", |
|
"\\def\\gY{{\\mathcal{Y}}}\n", |
|
"\\def\\gZ{{\\mathcal{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\sA{{\\mathbb{A}}}\n", |
|
"\\def\\sB{{\\mathbb{B}}}\n", |
|
"\\def\\sC{{\\mathbb{C}}}\n", |
|
"\\def\\sD{{\\mathbb{D}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\sF{{\\mathbb{F}}}\n", |
|
"\\def\\sG{{\\mathbb{G}}}\n", |
|
"\\def\\sH{{\\mathbb{H}}}\n", |
|
"\\def\\sI{{\\mathbb{I}}}\n", |
|
"\\def\\sJ{{\\mathbb{J}}}\n", |
|
"\\def\\sK{{\\mathbb{K}}}\n", |
|
"\\def\\sL{{\\mathbb{L}}}\n", |
|
"\\def\\sM{{\\mathbb{M}}}\n", |
|
"\\def\\sN{{\\mathbb{N}}}\n", |
|
"\\def\\sO{{\\mathbb{O}}}\n", |
|
"\\def\\sP{{\\mathbb{P}}}\n", |
|
"\\def\\sQ{{\\mathbb{Q}}}\n", |
|
"\\def\\sR{{\\mathbb{R}}}\n", |
|
"\\def\\sS{{\\mathbb{S}}}\n", |
|
"\\def\\sT{{\\mathbb{T}}}\n", |
|
"\\def\\sU{{\\mathbb{U}}}\n", |
|
"\\def\\sV{{\\mathbb{V}}}\n", |
|
"\\def\\sW{{\\mathbb{W}}}\n", |
|
"\\def\\sX{{\\mathbb{X}}}\n", |
|
"\\def\\sY{{\\mathbb{Y}}}\n", |
|
"\\def\\sZ{{\\mathbb{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\def\\emLambda{{\\Lambda}}\n", |
|
"\\def\\emA{{A}}\n", |
|
"\\def\\emB{{B}}\n", |
|
"\\def\\emC{{C}}\n", |
|
"\\def\\emD{{D}}\n", |
|
"\\def\\emE{{E}}\n", |
|
"\\def\\emF{{F}}\n", |
|
"\\def\\emG{{G}}\n", |
|
"\\def\\emH{{H}}\n", |
|
"\\def\\emI{{I}}\n", |
|
"\\def\\emJ{{J}}\n", |
|
"\\def\\emK{{K}}\n", |
|
"\\def\\emL{{L}}\n", |
|
"\\def\\emM{{M}}\n", |
|
"\\def\\emN{{N}}\n", |
|
"\\def\\emO{{O}}\n", |
|
"\\def\\emP{{P}}\n", |
|
"\\def\\emQ{{Q}}\n", |
|
"\\def\\emR{{R}}\n", |
|
"\\def\\emS{{S}}\n", |
|
"\\def\\emT{{T}}\n", |
|
"\\def\\emU{{U}}\n", |
|
"\\def\\emV{{V}}\n", |
|
"\\def\\emW{{W}}\n", |
|
"\\def\\emX{{X}}\n", |
|
"\\def\\emY{{Y}}\n", |
|
"\\def\\emZ{{Z}}\n", |
|
"\\def\\emSigma{{\\Sigma}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\etens}[1]{\\mathsfit{#1}}\n", |
|
"\\def\\etLambda{{\\etens{\\Lambda}}}\n", |
|
"\\def\\etA{{\\etens{A}}}\n", |
|
"\\def\\etB{{\\etens{B}}}\n", |
|
"\\def\\etC{{\\etens{C}}}\n", |
|
"\\def\\etD{{\\etens{D}}}\n", |
|
"\\def\\etE{{\\etens{E}}}\n", |
|
"\\def\\etF{{\\etens{F}}}\n", |
|
"\\def\\etG{{\\etens{G}}}\n", |
|
"\\def\\etH{{\\etens{H}}}\n", |
|
"\\def\\etI{{\\etens{I}}}\n", |
|
"\\def\\etJ{{\\etens{J}}}\n", |
|
"\\def\\etK{{\\etens{K}}}\n", |
|
"\\def\\etL{{\\etens{L}}}\n", |
|
"\\def\\etM{{\\etens{M}}}\n", |
|
"\\def\\etN{{\\etens{N}}}\n", |
|
"\\def\\etO{{\\etens{O}}}\n", |
|
"\\def\\etP{{\\etens{P}}}\n", |
|
"\\def\\etQ{{\\etens{Q}}}\n", |
|
"\\def\\etR{{\\etens{R}}}\n", |
|
"\\def\\etS{{\\etens{S}}}\n", |
|
"\\def\\etT{{\\etens{T}}}\n", |
|
"\\def\\etU{{\\etens{U}}}\n", |
|
"\\def\\etV{{\\etens{V}}}\n", |
|
"\\def\\etW{{\\etens{W}}}\n", |
|
"\\def\\etX{{\\etens{X}}}\n", |
|
"\\def\\etY{{\\etens{Y}}}\n", |
|
"\\def\\etZ{{\\etens{Z}}}\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\pdata}{p_{\\rm{data}}}\n", |
|
"\n", |
|
"\\newcommand{\\ptrain}{\\hat{p}_{\\rm{data}}}\n", |
|
"\\newcommand{\\Ptrain}{\\hat{P}_{\\rm{data}}}\n", |
|
"\n", |
|
"\\newcommand{\\pmodel}{p_{\\rm{model}}}\n", |
|
"\\newcommand{\\Pmodel}{P_{\\rm{model}}}\n", |
|
"\\newcommand{\\ptildemodel}{\\tilde{p}_{\\rm{model}}}\n", |
|
"\n", |
|
"\\newcommand{\\pencode}{p_{\\rm{encoder}}}\n", |
|
"\\newcommand{\\pdecode}{p_{\\rm{decoder}}}\n", |
|
"\\newcommand{\\precons}{p_{\\rm{reconstruct}}}\n", |
|
"\n", |
|
"\\newcommand{\\laplace}{\\mathrm{Laplace}} \n", |
|
"\n", |
|
"\\newcommand{\\E}{\\mathbb{E}}\n", |
|
"\\newcommand{\\Ls}{\\mathcal{L}}\n", |
|
"\\newcommand{\\R}{\\mathbb{R}}\n", |
|
"\\newcommand{\\emp}{\\tilde{p}}\n", |
|
"\\newcommand{\\lr}{\\alpha}\n", |
|
"\\newcommand{\\reg}{\\lambda}\n", |
|
"\\newcommand{\\rect}{\\mathrm{rectifier}}\n", |
|
"\\newcommand{\\softmax}{\\operatorname{softmax}}\n", |
|
"\\newcommand{\\mlp}{\\operatorname{MLP}}\n", |
|
"\\newcommand{\\attn}{\\operatorname{attn}}\n", |
|
"\\newcommand{\\sigmoid}{\\sigma}\n", |
|
"\\newcommand{\\softplus}{\\zeta}\n", |
|
"\\newcommand{\\KL}{D_{\\mathrm{KL}}}\n", |
|
"\\newcommand{\\Var}{\\mathrm{Var}}\n", |
|
"\\newcommand{\\standarderror}{\\mathrm{SE}}\n", |
|
"\\newcommand{\\Cov}{\\mathrm{Cov}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\newcommand{\\normlzero}{L^0}\n", |
|
"\\newcommand{\\normlone}{L^1}\n", |
|
"\\newcommand{\\normltwo}{L^2}\n", |
|
"\\newcommand{\\normlp}{L^p}\n", |
|
"\\newcommand{\\normmax}{L^\\infty}\n", |
|
"\n", |
|
"\\newcommand{\\parents}{Pa} \n", |
|
"\n", |
|
"\\DeclareMathOperator*{\\argmax}{arg\\,max}\n", |
|
"\\DeclareMathOperator*{\\argmin}{arg\\,min}\n", |
|
"\n", |
|
"\\DeclareMathOperator{\\sign}{sign}\n", |
|
"\\DeclareMathOperator{\\Tr}{Tr}\n", |
|
"\\let\\ab\\allowbreak\n", |
|
"\\newcommand{\\fix}{\\marginpar{FIX}}\n", |
|
"\\newcommand{\\new}{\\marginpar{NEW}}\n", |
|
"\\newcommand{\\model}{\\textsc{Abc}\\xspace}\n", |
|
"\\newcommand{\\modelmlp}{\\textsc{Abc}$_\\text{MLP}$\\xspace}\n", |
|
"\\newcommand{\\modelexp}{\\textsc{Abc}$_{\\exp}$\\xspace}\n", |
|
"\\newcommand{\\modelrandom}{\\textsc{Abc}$_{\\text{RD}}$\\xspace}\n", |
|
"\\newcommand{\\modelwindow}{\\textsc{Abc}$_{\\text{WD}}$\\xspace}\n", |
|
"\\newcommand{\\modelcluster}{\\textsc{Abc}$_{\\text{CL}}$\\xspace}\n", |
|
"\\newcommand{\\STAB}[1]{\\begin{tabular}{@{}c@{}}#1\\end{tabular}}\n", |
|
"\\newcommand{\\resolved}[1]{}\n", |
|
"\\newcommand{\\base}[0]{\\textsc{Base}\\xspace}\n", |
|
"\n", |
|
"\\newcommand{\\com}[1]{}\n", |
|
"\\newcommand{\\firststep}{selective annotation\\xspace}\n", |
|
"\\newcommand{\\FirstStep}{Selective Annotation\\xspace}\n", |
|
"\\newcommand{\\Firststep}{Selective annotation\\xspace}\n", |
|
"\\newcommand{\\votek}{vote-$k$\\xspace}\n", |
|
"\\newcommand{\\Votek}{Vote-$k$\\xspace}\n", |
|
"\n", |
|
"\\usepackage{xspace,mfirstuc,tabulary}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\definecolor{lightgray}{gray}{0.9}\n", |
|
"\n", |
|
"\n", |
|
"\\colorlet{soulgreen}{green!30}\n", |
|
"\\definecolor{red}{HTML}{FF0000}\n", |
|
"\\definecolor{blue}{HTML}{0000FF}\n", |
|
"\\definecolor{darkgreen}{HTML}{228B22}\n", |
|
"\\definecolor{dblue}{HTML}{007FFF}\n", |
|
"\\usepackage{pifont}\n", |
|
"\\newcommand{\\xmark}{\\textcolor{red}{\\ding{55}}}\n", |
|
"\\newcommand{\\cmark}{\\textcolor{darkgreen}{\\ding{51}}}\n", |
|
"\n", |
|
"\\usepackage{listings}\n", |
|
"\n", |
|
"\\definecolor{mymauve}{rgb}{0.58,0,0.82}\n", |
|
"\\lstset{ \n", |
|
" backgroundcolor=\\color{white}, \n", |
|
" basicstyle=\\footnotesize\\ttfamily, \n", |
|
" breakatwhitespace=false, \n", |
|
" breaklines=true, \n", |
|
" captionpos=b, \n", |
|
" commentstyle=\\color{mygreen}, \n", |
|
" deletekeywords={...}, \n", |
|
" escapeinside={\\\n", |
|
" extendedchars=true, \n", |
|
" firstnumber=1000, \n", |
|
" \n", |
|
" keepspaces=true, \n", |
|
" keywordstyle=\\color{blue}, \n", |
|
" language=Octave, \n", |
|
" morekeywords={*,...}, \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
" showspaces=false, \n", |
|
" showstringspaces=false, \n", |
|
" showtabs=false, \n", |
|
" stepnumber=2, \n", |
|
" stringstyle=\\color{mymauve}, \n", |
|
" tabsize=2,\t \n", |
|
" title=\\lstname, \n", |
|
" escapeinside={(*@}{@*)}\n", |
|
"}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\title{\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\FirstStep Makes Language Models Better Few-Shot Learners\n", |
|
"\n", |
|
"}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\author{\\textbf{Hongjin Su}$^\\spadesuit$ \\ \\ \n", |
|
" \\textbf{Jungo Kasai}$^{\\clubsuit\\diamondsuit}$ \\ \\ \n", |
|
" \\textbf{Chen Henry Wu}$^{\\heartsuit}$ \\ \\ \n", |
|
" \\textbf{Weijia Shi}$^\\clubsuit$ \\ \\ \n", |
|
" \\textbf{Tianlu Wang}$^{\\vardiamond}$ \\ \\\n", |
|
" \\textbf{Jiayi Xin}$^{\\spadesuit}$\\\\\n", |
|
" \\textbf{Rui Zhang}$^{\\bigstar}$\\ \\ \n", |
|
" \\textbf{Mari Ostendorf}$^{\\clubsuit}$\n", |
|
" \\ \\ \n", |
|
" \\textbf{Luke Zettlemoyer}$^{\\clubsuit\\vardiamond}$\n", |
|
" \\ \\ \n", |
|
" \\textbf{Noah A.\\ Smith}$^{\\clubsuit\\diamondsuit}$\\ \\ \n", |
|
" \\textbf{Tao Yu}$^{\\spadesuit\\clubsuit}$\n", |
|
" \\\\ \n", |
|
" $^\\spadesuit$The University of Hong Kong \n", |
|
" \\quad\n", |
|
" $^\\clubsuit$University of Washington\n", |
|
" \\quad\n", |
|
" $^\\diamondsuit$Allen Institute for AI\n", |
|
" \\\\\n", |
|
" $^\\heartsuit$Carnegie Mellon University \\quad\n", |
|
" $^\\bigstar$Penn State University \n", |
|
" \\quad \n", |
|
" $^\\vardiamond$Meta AI\\\\\n", |
|
" \\\\\n", |
|
" {\\tt \\{hjsu,tyu\\}@cs.hku.hk,}\\ \\ {\\tt [email protected],} \\ \\ {\\tt [email protected]}\\\\\n", |
|
" {\\tt \\{jkasai,swj0419,lsz,nasmith\\}@cs.washington.edu}\n", |
|
"}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\iclrfinalcopy \n", |
|
"\\begin{document}\n", |
|
"\n", |
|
"\\maketitle\n", |
|
"\n", |
|
"\\setlength{\\abovedisplayskip}{2pt}\n", |
|
"\\setlength{\\belowdisplayskip}{2pt}\n", |
|
"\n", |
|
"\n", |
|
"\\vspace{-0.5cm}\n", |
|
"\\begin{abstract}\n", |
|
"\\vspace{-0.1cm}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Many recent approaches to natural language tasks are built on the remarkable abilities of large language models.\n", |
|
"Large language models can perform in-context learning, where they learn a new task from a few task demonstrations, without any parameter updates.\n", |
|
"This work examines the implications of in-context learning for the creation of datasets for new natural language tasks.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Departing from recent in-context learning methods, we formulate an annotation-efficient, two-step framework: \\textit{\\firststep} that chooses a pool of examples to annotate from \\emph{unlabeled} data in advance, followed by prompt retrieval that retrieves task examples from the annotated pool at test time.\n", |
|
"Based on this framework, we propose an unsupervised, graph-based \\firststep method, \\votek, to select diverse, representative examples to annotate. \n", |
|
" \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Extensive experiments on 10 datasets (covering classification, commonsense reasoning, dialogue, and text/code generation) demonstrate that our \\firststep method improves the task performance by a large margin. \n", |
|
"\n", |
|
"\n", |
|
"On average, \\votek achieves a 12.9\\\n", |
|
"\n", |
|
"Compared to state-of-the-art supervised finetuning approaches, it yields similar performance with 10-100$\\times$ less annotation cost across 10 tasks.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"We further analyze the effectiveness of our framework in various scenarios: language models with varying sizes, alternative \\firststep methods, and cases where there is a test data domain shift.\n", |
|
"We hope that our studies will serve as a basis for data annotations as large language models are increasingly applied to new tasks.\\footnote{Our code is available at \\url{https://github.com/HKUNLP/icl-selective-annotation}}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\begin{figure}[h!]\n", |
|
"\\vspace{3mm}\n", |
|
"\\centering\n", |
|
" \\includegraphics[width=0.98\\textwidth]{images/pipeline-avg-main-results-v5.pdf}\n", |
|
"\\caption{\n", |
|
"\\textbf{Left}: Our two-step framework for in-context learning. \n", |
|
"Instead of assuming access to large labeled data, we first select a small number of (diverse and representative) unlabeled examples to annotate before test time.\n", |
|
"At test time, we retrieve in-context examples from the small annotated pool.\n", |
|
"\n", |
|
"\n", |
|
"\\textbf{Right}: In-context learning performance over varying annotation budgets averaged over three representative tasks (HellaSwag commonsense reasoning, MRPC paraphrase detection, and MWOZ dialogue state tracking).\n", |
|
"\n", |
|
"Here we experiment with GPT-J and Codex-davinci-002.\n", |
|
"\n", |
|
"Two \\firststep methods are presented: \\textit{random selection} and our \\textit{\\votek} method.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"We observe that an appropriate \\firststep method largely improves the in-context learning performance with smaller variance over random selection under varying annotation budgets.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"}\n", |
|
"\\label{fig:pipeline-avg-main-results}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"\\end{abstract}\n", |
|
"\\section{Introduction}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Much recent work builds approaches to natural language tasks on the impressive abilities of large language models (e.g., GPT-3; \\citealp{gpt3}).\n", |
|
"Large language models can perform downstream tasks by conditioning generation on a few task demonstrations, thereby avoiding the need for any parameter updates.\n", |
|
"This new, few-shot learning paradigm is called \\textit{in-context learning} and has become an attractive alternative to supervised finetuning \\citep{prompt_survey}. \n", |
|
"In this work, we study the implications of this remarkable capability of large language models for dataset creation and annotation.\n", |
|
"We extensively examine how to reduce the manual annotation cost while retaining high in-context learning performance.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Although in-context learning was originally proposed for few-shot learning, recent works show that retrieving prompts from a large set of annotated examples is necessary to achieve good performances \\citep{liu-etal-2022-makes,rubin2022}. \n", |
|
"In particular, they show that the performance substantially improves when similar examples (under some embedding function) are retrieved as in-context examples specifically for each test input \\citep{liu-etal-2022-makes}.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Each test sample only requires a few in-context examples in its prompt.\n", |
|
"Different test instances, however, require different in-context examples with their associated annotations, necessitating a large set of annotated examples.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Distinct from these recent efforts, we establish a two-step framework to better understand and improve the annotation efficiency (Fig.\\ \\ref{fig:pipeline-avg-main-results}): the first step is \\textit{\\firststep} that picks a small number of instances to get annotated before test time, followed by \\textit{prompt retrieval} that retrieves in-context examples for each test instance from the annotated data.\n", |
|
"The total annotation budget is the number of examples selected and annotated in the first step.\n", |
|
"The second step is bounded by the number of examples that can fit as input to a language model. Based on this framework, we propose an unsupervised, graph-based \\firststep method, named vote-$k$, that selects diverse and representative instances to be annotated. \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Our extensive experiments over 10 datasets across diverse tasks (covering classification, commonsense reasoning, dialogue, and text/code generation; see Tab.~\\ref{tab:main_results}) demonstrate that our graph-based \\firststep method, \\votek (\\S\\ref{sec:sample_selection_method}), substantially improves the in-context learning performance by balancing the diversity and representativeness of annotated samples. \n", |
|
"For instance, \\votek, combined with similarity-based prompt retrieval \\citep{liu-etal-2022-makes,rubin2022}, \n", |
|
"achieves a 11.4\\\n", |
|
"Moreover, the improvement is consistent across language models with varying sizes (2B-175B parameters) (\\S\\ref{sec:lm_sizes}).\n", |
|
"This finding is in contrast with finetuning, where we cannot see the effectiveness of \\firststep over random baseline, due to outliers~\\citep{karamcheti-etal-2021-mind} or training instability \\citep{darcy2022limitations}.\n", |
|
"\n", |
|
"\n", |
|
"We hypothesize that in-context learning \\textit{with similarity-based prompt retrieval} is more robust to small annotation sizes and outliers because only the most similar examples are retrieved for each test instance.\n", |
|
"Indeed, we observe that \\textit{random prompt retrieval} fails to benefit from \\firststep (\\S\\ref{sec:random_retrieval}), providing support for our hypothesis.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Besides performance comparisons within a fixed annotation budget, we show that \\firststep provides better few-shot performance with 5-100$\\times$ \\textit{less annotation cost} for new natural language tasks.\n", |
|
"\n", |
|
"In-context learning with 18 examples selected by \\votek achieves higher performance than 100 randomly selected examples on 6 out of the 10 tasks.\n", |
|
"\n", |
|
"\n", |
|
"It also outperforms strong finetuning methods by a large margin (Fig.\\ \\ref{fig:icl-vs-ft}) \n", |
|
"\n", |
|
"and requires 10-100$\\times$ less annotations for similar performance (\\S\\ref{sec:in-context_finetuning}).\n", |
|
"\n", |
|
"We observe that in-context learning quickly (100 or 300 samples are annotated) converges to decent performance when \\votek \\firststep is applied.\n", |
|
"These results suggest that large language models do not require large annotated datasets (e.g., 10K) due to their ability to adapt to new tasks through simple prompting.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\Firststep also makes in-context learning much more \\textit{stable}. \n", |
|
"\n", |
|
"\n", |
|
"In real-world scenarios, even collecting \\textit{unlabeled} data is non-trivial and introduces randomness.\n", |
|
"We simulate such randomness in our experimental setting by subsampling the original unlabeled data multiple times.\n", |
|
"Our results suggest that \\votek \\firststep largely reduces the variance of in-context learning even in this setting \n", |
|
"(Tab.~\\ref{tab:main_results}).\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Further analysis shows larger improvements when there is a domain shift between training and test data (e.g., text from different Amazon users; \\citealp{wilds}; \\S\\ref{sec:domain_shift}).\n", |
|
"\n", |
|
"Finally, when compared with previous \\firststep methods designed for supervised training/finetuning, we demonstrate that \\votek \\firststep consistently improves the performance \n", |
|
"\n", |
|
"(\\S\\ref{subsec:sample-selection-methods}).\n", |
|
"As in-context learning has been applied to increasingly more natural language processing applications, we hope that our annotation-efficient framework will provide useful guidance for both researchers and practitioners.\n", |
|
"\n", |
|
"\\section{\\FirstStep for In-Context Learning}\n", |
|
"\\label{sec:framework}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"In-context learning only requires a few annotated examples per test instance (\\emph{few-shot learning}), while avoiding expensive finetuning on the whole training data.\n", |
|
"It is, however, often assumed that all \\emph{annotated} training data are available for prompt retrieval (e.g., \\citealp{liu-etal-2022-makes,rubin2022}).\n", |
|
"Yet the implied total annotation costs are hardly discussed in previous work.\n", |
|
"We develop a better practice for few-shot learning with large language models by carefully studying the total annotation cost required for in-context learning.\n", |
|
"We also study how examples should be selected to annotate, in order to make in-context learning perform better for new tasks.\n", |
|
"We formulate a general framework (Fig.\\ \\ref{fig:pipeline-avg-main-results} left) that consists of two steps: \\firststep (\\S\\ref{sec:sample_selection}) and prompt retrieval (\\S\\ref{sec:prompt_retrieval}).\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{\\FirstStep}\n", |
|
"\\label{sec:sample_selection}\n", |
|
"The first step chooses examples to annotate \\emph{before} test time.\n", |
|
"This process thus determines the total annotation budget.\n", |
|
"\n", |
|
"\n", |
|
"This \\firststep process is largely ignored in the recent literature for in-context learning.\n", |
|
"We will demonstrate, however, that the annotation cost can be substantially reduced by choosing a small set of diverse, representative examples, while retaining the downstream performance (\\S\\ref{sec:experiments}). Formally, given a set of unlabeled samples $\\mathcal{X} = \\{x_i\\}_{i=1}^{N}$, \\firststep aims at selecting a subset $\\mathcal{L} \\subset \\mathcal{X}$ to be annotated, where $|\\mathcal{L}| = M$ is the annotation budget. \n", |
|
"\n", |
|
"We discuss our \\votek \\firststep method and other \\firststep baselines below.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\paragraph{\\Votek}\n", |
|
"\\label{sec:sample_selection_method}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"The goal of \\firststep for in-context learning is to select diverse and representative examples; \n", |
|
"representativeness will help many test instances to find similar demonstrations, while diversity increases the total coverage.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"We develop \\votek, a graph-based method that promotes both diversity and representativeness.\n", |
|
"A detailed algorithm can be found in Appendix~\\ref{sec:details-sample-selection}. \n", |
|
"We first compute a vector representation for each \\emph{unlabeled} training instance using Sentence-BERT \\citep{reimers-gurevych-2019-sentence} by averaging the resulting vectors over the text input words.\\footnote{\\url{https://huggingface.co/sentence-transformers/all-mpnet-base-v2}.}\n", |
|
"\n", |
|
"We then use the embedding vectors to create a directed \n", |
|
"\n", |
|
"\n", |
|
"graph $G = (V, E)$ where the vertices $V$ are the unlabeled instances $\\mathcal{X}$ \n", |
|
"\n", |
|
"as defined above.\n", |
|
"For each vertex $v \\in V$, we create an edge to its $k$ nearest vertices in terms of the cosine similarity between the embeddings.\n", |
|
" Now let $\\mathcal{L}$ and $\\mathcal{U}$ denote the sets of already chosen (i.e., labeled) samples and remaining samples, respectively. \n", |
|
" Initially, $\\mathcal{L}=\\emptyset$.\n", |
|
" Every vertex $u \\in \\mathcal{U}$ is scored by a modified degree: \n", |
|
" \\begin{align*}\n", |
|
" \\mathrm{score}(u) = \\sum_{v \\in \\{v | (v, u) \\in E, v \\in \\mathcal{U}\\}} s (v), \\quad \\text{where} \\ s(v) = \\rho ^{- |\\{\\ell \\in \\mathcal{L}| (v, \\ell) \\in E \\}|}, \\quad \\rho > 1\n", |
|
" \\end{align*}\n", |
|
"where $s$ discounts $v$ that is close to the already selected instances, thereby encouraging diversity.\n", |
|
"In every iteration, we take $\\argmax_{u \\in \\mathcal{U}} \\mathrm{score}(u)$ and move it from $\\mathcal{U}$ to $\\mathcal{L}$.\n", |
|
"We run $M/10$ of these iterations; after this process, the current labeled $\\mathcal{L}$ has $M/10$ samples (up to Line~\\ref{line:first} in Algorithm~\\ref{alg:vote-k}).\n", |
|
"Subsequently, we use $\\mathcal{L}$ as the in-context learning examples for large language model, e.g.,GPT-J \\citep{gpt-j}, and generate a prediction for every instance in $\\mathcal{U}$.\n", |
|
"We then compute the average log probability over the generation output as the model's confidence score (Line~\\ref{line:lm-start} to Line~\\ref{line:lm-ends} in Algorithm~\\ref{alg:vote-k}).\n", |
|
"\n", |
|
"\n", |
|
"We then partition $\\mathcal{U}$ into $M$ equal-sized buckets, based on their confidence scores (e.g., if $M=100$, we group the unlabeled instances by percentile).\n", |
|
"We add to $\\mathcal{L}$ the example with the maximum score from each of the first $9M/10$ buckets (discarding the $M/10$ buckets with the most confident examples), resulting in $|\\mathcal{L}|=M$ (Line~\\ref{line:it-start} to Line~\\ref{line:it-ends} in Algorithm~\\ref{alg:vote-k}). This further encourages diversity by selecting instances with varying confidence scores from in-context learning.\n", |
|
"We tuned $k$ and $\\rho$ in our preliminary experiments, and found that $k\\!=\\!150$ and $\\rho\\!=\\!10$ perform well across many datasets.\n", |
|
"We will explore other \\firststep methods from prior work on active learning or coreset selection (\\S\\ref{subsec:sample-selection-methods}) and see that \\votek outperforms these alternative methods.\n", |
|
"\n", |
|
"\n", |
|
"\\paragraph{Random and Other \\FirstStep Methods}\n", |
|
"To quantify the effect of \\firststep, we also provide random and other baselines.\n", |
|
"For randomly-selected annotation, we conduct experiments three times and report the average score.\n", |
|
"We will show that these baselines substantially underperform the \\votek method (\\S\\ref{sec:results}), demonstrating the importance of the \\firststep step to reduce the total annotation cost.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{Prompt Retrieval}\n", |
|
"\\label{sec:prompt_retrieval}\n", |
|
"Once we have a set of annotated examples $\\mathcal{L}$ from \\firststep, we retrieve a few examples from the annotated set as in-context examples for each test instance.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Following recent work~\\citep{liu-etal-2022-makes}, we will compute embeddings for all annotated samples using Sentence-BERT and find the most similar examples to each test instance in terms of cosine similarity.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n" |
|
], |
|
"context_after_exp": [ |
|
"\\section{Experiments}\\label{sec:experiments}\n", |
|
"\n", |
|
"We conduct extensive experiments \n", |
|
"\n", |
|
"over 10 diverse datasets, spanning 9 distinct tasks, and show a better approach to few-shot learning than previously considered.\n", |
|
"\n", |
|
"In general, we find that the first step of \\firststep is particularly crucial to reduce the amount of required annotation.\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{Datasets and Tasks}\n", |
|
"\\label{sec:datasets_tasks}\n", |
|
"\n", |
|
"We use 10 diverse NLP datasets across 9 tasks that are listed in Table~\\ref{tab:datasets_model}.\n", |
|
"\n", |
|
"These datasets involve different task formulations, thereby allowing for extensive evaluations in varying scenarios. \n", |
|
"Some of those are included in the widely-used GLUE benchmark \\citep{wang2019glue}.\n", |
|
"Appendix \\ref{sec:datasets_all} illustrates details of the 10 datasets with examples.\n", |
|
"\n", |
|
"For each dataset, we use the standard train/dev./test split available from the Transformers library \\citep{wolf-etal-2020-transformers}. In the \\firststep step, we remove all labels in the training data. \n", |
|
"For the datasets that have test data available publicly, we use the the test data for evaluation (SST-5, XSUM, MWoZ, and DBpedia).\n", |
|
"For the others, we follow prior work (e.g., \\citealp{jiang-etal-2020-smart,Lan2020ALBERT,gao-etal-2021-making}) and use the dev.\\ data for evaluation.\\footnote{The one exception is GeoQuery, where we concatenated the dev.\\ and test data to have reliable evaluations on larger data.}\n", |
|
"We evaluate the methods by accuracy for all classification and multiple-choice selection datasets, joint accuracy \\citep{budzianowski-etal-2018-multiwoz} for MWoZ, test suite accuracy \\citep{zhong-etal-2020-semantic} for GeoQuery, exact matching \\citep{rajpurkar-etal-2016-squad} for NQ, and ROUGE-L \\citep{Lin2004ROUGEAP} for XSum.\n", |
|
"\n", |
|
"\n", |
|
"\\begin{table*}[!h]\n", |
|
"\\begin{adjustbox}{width=0.96\\linewidth}\n", |
|
"\\begin{tabular}{@{}l@{}l@{}c@{}c@{}}\n", |
|
"\\toprule\n", |
|
"& Dataset & Task & In-Context Learning Models \\\\ \n", |
|
"\\midrule[.005em]\n", |
|
"\\multirow{5}*{\\textbf{Classification}} & MRPC \\citep{mrpc} & Paraphrase Detection\n", |
|
"&\n", |
|
"GPT-Neo, GPT-J, GPT-3\n", |
|
"\\\\\n", |
|
"& SST-5 \\citep{socher-etal-2013-recursive} & Sentiment Analysis & GPT-J\n", |
|
"\\\\\n", |
|
"& DBpedia \\citep{dbpedia15} & Topic Classification & \n", |
|
"GPT-J\n", |
|
"\\\\\n", |
|
"& MNLI \\citep{williams-etal-2018-broad} & Natural Language Inference & \n", |
|
"GPT-J\n", |
|
"\\\\\n", |
|
"& RTE \\citep{rte5} & \\ \\ Natural Language Inference \\ \\ & \n", |
|
"GPT-J\n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"\\multirow{1}*{\\textbf{Multiple-Choice}} \\ \\ & HellaSwag \\citep{zellers-etal-2019-hellaswag} & Commonsense \\par Reasoning &\n", |
|
"OPT, GPT-Neo, GPT-J, GPT-3\n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"\\multirow{1}*{\\textbf{Dialogue}} & MWoZ 2.4 \\citep{budzianowski-etal-2018-multiwoz} & Dialogue State Tracking & \n", |
|
"Codex-\\{cushman, davinci-002\\}\n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"\\multirow{3}*{\\textbf{Generation}} & GeoQuery \\citep{geoquery96} & Semantic Parsing &\n", |
|
"Codex-davinci-002\n", |
|
"\\\\\n", |
|
"& NQ \\citep{kwiatkowski2019natural} & Open-Domain QA & \n", |
|
"Codex-davinci-002\n", |
|
"\\\\\n", |
|
"& XSUM \\citep{xsum2018} & Summarization & \n", |
|
"GPT-J\n", |
|
"\\\\\n", |
|
"\\bottomrule\n", |
|
"\\end{tabular}\n", |
|
"\\end{adjustbox}\n", |
|
"\\caption{\n", |
|
"All the 10 datasets and the in-context learning models used in our experiments. GPT-J and Codex-davinci-002 are used by default.\n", |
|
"Other in-context learning models are explored in analysis. \n", |
|
"\n", |
|
"}\n", |
|
"\\label{tab:datasets_model}\n", |
|
"\\end{table*}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\paragraph{Measuring Stability}\n", |
|
"Given a set of unlabeled data, our \\votek \\firststep algorithm is \\emph{deterministic}, without any randomness.\n", |
|
"However, we note that in real scenarios, even getting \\textit{unlabeled} samples is not trivial, and getting unlabeled samples can be a process with large variance. To simulate this real setting, we perform \\firststep from 3K instances that are randomly subsampled from the original training data for each task.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"For each experiment, we repeat this subsampling three times, and results are averaged over the three trials. \n", |
|
"We will find that \\votek still substantially improves stability over alternative \\firststep methods.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{In-Context Learning Models}\n", |
|
"We mainly perform experiments using GPT-J with 6B parameters \\citep{gpt-j} due to our computational budget.\n", |
|
"The exceptions are the MWoZ, GeoQuery, and NQ datasets, where we use Codex-davinci-002 \\citep{codex},\\footnote{The parameter size of Codex is not officially confirmed, but it is likely to be 175B.} a variant of GPT-3 finetuned on code data from the web.\n", |
|
"Codex is particularly effective for structured prediction such as semantic parsing, and we found it is indeed effective on three datasets (MWoZ, GeoQuery, and NQ) in our preliminary experiments.\n", |
|
"We will explore the effectiveness of selective annotation on the largest publically available language models, OPT-175B~\\citep{Zhang2022OPTOP} for HellaSwag (Fig.~\\ref{fig:hellaswag_opt}) and Codex-davinci-002 for MWoZ, over varying annotation budgets.\n", |
|
"We will also explore other language models with different sizes for three representative tasks (HellaSwag, MWoZ, and SST-5) in \\S\\ref{sec:lm_sizes}: GPT-3 with 175B \\citep{gpt3} and GPT-Neo with 2.7B parameters \\citep{gpt-neo}.\n", |
|
"\n", |
|
"Our later experiments will show the same patterns among \\firststep methods over these different language models.\n", |
|
"For the classification and multiple-choice tasks, we compute the average log score for each choice and choose the maximum one.\n", |
|
"For generation tasks, we simply perform beam-search decoding.\n", |
|
"\n", |
|
"\n", |
|
"See Appendix \\ref{sec:prompt_templates} for our in-context learning prompt templates for all 10 datasets.\n", |
|
"For every test instance, we feed as much retrieved samples as possible into the language model until the maximum token length is reached. \n", |
|
"\n", |
|
"On average, the number of samples $N$ fed into the language model is 13.4 across different experiments.\n", |
|
"The in-context examples are concatenated in the ascending order of the similarity so that more similar examples benefit from the recency bias~\\citep{lu-etal-2022-fantastically}. \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{Main Results}\n", |
|
"\\label{sec:results}\n", |
|
"\n", |
|
"\\begin{table}[h!]\n", |
|
"\\addtolength{\\tabcolsep}{-4.7pt}\n", |
|
"\\centering\n", |
|
"\\small\n", |
|
"\\begin{tabular}{@{} cc m{0.001em} ccccc m{-0.05mm} c m{-0.05em} c m{0.001em} ccc @{}}\n", |
|
"\\toprule[.1em]\n", |
|
"\n", |
|
" \\multicolumn{2}{c}{\\textbf{Method}}\n", |
|
"&& \\multicolumn{5}{c}{\\textbf{Classification}}\n", |
|
"&& \\multicolumn{1}{c}{\\textbf{Multi-Choice}}\n", |
|
"&& \\multicolumn{1}{c}{\\textbf{Dialogue}}\n", |
|
"&& \\multicolumn{3}{c}{\\textbf{Generation}}\n", |
|
"\\\\\n", |
|
"\\cmidrule(lr){1-2}\n", |
|
"\\cmidrule(lr){4-8}\n", |
|
"\\cmidrule(lr){9-10}\n", |
|
"\\cmidrule(lr){12-13}\n", |
|
"\\cmidrule(lr){14-16}\n", |
|
"\n", |
|
"$|\\mathcal{L}|$\n", |
|
"&Selection\n", |
|
"\n", |
|
"&& MRPC & SST-5 & MNLI & DBpedia & RTE\n", |
|
"&& HSwag\n", |
|
"&& MWoZ \n", |
|
"&& GeoQ & NQ & XSum\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\midrule[.1em]\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"100\n", |
|
"&Random\n", |
|
"\n", |
|
"&& 63.5\n", |
|
"& 44.2\n", |
|
"& 37.4\n", |
|
"& 89.8\n", |
|
"& 51.5\n", |
|
"&& 65.2\n", |
|
"&& 47.2\n", |
|
"&& 78.6\n", |
|
"& 30.8\n", |
|
"& 15.3\n", |
|
"\\\\\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"100\n", |
|
"&\\Votek\n", |
|
"\n", |
|
"&& \\textbf{70.7}\n", |
|
"& \\textbf{53.0}\n", |
|
"& \\textbf{47.3}\n", |
|
"& \\textbf{93.4}\n", |
|
"& \\textbf{55.5}\n", |
|
"&& \\textbf{70.7}\n", |
|
"&& \\textbf{51.4}\n", |
|
"&& \\textbf{82.8}\n", |
|
"& \\textbf{33.6}\n", |
|
"& \\textbf{17.2}\n", |
|
"\\\\\n", |
|
"\n", |
|
"100\n", |
|
"\n", |
|
"& $\\Delta$ Absolute gain\n", |
|
"&& \\textblue{+7.2}\n", |
|
"& \\textblue{+8.8}\n", |
|
"& \\textblue{+9.9}\n", |
|
"& \\textblue{+3.6}\n", |
|
"& \\textblue{+4.0}\n", |
|
"&& \\textblue{+5.5}\n", |
|
"&& \\textblue{+4.2}\n", |
|
"&& \\textblue{+4.2}\n", |
|
"& \\textblue{+2.8}\n", |
|
"& \\textblue{+1.9}\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\midrule[.05em]\n", |
|
"\n", |
|
"18\n", |
|
"&Random\n", |
|
"\n", |
|
"&& 59.6\n", |
|
"& 39.8\n", |
|
"& 36.7\n", |
|
"& 77.6\n", |
|
"& 50.4\n", |
|
"&& 62.5\n", |
|
"&& 33.6\n", |
|
"&& 62.4\n", |
|
"& 29.8\n", |
|
"& 13.6\n", |
|
"\\\\\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"18\n", |
|
"&\\Votek\n", |
|
"\n", |
|
"&& \\textbf{64.2}\n", |
|
"& \\textbf{47.6}\n", |
|
"& \\textbf{41.0}\n", |
|
"& \\textbf{87.1}\n", |
|
"& \\textbf{54.3}\n", |
|
"&& \\textbf{67.4}\n", |
|
"&& \\textbf{42.8}\n", |
|
"&& \\textbf{72.5}\n", |
|
"& \\textbf{32.3}\n", |
|
"& \\textbf{15.2}\n", |
|
"\\\\\n", |
|
"\n", |
|
"18\n", |
|
"\n", |
|
"& $\\Delta$ Absolute gain\n", |
|
"&& \\textblue{+4.8}\n", |
|
"& \\textblue{+7.8}\n", |
|
"& \\textblue{+4.3}\n", |
|
"& \\textblue{+9.5}\n", |
|
"& \\textblue{+3.9}\n", |
|
"&& \\textblue{+4.9}\n", |
|
"&& \\textblue{+8.8}\n", |
|
"&& \\textblue{+9.9}\n", |
|
"& \\textblue{+2.5}\n", |
|
"& \\textblue{+1.6}\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\bottomrule[.1em]\n", |
|
"\\end{tabular}\n", |
|
"\\caption{In-context learning results with randomly-selected and \\votek \\firststep methods on all 10 datasets, with an annotation budget of 100 or 18. \n", |
|
"There is no prompt retrieval step when only 18 samples are annotated since all annotated samples can fit into the in-context learning model's input. \n", |
|
"\n", |
|
"Across the board, \\firststep with \\votek substantially outperforms the randomly-selected annotation baseline for in-context learning.\n", |
|
"\n", |
|
"Further, \\votek largely reduces the variance over three trials (see the min and max results in Appendix~\\ref{app:main-results}), making in-context learning more stable. \n", |
|
"\n", |
|
"\n", |
|
"}\n", |
|
"\n", |
|
"\\label{tab:main_results}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Seen in Table \\ref{tab:main_results} are our results from all 10 diverse datasets with the annotation budgets of $|\\mathcal{L}| \\in \\{18, 100\\}$.\n", |
|
"18 is chosen so that all annotated examples can be fit to the prompt for the language models without prompt retrieval.\n", |
|
"Over all datasets, \\votek \\firststep outperforms the random baseline by a large margin (5.2\\\n", |
|
"Even when only 18 examples are annotated and fixed as the in-context examples for all testing instances (no prompt retrieval step), in-context learning with \\votek still improves the randomly-selected annotation baseline (5.8\\% absolute gain on average).\n", |
|
"\n", |
|
"Particularly noteworthy is that in-context learning with 18 examples selected by \\votek achieves higher performance than the one with 100 randomly selected examples on 6 out of 10 tasks.\n", |
|
"\n", |
|
"Moreover, \\votek is a deterministic \\firststep method, conditioned on a set of unlabeled samples. Therefore, the variance of \\votek comes solely from how the unlabeled samples are collected, largely improving the robustness of in-context learning. \n", |
|
"\n", |
|
"We therefore recommend that researchers and practitioners use \\firststep (e.g., our \\votek method) to better benefit from the few-shot learning capability of large language models with stability.\n", |
|
"Our later experiments will also illustrate that \\votek consistently outperforms alternative \\firststep methods (\\S\\ref{subsec:sample-selection-methods}).\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\section{Analysis}\\label{sec:analysis}\n", |
|
"Our extensive experiments demonstrated that \\firststep is important for the success of in-context learning. \n", |
|
"\n", |
|
"Here we conduct detailed analysis to provide further guidance for researchers and practitioners of few-shot in-context learning.\n", |
|
"\n", |
|
"\n", |
|
"We analyze \\firststep for in-context learning from a variety of perspectives: comparisons to finetuning methods (\\S\\ref{sec:in-context_finetuning}), varying language model sizes (\\S\\ref{sec:lm_sizes}), test data domain shifts (\\S\\ref{sec:domain_shift}), prompt retrieval methods (\\S\\ref{sec:random_retrieval}), and alternative \\firststep methods (\\S\\ref{subsec:sample-selection-methods}). \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{In-Context Learning vs.\\ Finetuning}\n", |
|
"\\label{sec:in-context_finetuning}\n", |
|
"\n", |
|
"\\begin{figure}[h!]\n", |
|
"\\centering\n", |
|
" \\includegraphics[width=\\textwidth]{images/main_icl_vs_finetune.pdf}\n", |
|
"\\caption{Comparisons between the in-context learning and finetuning paradigms over varying annotation budgets on three representative tasks: HellaSwag commonsense reasoning, MRPC paraphrase detection, and MWoZ dialogue state tracking.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Four configurations are presented: finetuning with examples that are randomly selected to annotate (FT-random) or selected by our \\votek \\firststep method (\\S\\ref{sec:sample_selection_method}; FT-vote-\\textit{k}) and in-context learning with randomly-selected annotation (ICL-random) or \\votek selection (ICL-vote-\\textit{k}).\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"See \\S\\ref{sec:in-context_finetuning} for experimental details.\n", |
|
"\n", |
|
"Selective annotation largely improves the in-context learning performance compared to randomly-selected annotation even when the annotation budget is 18.\n", |
|
"In-context learning with wisely-selected labeled samples is a much better few-shot practice than a strong finetuning method.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"}\n", |
|
"\\label{fig:icl-vs-ft}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"As discussed earlier, in-context learning is an alternative learning paradigm to conventional finetuning.\n", |
|
"Through the lens of our two-step framework, we observed that \\firststep and prompt retrieval are key to the success of in-context learning.\n", |
|
"A new question now arises: how does in-context learning compare with finetuning under limited annotation budgets? \n", |
|
"We empirically compare the two paradigms in this section. \n", |
|
"\n", |
|
"\n", |
|
"We experiment with three representative tasks: MRPC (classification), HellaSwag (multiple-choice), and MWoZ (dialogue).\n", |
|
"Strong, state-of-the-art pretrained models are used for finetuning: large-sized RoBERTa \\citep{Liu2019RoBERTaAR} for MRPC and HellaSwag and DS2-T5 \\citep{Shin2022DialogueSA} for MWoZ.\n", |
|
"In-context learning uses GPT-J for MRPC, GPT-J and OPT 175B (Fig~\\ref{fig:hellaswag_opt}) for HellaSwag, and Codex-davinci-002 for MWoZ.\n", |
|
"Note that we do not aim to conduct head-to-head comparisons with exactly the same pretrained model; finetuning a large left-to-right language model (e.g., GPT-J and GPT-3) is computationally (and thus financially) infeasible in many cases.\n", |
|
"Here we examine the two paradigms from the practical perspective and benefit from the advantage of in-context learning, which requires no parameter updates of massive language models.\n", |
|
"\n", |
|
"Fig.\\ \\ref{fig:icl-vs-ft} compares the two paradigms across varying annotation sizes ($\\{18, 100, 300, 800\\}$).\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"Over all three tasks, we observe that in-context learning with \\votek selection outperforms the finetuning performance of state-of-the-art pretrained language models. \n", |
|
"Specifically, we find that to achieve similar performance to \\votek with $|\\mathcal{L}| = $ 18 or 100, finetuning requires 1000 annotated examples for HellaSwag and 800 for MWoZ (\\textbf{10-100$\\times$ annotation cost}).\n", |
|
"Note that the in-context learning performance usually converges when 100 or 300 examples are carefully selected and annotated, suggesting that a large annotated dataset is unnecessary for in-context learning to achieve strong performance.\n", |
|
"Interestingly, \\firststep helps in-context learning, but \\emph{not} finetuning.\n", |
|
"This result is consistent with recent work showing that many active learning algorithms perform similarly to random baseline, when pretrained language models are finetuned \\citep{karamcheti-etal-2021-mind,darcy2022limitations}.\n", |
|
"They proposed that it might be due to outliers and the instability of finetuning on a limited number of annotated samples. \n", |
|
"\n", |
|
"We hypothesize that in-context learning \\textit{with similarity-based prompt retrieval} is more robust to outliers and small annotation sizes because only the most similar examples are retrieved for each test instance. \n", |
|
"We find two pieces of evidence for this hypothesis. First, \\S~\\ref{sec:random_retrieval} shows that \\textit{random} (as opposed to similarity-based) prompt retrieval does not benefit from \\votek \\firststep. Second, in Appendix~\\ref{app:remove_outlier_FT}, we show that explicitly removing outliers also helps finetuning to benefit from \\votek. \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{Language Models with Various Sizes}\n", |
|
"\\label{sec:lm_sizes}\n", |
|
"\\begin{figure}[h!]\n", |
|
"\\centering\n", |
|
" \\includegraphics[width=0.98\\textwidth]{images/model_size.pdf}\n", |
|
"\\caption{\n", |
|
"Comparisons of various models with 100 annotated examples. \n", |
|
"\\Votek \\firststep consistently improves in-context learning with pretrained language models of varying sizes. \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"}\n", |
|
"\\label{fig:model_size}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"\n", |
|
"Fig.\\ \\ref{fig:model_size} shows performance with varying sizes of language models (GPT-Neo 2B, \\citealp{gpt-neo}; GPT-J 6B, \\citealp{gpt-j}; GPT-3, \\citealp{gpt3}) on \n", |
|
"HellaSwag commonsense reasoning, SST-5 sentiment analysis, and MWoZ dialogue state tracking. \n", |
|
"\n", |
|
"In general, when a smaller model is used, the performance gap between random and \\votek selection is larger.\n", |
|
"In the HellaSwag task, \\votek outperforms randomly-selected annotation by 7.5\\% with the smallest model (GPT-Neo 2B).\n", |
|
"Nonetheless, we see consistent performance gains from \\votek selection over varying sizes.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{Effects of Domain Shift}\n", |
|
"\\label{sec:domain_shift}\n", |
|
"Recent work observed that when a large, pretrained language model is finetuned, the performance gain from active learning is limited \\citep{darcy2022limitations}, but it can be larger if there is a domain shift between training and evaluation \\citep{tamkin22}.\n", |
|
"We have demonstrated that \\firststep consistently improves in-context learning, but here we explore cases of domain shifts.\n", |
|
"\n", |
|
"\\begin{table}[h!] \n", |
|
"\\centering\n", |
|
"\n", |
|
"\\begin{tabular}{@{} cc m{0.001em} cc m{0.001em} cc @{}}\n", |
|
"\\toprule[.1em]\n", |
|
"\n", |
|
" \\multicolumn{2}{c}{\\textbf{Method}}\n", |
|
"&& \\multicolumn{2}{c}{\\textbf{CivilComments}}\n", |
|
"&& \\multicolumn{2}{c}{\\textbf{Amazon}}\n", |
|
"\\\\\n", |
|
"\\cmidrule(r){1-2}\n", |
|
"\\cmidrule(lr){4-5}\n", |
|
"\\cmidrule(l){7-8}\n", |
|
"\n", |
|
"$|\\mathcal{L}|$ & Selection\n", |
|
"&& Random & Domain\n", |
|
"&& Random & Domain\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\midrule[.1em]\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"100 & Random\n", |
|
"&& 73.8\n", |
|
"& 66.8\n", |
|
"&& 50.3\n", |
|
"& 30.7\n", |
|
"\\\\\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"100 &\n", |
|
"\\Votek\n", |
|
"&& \\textbf{79.3}\n", |
|
"& \\textbf{76.7}\n", |
|
"&& \\textbf{56.3}\n", |
|
"& \\textbf{39.0}\n", |
|
"\\\\\n", |
|
"\n", |
|
"100 & $\\Delta$ Absolute gain\n", |
|
"&& \\textblue{+5.5}\n", |
|
"& \\textblue{+9.9}\n", |
|
"&& \\textblue{+6.0}\n", |
|
"& \\textblue{+8.3}\n", |
|
"\\\\\n", |
|
"\n", |
|
"\n", |
|
"\\bottomrule[.1em]\n", |
|
"\\end{tabular}\n", |
|
"\\caption{Effects of domain shift. Random splits and domain splits are compared \\citep{wilds}.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"}\n", |
|
"\n", |
|
"\\label{tab:domain_shift}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"\n", |
|
"Following \\citet{tamkin22}, we use two natural language datasets from the WILDS benchmark \\citep{wilds}: \\textbf{CivilComments} (toxicity classification; \\citealp{civilcomments}) and \\textbf{Amazon} (review classification; \\citealp{ni-etal-2019-justifying}).\n", |
|
"Each comes with both a random split and a domain split: the former splits data randomly and the latter is based on the domains (demographic identities for CivilComments and users for Amazon), simulating cases where a model is deployed in a new scenario unseen during annotations.\n", |
|
"Similar to \\S\\ref{sec:results}, we conduct experiments with GPT-J under two settings: random/\\votek \\firststep, followed by similarity-based prompt retrieval. Both \\firststep and prompt retrieval are conducted on the source domain. \n", |
|
"\n", |
|
"Tab.~\\ref{tab:domain_shift} shows our results. \n", |
|
"We see that the gain from \\votek is more pronounced under the domain splits: e.g., \\textblue{9.9} vs.\\ \\textblue{5.5} accuracy point improvements on CivilComments. \n", |
|
"This suggests that \\firststep and prompt retrieval are particularly crucial when there is a domain shift in the evaluation data, as in many realistic scenarios \\citep{wilds,longpre2022active}.\n", |
|
"\n", |
|
"\\subsection{Random Prompt Retrieval}\n", |
|
"\\label{sec:random_retrieval}\n", |
|
"\n", |
|
"\\begin{table}[!th] \n", |
|
"\\centering\n", |
|
"\n", |
|
"\n", |
|
"\\begin{tabular}{@{} ccc m{0.001em} ccc }\n", |
|
"\\toprule[.1em]\n", |
|
"\n", |
|
" \\multicolumn{3}{c}{\\textbf{Method}}\n", |
|
"&& \n", |
|
" \\multicolumn{3}{c}{\\textbf{Dataset}}\n", |
|
"\\\\\n", |
|
"\\cmidrule(lr){1-3}\n", |
|
"\\cmidrule(lr){5-7}\n", |
|
"\n", |
|
"$|\\mathcal{L}|$ & Selection\n", |
|
"& Retrieval\n", |
|
"&& HellaSwag\n", |
|
"& SST-5 \n", |
|
"& MWoZ\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\midrule[.1em]\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"100 & \\Votek\n", |
|
"& Similar\n", |
|
"&& \\textbf{70.7}\n", |
|
"& \\textbf{53.0}\n", |
|
"& \\textbf{51.4} \n", |
|
"\\\\\n", |
|
"\n", |
|
"100 & Random\n", |
|
"& Similar \n", |
|
"&& 65.2\n", |
|
"& 44.2\n", |
|
"& 47.2\n", |
|
"\\\\\n", |
|
"\n", |
|
"100 & \\Votek\n", |
|
"& Random\n", |
|
"&& 62.5\n", |
|
"& 41.6\n", |
|
"& 35.6\n", |
|
"\\\\\n", |
|
"\n", |
|
"100 & Random\n", |
|
"& Random\n", |
|
"&& 63.2\n", |
|
"& 40.6\n", |
|
"& 43.8\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\bottomrule[.1em]\n", |
|
"\\end{tabular}\n", |
|
"\\caption{Comparison of random and similar prompt retrieval. \n", |
|
"Random retrieval fails to benefit from diverse and representative annotated examples from \\votek. \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"}\n", |
|
"\\label{tab:random_retrieval}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"We have performed similarity-based prompt retrieval so far.\n", |
|
"Here we experiment with a random baseline for the prompt retrieval step to quantify the effect of prompt retrieval (Tab.~\\ref{tab:random_retrieval}).\n", |
|
"Interestingly, when random prompt retrieval is performed, \\votek does not necessarily improve upon the randomly-selected annotation baseline: e.g., 62.5 vs.\\ 63.2 on HellaSwag and 35.6 vs.\\ 43.8 on MWoZ.\n", |
|
"This suggests that random prompt retrieval fails to benefit from diverse, representative 100 samples, selected by \\votek \\firststep. \n", |
|
"\n", |
|
"Combining \\firststep and prompt retrieval is thus crucial for the success of in-context learning.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{Alternative \\FirstStep Methods}\n", |
|
"\n", |
|
"\n", |
|
"\\begin{table*}[h!]\n", |
|
"\\centering\n", |
|
"\n", |
|
"\\begin{tabular}{@{}ccccccc@{}}\n", |
|
"\\toprule\n", |
|
" & Random & MFL & Diversity & Least-confidence & Fast \\votek & \\Votek \\\\\n", |
|
"\\midrule\n", |
|
"HellaSwag & 65.2\n", |
|
"& 66.5\n", |
|
"& 68.2\n", |
|
"& 68.4\n", |
|
"& 69.5\n", |
|
"& \\textbf{70.7}\n", |
|
"\\\\\n", |
|
"SST-5 & 44.2\n", |
|
"& 45.6\n", |
|
"& 48.5\n", |
|
"& 46.2\n", |
|
"& 51.9\n", |
|
"& \\textbf{53.0}\n", |
|
"\\\\\n", |
|
"MWoZ & 47.2\n", |
|
"& 48.3\n", |
|
"& 49.2\n", |
|
"& 49.4\n", |
|
"& 50.2\n", |
|
"& \\textbf{51.4}\n", |
|
"\\\\\n", |
|
"\\bottomrule\n", |
|
"\\end{tabular}\n", |
|
"\\caption{\n", |
|
"\\label{all_sample_selection_method}\n", |
|
"Comparisons of various selective annotation methods with 100 annotated examples. Performance is averaged over three random trials.\n", |
|
"\n", |
|
"\\Votek outperforms all the other \\firststep methods. Fast \\votek, a faster version of \\votek without the need for confidence score computations, can achieve similar performance to \\votek while being more computationally efficient.\n", |
|
"\n", |
|
"}\n", |
|
"\\end{table*}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\label{subsec:sample-selection-methods}\n", |
|
"Here we explore four additional methods for \\firststep: \n", |
|
"\\begin{compactitem}\n", |
|
"\\item \\textbf{Maximizing facility location} (MFL; \\citealp{Lin2009HowTS}) aims at optimizing the representativeness of the selected samples. Since this objective satisfies the submodular objective, maximization can be approximated via a greedy algorithm (see Appendix \\ref{sec:Submodularity-Based Sample Selection}).\n", |
|
"\\item \\textbf{Diversity} focuses on maximizing the diversity of the embeddings for selected examples in the first step (Appendix \\ref{sec:Embedding Diversity}).\n", |
|
"\\item \\textbf{Least-confidence} \\citep{lewis1994sequential} iteratively adds least-confident examples to the annotated set.\n", |
|
"\\item \\textbf{Fast \\votek} is a fast, efficient alternative to our \\votek method (\\S\\ref{sec:sample_selection_method}) that does not use confidence scores.\n", |
|
"It picks $M$ samples with the largest \\votek scores.\n", |
|
"It avoids using the pretrained language model to compute a confidence score for every instance, resulting in a 10+ times speedup.\n", |
|
"\n", |
|
"\\end{compactitem}\n", |
|
"\n", |
|
"\n", |
|
"Notice that MFL, diversity, and least-confidence do not have hyperparameters other than the annotation budget. \n", |
|
"As shown in Tab.\\ \\ref{all_sample_selection_method}, \\votek outperforms all the other methods.\n", |
|
"\n", |
|
"\n", |
|
"It is noteworthy, however, that fast \\votek can achieve similar performance to \\votek. \n", |
|
"Fast \\votek is thus an attractive method for researchers and practitioners with a limited computational budget.\n", |
|
"\n", |
|
"Like \\votek, MFL also optimizes representativeness and Diversity also optimizes diversity. In particular, MFL defines representativeness as a sum over distances from the selected examples to all other examples, and Diversity defines diversity as the distances between selected examples. Since they do not significantly outperform randomly-selected annotation, we conjecture that jointly optimizing diversity and representativeness is needed for \\firststep. \n", |
|
"Moreover, the way \\votek defines representativeness and diversity is also different from the baselines: \\votek defines representativeness as the number of neighbors during similarity-based prompt retrieval, which is effectively tailored to in-context learning; \\votek directly optimizes for the diversity of selected samples using the in-context learning model's prediction confidence. \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\section{Related Work}\\label{sec:related}\n", |
|
"\\paragraph{In-Context Learning} \n", |
|
"In-context learning with large language models has recently received an increasing amount of interest, partly due to its flexibility and sample efficiency \\citep{prompt_survey}.\n", |
|
"Several recent works proposed methods to improve in-context learning in many aspects: e.g., meta-training \\citep{chen-etal-2022-meta,min-etal-2022-metaicl}, task instructions \\citep{efrat20,mishra-etal-2022-cross,Wei2021FinetunedLM,sahn22}, or task formulation \\citep{holtzman-etal-2021-surface,calibrate,min-etal-2022-noisy}.\n", |
|
"In this paradigm, the choice of in-context (i.e., demonstration) examples has been shown crucial \\citep{liu-etal-2022-makes,rubin2022,lu-etal-2022-fantastically}, while recent work raised questions as to the degree to which correct labels are necessary \\citep{min2022rethinking}.\n", |
|
"This work proposes an annotation-efficient in-context learning framework by focusing on the choice of examples and its implications on the annotation cost.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\paragraph{Active Learning}\n", |
|
"\n", |
|
"Active learning aims to enable machine learning models to achieve similar or greater performance with fewer labeled training instances~\\citep{CohnAL94,settles.tr09}.\n", |
|
"Our \\firststep step for in-context learning shares the same goal of reducing the annotation cost.\n", |
|
"Most active learning methods involve iterative parameter updates (e.g., \\citealp{active_image_recog,kasai-etal-2019-low}), which are computationally expensive for large language models used in in-context learning.\n", |
|
"\n", |
|
"Similar to our \\votek algorithm, \\citet{Lin2009HowTS} used the facility location \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"objective to optimize representativeness.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"We observed that this objective largely underperforms \\votek for in-context learning, probably due to the fact that \\votek (1) is effectively tailored to the prompt retrieval step of in-context learning and (2) directly optimizes the diversity of selected samples (see \\S\\ref{subsec:sample-selection-methods}). \n", |
|
"More recently, the effectiveness of active learning has been questioned when large-scale pretrained models are finetuned for various tasks \\citep{karamcheti-etal-2021-mind,darcy2022limitations}.\n", |
|
"Our experiments (\\S\\ref{sec:experiments}) showed that \\firststep helps reduce the annotation cost of in-context learning, departing from the recent observations on finetuning with active learning.\n", |
|
"We hypothesize that it is because in-context learning with similarity-based prompt retrieval is more robust to outliers since each test instance only retrieves its most similar examples. \n", |
|
"This is supported by \\S~\\ref{sec:random_retrieval}, where \\textit{random} prompt retrieval does not benefit from \\firststep.\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\section{Conclusion}\\label{sec:conclusion}\n", |
|
"Much recent work illustrated the ability of large language models to adapt to new tasks simply from a few demonstration examples.\n", |
|
"We presented in-depth studies on the implications of this ability for dataset annotation through the lens of \\firststep and introduced an annotation-efficient practice. The best \\firststep method explored in this paper, our \\votek method, selects diverse, representative examples to annotate. \n", |
|
"In terms of the task performance, \\votek improves the performance on 10 diverse tasks by a large margin.\n", |
|
"\n", |
|
"Moreover, \\votek \\firststep yields similar performance to state-of-the-art supervised finetuning with 10-100$\\times$ less annotation cost. \n", |
|
"We further show that the effectiveness of \\votek is consistent with different language model sizes and domain shifts between training and test data.\n", |
|
"We hope that our findings will help researchers and practitioners efficiently design new natural language tasks and beyond.\n", |
|
"\\section*{Acknowledgements}\n", |
|
"We thank Sewon Min, Pradeep Dasigi, Yanda Chen, Yushi Hu, Alisa Liu, and the ARK group at UW for their helpful feedback on this work.\n", |
|
"\n", |
|
"\n", |
|
"\\bibliography{custom}\n", |
|
"\\bibliographystyle{iclr2022_conference}\n", |
|
"\n", |
|
"\n", |
|
"\\clearpage\n", |
|
"\\appendix\n", |
|
"\\begin{appendices}\n", |
|
"\\section{Datasets and Tasks}\n", |
|
"\\label{sec:datasets_all}\n", |
|
"\n", |
|
"\n", |
|
"\\begin{table*}\n", |
|
"\\small\n", |
|
"\\begin{tabular}{C{1.5cm}|C{2.5cm}|L{8.5cm}}\n", |
|
"\\toprule\n", |
|
"Dataset & Task & Examples \\\\ \n", |
|
"\\midrule[.005em]\n", |
|
"HellaSwag & Commonsense \\par Reasoning &\n", |
|
"\\textcolor{gray}{A woman is outside with a bucket and a dog. The dog is running\n", |
|
"around trying to avoid a bath. She\u2026} \\par\n", |
|
"\\phantom{\\cmark}A) rinses the bucket off with soap and blow dry the dog\u2019s head.\\par\n", |
|
"\\phantom{\\cmark}B) uses a hose to keep it from getting soapy.\\par\n", |
|
"\\cmark C) gets the dog wet, then it runs away again.\\par\n", |
|
"\\phantom{\\cmark}D) gets into a bath tub with the dog.\n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"MRPC & Paraphrase Detection\n", |
|
"&\n", |
|
"\\textcolor{gray}{Sales rose 37 per cent year-on-year to 1.76bn, beating expectations.\n", |
|
"Sales for the quarter beat expectations, rising 37 percent year-on-year to 1.76 billion euros.}\\par\n", |
|
"$\\rightarrow$\\ \\cmark \\ Paraphrase\n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"SST & Sentiment Analysis & \\textcolor{gray}{A warm, funny, engaging film.$\\rightarrow$}Positive \\par\n", |
|
"\\textcolor{gray}{Suffers from the lack of a compelling narrative.$\\rightarrow$}Negative\n", |
|
" \\\\\n", |
|
"\\midrule[.005em]\n", |
|
"MWoZ 2.4 & Dialogue State Tracking & \\textcolor{gray}{I am looking for ALexender b\\&b}\\par\n", |
|
"\\textcolor{gray}{Dialogue state:} alexander bed and breakfast \n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"GeoQuery & Semantic Parsing & \\textcolor{gray}{What is the area of California?}\n", |
|
"\\begin{lstlisting}[\n", |
|
" language=SQL,\n", |
|
" aboveskip=-0.1 \\baselineskip,\n", |
|
" belowskip=-2.2 \\baselineskip,\n", |
|
" ]\n", |
|
"SELECT state.area FROM state WHERE state.state_name='california'\n", |
|
"\\end{lstlisting}\n", |
|
"\\vspace{-3cm}\n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"DBpedia & Topic Classification & \\textcolor{gray}{The keeled box turtle (Cuora mouhotii syn. Pyxidea mouhotii) is a species of turtle in the family Geoemydidae. It is native to Asia where it occurs in China India Laos Burma Vietnam Thailand and Bhutan. Other common names include keel-backed terrapin and jagged-shelled turtle.}\\par\n", |
|
"\\textcolor{gray}{Topic:} animal \n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"MNLI & Natural Language Inference & \\textcolor{gray}{The F/A-18-E/F program eliminated over 40 percent of the parts used to build predecessor aircraft to make the design more robust for manufacturing and identified critical manufacturing processes, bringing them under control before the start of production.\n", |
|
"The new design with robustness also increased the safety of machines.}\\par\n", |
|
"$\\rightarrow$\\ \\cmark \\ neutral\n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"RTE & Natural Language Inference & \\textcolor{gray}{Judie Vivian, chief executive at ProMedica, a medical service company that helps sustain the 2-year-old Vietnam Heart Institute in Ho Chi Minh City (formerly Saigon), said that so far about 1,500 children have received treatment. The previous name of Ho Chi Minh City was Saigon.}\\par\n", |
|
"$\\rightarrow$\\ \\cmark \\ entailment\n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"Natural Questions & Open-Domain QA & \\textcolor{gray}{when was music first played on the radio}\\par\n", |
|
"$\\rightarrow$\\ \\cmark \\ 1917\n", |
|
"\\\\\n", |
|
"\\midrule[.005em]\n", |
|
"XSUM & Summarization & \\textcolor{gray}{Bliss said there was a shortage of neonatal nurses and doctors, and safety standards were not being met.\n", |
|
"......\n", |
|
"Dr Jenny Calvert, of the Wales Neonatal Network, said they are working to further develop medical training in neonatology to help recruit more trainee doctors.}\n", |
|
"\\textcolor{gray}{Summary:} Neonatal services across Wales are overstretched and under pressure with the safety of vulnerable babies at risk, according to a charity.\n", |
|
"\\\\\n", |
|
"\\bottomrule\n", |
|
"\\end{tabular}\n", |
|
"\\caption{\n", |
|
"All of the 10 datasets with examples used in our experiments.\n", |
|
"The 10 datasets span various formats, including classification (SST-5, \\citealp{socher-etal-2013-recursive}; MRPC, \\citealp{mrpc}), multiple-choice selection (HellaSwag, \\citealp{zellers-etal-2019-hellaswag}), and code/text generation (MWoZ 2.4, \\citealp{budzianowski-etal-2018-multiwoz}; GeoQuery, \\citealp{geoquery96}; NQ, \\citealp{kwiatkowski2019natural}).}\n", |
|
"\\label{tab:datasets_all}\n", |
|
"\\end{table*}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\clearpage\n", |
|
"\n", |
|
"\\section{Prompt Templates}\n", |
|
"\\label{sec:prompt_templates}\n", |
|
"\n", |
|
"\\subsection{HellaSwag}\\par\n", |
|
"\\textbf{Input:}\\par\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"The topic is Grooming dog. Two women attempt to wash two dogs. they get \n", |
|
"in the tub with the dogs and do shampoo, soap, and then rinse the dogs.\n", |
|
"......\n", |
|
"The topic is Bathing dog. A couple is outside with a bucket and a dog. \n", |
|
"The dog is running around trying to avoid a bath. they\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\textbf{Output:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"get the dog wet, then it runs away again.\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\subsection{MRPC}\n", |
|
"\\textbf{Input:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"Are the following two sentences 'equivalent' or 'not equivalent'?\n", |
|
"This was around the time Congress was debating a resolution granting the \n", |
|
"President broad authority to wage war ..\n", |
|
"Within four days , the House and Senate overwhelmingly endorsed a \n", |
|
"resolution granting the president authority to go to war ..\n", |
|
"answer:not equivalent\n", |
|
"......\n", |
|
"Are the following two sentences 'equivalent' or 'not equivalent'?\n", |
|
"Kerry last month outlined a U.N. resolution authorizing a military force \n", |
|
"under U.S. command and transferring responsibility to the United Nations \n", |
|
"for the political and humanitarian efforts ..\n", |
|
"Kerry outlined last month a UN resolution authorizing a military force \n", |
|
"under US command and transferring responsibility for political and \n", |
|
"humanitarian efforts to the UN ..\n", |
|
"answer:\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\textbf{Output:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"equivalent\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\subsection{SST5}\n", |
|
"\n", |
|
"\\textbf{Input:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"How do you feel about the following sentence?\n", |
|
"the movie 's blatant derivativeness is one reason it 's so lackluster .\n", |
|
"answer:negative\n", |
|
"......\n", |
|
"How do you feel about the following sentence?\n", |
|
"the movie 's something-borrowed construction feels less the product of \n", |
|
"loving , well integrated homage and more like a mere excuse for the wan , \n", |
|
"thinly sketched story .\n", |
|
"answer:\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\textbf{Output:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"negative\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\clearpage\n", |
|
"\n", |
|
"\\subsection{MultiWoz}\n", |
|
"\\textbf{Input:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"CREATE TABLE hotel(\n", |
|
" name text,\n", |
|
" ......,\n", |
|
" internet text CHECK (internet IN (dontcare, yes, no))\n", |
|
")\n", |
|
"/*\n", |
|
"4 example rows:\n", |
|
"SELECT * FROM hotel LIMIT 4;\n", |
|
"name pricerange type parking book_number_of_days book_day book_people \n", |
|
"area stars internet\n", |
|
"a and b guest house moderate guest house dontcare 3 friday 5 east 4 yes\n", |
|
"......\n", |
|
"/*\n", |
|
"......\n", |
|
"-- Using valid SQLite, answer the following multi-turn conversational \n", |
|
"questions for the tables provided above.\n", |
|
"Example #1\n", |
|
"[context] hotel-area: west, hotel-stars: 3, hotel-internet: yes\n", |
|
"[system] the hobsons house is available in that area .\n", |
|
"Q: [user] that sounds like it will work . can i book that for 3 nights\n", |
|
"starting wednesday ?\n", |
|
"SQL: SELECT * FROM hotel WHERE book_day = wednesday AND book_people = 1 \n", |
|
"AND book_number_of_days = 3 AND name = hobsons house;\n", |
|
"......\n", |
|
"Example #22\n", |
|
"[context] hotel-parking: yes, hotel-pricerange: moderate, hotel-type: \n", |
|
"guest house, hotel-stars: 4\n", |
|
"[system] there are 9 in the area . i recommend the warkworth house .\n", |
|
"Q: [user] can you book that 1 for 4 nights starting on wednesday ?\n", |
|
"SQL: SELECT * FROM\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\textbf{Output:}\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"hotel WHERE book_day = wednesday AND book_number_of_days = 4 AND name = \n", |
|
"warkworth house;\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\subsection{GeoQuery}\n", |
|
"\\textbf{Input:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"CREATE TABLE \"border_info\" (\"state_name\" text, \"border\" text)\n", |
|
"/*\n", |
|
"state_name border\n", |
|
" alabama tennessee\n", |
|
" alabama georgia\n", |
|
" alabama florida\n", |
|
"*/\n", |
|
"......\n", |
|
"-- Using valid SQLite, answer the following questions for the tables \n", |
|
"provided above.\n", |
|
"-- which state has the longest river\n", |
|
"SELECT RIVERalias0.TRAVERSE FROM RIVER AS RIVERalias0 WHERE RIVERalias0.\n", |
|
"LENGTH = ( SELECT MAX( RIVERalias1.LENGTH ) FROM RIVER AS RIVERalias1 ) ;\n", |
|
"......\n", |
|
"-- what is the longest river in the state with the highest point\n", |
|
"SELECT\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\clearpage\n", |
|
"\n", |
|
"\\textbf{Output:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"RIVERalias0.RIVER_NAME FROM HIGHLOW AS HIGHLOWalias0 , RIVER AS \n", |
|
"RIVERalias0 WHERE HIGHLOWalias0.HIGHEST_ELEVATION = ( SELECT MAX(\n", |
|
"HIGHLOWalias1.HIGHEST_ELEVATION ) FROM HIGHLOW AS HIGHLOWalias1 ) AND\n", |
|
"RIVERalias0.TRAVERSE = HIGHLOWalias0.STATE_NAME ORDER BY RIVERalias0.\n", |
|
"LENGTH DESC LIMIT 1 ;\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\subsection{DBpedia}\n", |
|
"\\textbf{Input:}\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"title: Cupressus funebris; content: Cupressus funebris (Chinese Weeping\n", |
|
"Cypress) is a species of cypress native to southwestern and central \n", |
|
"China. It may also occur naturally in Vietnam.\n", |
|
"plant\n", |
|
"......\n", |
|
"title: Keeled box turtle; content: The keeled box turtle (Cuora mouhotii \n", |
|
"syn. Pyxidea mouhotii) is a species of turtle in the family Geoemydidae. \n", |
|
"It is native to Asia where it occurs in China India Laos Burma Vietnam\n", |
|
"Thailand and Bhutan. Other common names include keel-backed terrapin and\n", |
|
"jagged-shelled turtle.\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\textbf{Output:}\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"animal\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\subsection{MNLI}\n", |
|
"\\textbf{Input:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"Ideally, the design fixes for the failures should be corrected prior to\n", |
|
"manufacturing production units.. Based on that information, is the claim \n", |
|
"The fixes should be addressed before they reach the assembly line if this \n", |
|
"was a smart plan. \"True\", \"False\", or \"Inconclusive\"?\n", |
|
"answer:Inconclusive\n", |
|
"......\n", |
|
"The F/A-18-E/F program eliminated over 40 percent of the parts used to \n", |
|
"build predecessor aircraft to make the design more robust for \n", |
|
"manufacturing and identified critical manufacturing processes, bringing \n", |
|
"them under control before the start of production.. Based on that \n", |
|
"information, is the claim The new design with robustness also increased \n", |
|
"the safety of machines. \"True\", \"False\", or \"Inconclusive\"?\n", |
|
"answer:\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\textbf{Output:}\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"Inconclusive\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\subsection{RTE}\n", |
|
"\\textbf{Input:}\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"After giving nearly 5,000 people a second chance at life, doctors are\n", |
|
"celebrating the 25th anniversary of Britian's first heart transplant \n", |
|
"which was performed at Cambridgeshire's Papworth Hospital in 1979..\\par\n", |
|
"question: The first heart transplant in Britian was performed in 1979.. \n", |
|
"True or False?\n", |
|
"answer:True\n", |
|
"......\n", |
|
"Judie Vivian, chief executive at ProMedica, a medical service company \n", |
|
"that helps sustain the 2-year-old Vietnam Heart Institute in Ho Chi Minh \n", |
|
"City (formerly Saigon), said that so far about 1,500 children have \n", |
|
"received treatment..\n", |
|
"question: The previous name of Ho Chi Minh City was Saigon.. True or \n", |
|
"False?\n", |
|
"answer:\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\textbf{Output:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"True\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\subsection{Natural Question}\n", |
|
"\n", |
|
"\\textbf{Input:}\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"Write an answer: who invented the radio during the industrial revolution\n", |
|
"other\n", |
|
"Guglielmo Marconi, 1st Marquis of Marconi\n", |
|
"......\n", |
|
"Write an answer: when was music first played on the radio\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\textbf{Output:}\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"other\n", |
|
"1917\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\subsection{XSUM}\n", |
|
"\\textbf{Input:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"Write a short summary\n", |
|
"Health Minister Mark Drakeford said the money would be used to improve \n", |
|
"areas of concern, including out-of-hours help and access to psychological \n", |
|
"treatment.\n", |
|
"......\n", |
|
"money won't get the help they need in a timely fashion,\" she said.\n", |
|
"TL;DR: An extra [Unicode token]7.6m a year will be invested to improve \n", |
|
"mental health services for children and young people in Wales.\n", |
|
"......\n", |
|
"write a short summary:\n", |
|
"Bliss said there was a shortage of neonatal nurses and doctors, and \n", |
|
"safety standards were not being met.\n", |
|
"......\n", |
|
"Dr Jenny Calvert, of the Wales Neonatal Network, said they are working to\n", |
|
"further develop medical training in neonatology to help recruit more \n", |
|
"trainee doctors.\n", |
|
"TL;DR:\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\vspace{-20pt}\n", |
|
"\n", |
|
"\\textbf{Output:}\n", |
|
"\n", |
|
"\\begin{lstlisting}[language={}]\n", |
|
"Neonatal services across Wales are overstretched and under pressure with \n", |
|
"the safety of vulnerable babies at risk, according to a charity.\n", |
|
"\\end{lstlisting}\n", |
|
"\n", |
|
"\\clearpage\n", |
|
"\n", |
|
"\n", |
|
"\\section{Detailed Main Results}\n", |
|
"\\label{app:main-results}\n", |
|
"\n", |
|
"This section provides a detailed version of our main results in Table~\\ref{tab:main_results}, where the maximum performance and minimum performances among the three trials are reported. Results are shown in Table~\\ref{tab:app-main-results-1} and Table~\\ref{tab:app-main-results-2}.\n", |
|
"\n", |
|
"\\begin{table}[h!]\n", |
|
"\\addtolength{\\tabcolsep}{-4.7pt}\n", |
|
"\\centering\n", |
|
"\\begin{tabular}{@{} cc m{0.001em} ccccc m{-0.05mm} @{}}\n", |
|
"\\toprule[.1em]\n", |
|
"\n", |
|
" \\multicolumn{2}{c}{\\textbf{Method}}\n", |
|
"&& \\multicolumn{5}{c}{\\textbf{Classification}}\n", |
|
"\\\\\n", |
|
"\\cmidrule(lr){1-2}\n", |
|
"\\cmidrule(l){4-8}\n", |
|
"\n", |
|
"$|\\mathcal{L}|$\n", |
|
"&Selection\n", |
|
"\n", |
|
"&& MRPC & SST-5 & MNLI & DBpedia & RTE\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\midrule[.1em]\n", |
|
"\n", |
|
"100\n", |
|
"&Random\n", |
|
"\n", |
|
"&& 63.5/66.0/60.5\n", |
|
"& 44.2/47.3/41.8\n", |
|
"& 37.4/41.0/33.2\n", |
|
"& 89.8/91.0/88.3\n", |
|
"& 51.5/53.9/48.4\n", |
|
"\\\\\n", |
|
"\n", |
|
"100\n", |
|
"&\\Votek\n", |
|
"\n", |
|
"&& \\textbf{70.7}/72.3/69.1\n", |
|
"& \\textbf{53.0}/54.7/51.2\n", |
|
"& \\textbf{47.3}/50.0/44.5\n", |
|
"& \\textbf{93.4}/94.1/92.6\n", |
|
"& \\textbf{55.5}/57.0/53.9\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\midrule[.05em]\n", |
|
"\n", |
|
"18\n", |
|
"&Random\n", |
|
"\n", |
|
"&& 59.6/64.8/52.7\n", |
|
"& 39.8/46.1/37.1\n", |
|
"& 36.7/40.6/30.9\n", |
|
"& 77.6/82.0/71.9\n", |
|
"& 50.4/53.5/45.7\n", |
|
"\\\\\n", |
|
"\n", |
|
"18\n", |
|
"&\\Votek\n", |
|
"\n", |
|
"&& \\textbf{64.2}/67.6/59.0\n", |
|
"& \\textbf{47.6}/50.0/44.5\n", |
|
"& \\textbf{41.0}/44.5/37.1\n", |
|
"& \\textbf{87.1}/90.6/85.2\n", |
|
"& \\textbf{54.3}/56.2/51.6\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\bottomrule[.1em]\n", |
|
"\\end{tabular}\n", |
|
"\\caption{Main result Table~\\ref{tab:main_results} with the mean/max/min results reported across three trials}\n", |
|
"\\label{tab:app-main-results-1}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"\n", |
|
"\\begin{table}[h!]\n", |
|
"\\addtolength{\\tabcolsep}{-4.7pt}\n", |
|
"\\centering\n", |
|
"\\begin{tabular}{@{} cc m{0.001em} c m{-0.05em} c m{0.001em} ccc @{}}\n", |
|
"\\toprule[.1em]\n", |
|
"\n", |
|
" \\multicolumn{2}{c}{\\textbf{Method}}\n", |
|
"&& \\multicolumn{1}{c}{\\textbf{Multi-Choice}}\n", |
|
"&& \\multicolumn{1}{c}{\\textbf{Dialogue}}\n", |
|
"&& \\multicolumn{3}{c}{\\textbf{Generation}}\n", |
|
"\\\\\n", |
|
"\\cmidrule(lr){1-2}\n", |
|
"\\cmidrule(lr){4-5}\n", |
|
"\\cmidrule(lr){6-7}\n", |
|
"\\cmidrule(l){8-10}\n", |
|
"\n", |
|
"$|\\mathcal{L}|$\n", |
|
"&Selection\n", |
|
"\n", |
|
"&& HSwag\n", |
|
"&& MWoZ \n", |
|
"&& GeoQ & NQ & XSum\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\midrule[.1em]\n", |
|
"\n", |
|
"100\n", |
|
"&Random\n", |
|
"\n", |
|
"&& 65.2/66.4/63.3\n", |
|
"&& 47.2/49.2/44.5\n", |
|
"&& 78.6/80.5/77.3\n", |
|
"& 30.8/32.8/28.1\n", |
|
"& 15.3/16.4/14.8\n", |
|
"\\\\\n", |
|
"\n", |
|
"100\n", |
|
"&\\Votek\n", |
|
"\n", |
|
"&& \\textbf{70.7}/71.5/69.5\n", |
|
"&& \\textbf{51.4}/53.1/49.6\n", |
|
"&& \\textbf{82.8}/83.6/82.0\n", |
|
"& \\textbf{33.6}/35.2/31.6\n", |
|
"& \\textbf{17.2}/17.6/16.4\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\midrule[.05em]\n", |
|
"\n", |
|
"18\n", |
|
"&Random\n", |
|
"\n", |
|
"&& 62.5/66.4/57.4\n", |
|
"&& 33.6/39.5/25.0\n", |
|
"&& 62.4/65.2/57.8\n", |
|
"& 29.8/31.6/26.6\n", |
|
"& 13.6/14.5/12.5\n", |
|
"\\\\\n", |
|
"\n", |
|
"18\n", |
|
"&\\Votek\n", |
|
"\n", |
|
"&& \\textbf{67.4}/71.1/64.8\n", |
|
"&& \\textbf{42.8}/47.7/40.2\n", |
|
"&& \\textbf{72.5}/74.2/69.5\n", |
|
"& \\textbf{32.3}/33.6/30.1\n", |
|
"& \\textbf{15.2}/16.0/14.5\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\bottomrule[.1em]\n", |
|
"\\end{tabular}\n", |
|
"\\caption{Main result Table~\\ref{tab:main_results} with the mean/max/min results reported across three trials.}\n", |
|
"\\label{tab:app-main-results-2}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\section{Evaluate HellaSwag on OPT-175B model}\n", |
|
"Here we show that \\votek also improves model performance for OPT-175B\n", |
|
"\\begin{figure}[h!]\n", |
|
"\\centering\n", |
|
" \\includegraphics[width=0.45\\textwidth]{images/Hellaswag_opt.pdf}\n", |
|
"\\caption{OPT-175B performance of ICL-random and ICL-\\votek on HellaSwag \n", |
|
"}\n", |
|
"\\label{fig:hellaswag_opt}\n", |
|
"\\end{figure}\n", |
|
"\n", |
|
"\\section{Removing outliers for finetuning}\n", |
|
"\n", |
|
"Here we show that explicitly removing outliers also helps finetuning to benefit from vote-$k$. \n", |
|
"\n", |
|
"\\label{app:remove_outlier_FT}\n", |
|
"\\begin{table}[h!]\n", |
|
"\\addtolength{\\tabcolsep}{-4.0pt}\n", |
|
"\\centering\n", |
|
"\\begin{tabular}{@{}ccccc@{}}\n", |
|
"\\toprule[.1em]\n", |
|
"& \\multicolumn{2}{c}{Outliers not removed } & \\multicolumn{2}{c}{10\\\n", |
|
"\\cmidrule(r){2-3}\\cmidrule(l){4-5}\n", |
|
"& FT-random\n", |
|
"& FT-\\votek\n", |
|
"& FT-random\n", |
|
"& FT-\\votek\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\midrule[.1em]\n", |
|
"\n", |
|
"HellaSwag\n", |
|
"& 55.6\n", |
|
"& 53.5\n", |
|
"& 56.8\n", |
|
"& 59.6\n", |
|
"\\\\\n", |
|
"\n", |
|
"MRPC\n", |
|
"& 56.3\n", |
|
"& 55.6\n", |
|
"& 57.9\n", |
|
"& 60.4\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\bottomrule[.1em]\n", |
|
"\\end{tabular}\n", |
|
"\\caption{Effects of \\votek in finetuning(FT) with annotation budget of 100. \\textit{10\\\n", |
|
"}\n", |
|
"\\label{tab:outlier_FT}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"\\section{Diversity and Representativeness of Selected Samples}\n", |
|
"\\label{sec:div_rep_analysis}\n", |
|
"We hypothesized that both representativeness and diversity are crucial for \\firststep (\\S\\ref{sec:sample_selection_method}).\n", |
|
"Here we evaluate the diversity and representativeness of samples that are selected by different methods, using the methods from prior work on active learning \\citep{margatina2021active}; their measures of diversity and representativeness use token overlap or embedding cosine similarities. \n", |
|
"As shown in Table \\ref{tab:diverse_represent}, \\votek improves both the diversity and the representativeness as compared to random selection. \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\begin{table}[h!]\n", |
|
"\\addtolength{\\tabcolsep}{-4.0pt}\n", |
|
"\\centering\n", |
|
"\\small\n", |
|
"\\begin{tabular}{@{} c m{0.001em} ccc m{0.001em} ccc m{0.001em} ccc }\n", |
|
"\\toprule[.1em]\n", |
|
"\n", |
|
" \\textbf{Method}\n", |
|
"&& \n", |
|
" \\multicolumn{3}{c}{\\textbf{DIV-I}}\n", |
|
"&& \n", |
|
" \\multicolumn{3}{c}{\\textbf{DIV-F}}\n", |
|
"&& \n", |
|
" \\multicolumn{3}{c}{\\textbf{REPR.}}\n", |
|
"\\\\\n", |
|
"\\cmidrule(lr){1-1}\n", |
|
"\\cmidrule(lr){3-5}\n", |
|
"\\cmidrule(lr){7-9}\n", |
|
"\\cmidrule(lr){11-13}\n", |
|
"\n", |
|
"Selection\n", |
|
"&& HellaSwag\n", |
|
"& SST-5 \n", |
|
"& MWoZ\n", |
|
"&& HellaSwag\n", |
|
"& SST-5\n", |
|
"& MWoZ\n", |
|
"&& HellaSwag\n", |
|
"& SST-5\n", |
|
"& MWoZ\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\midrule[.1em]\n", |
|
"\n", |
|
"Random\n", |
|
"&& 0.182\\tiny{0.007}\n", |
|
"& 0.099\\tiny{0.003}\n", |
|
"& 0.368\\tiny{0.008}\n", |
|
"&& 0.415\\tiny{0.008}\n", |
|
"& 0.317\\tiny{0.004}\n", |
|
"& 0.675\\tiny{0.006}\n", |
|
"&& 0.558\\tiny{0.007}\n", |
|
"& 0.424\\tiny{0.003}\n", |
|
"& 0.696\\tiny{0.004}\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\Votek\n", |
|
"&& 0.191\n", |
|
"& 0.108\n", |
|
"& 0.379\n", |
|
"&& 0.425\n", |
|
"& 0.321\n", |
|
"& 0.683\n", |
|
"&& 0.565\n", |
|
"& 0.426\n", |
|
"& 0.702\n", |
|
"\\\\\n", |
|
"\n", |
|
"\\bottomrule[.1em]\n", |
|
"\\end{tabular}\n", |
|
"\\caption{DIV-I refers to diversity in input space, which measures the diversity of selected data in the input feature space, i.e., raw text; DIV-F refers to diversity in feature space, which measures the diversity in the dense feature space, i.e., sentence embeddings; REPR. refers to representativeness, which measures the representativeness of selected data. Subscripts stand for standard deviation. \n", |
|
"}\n", |
|
"\\label{tab:diverse_represent}\n", |
|
"\\end{table}\n", |
|
"\n", |
|
"\n", |
|
"\\section{Details of \\FirstStep Methods}\n", |
|
"\\label{sec:details-sample-selection}\n", |
|
"In this section, we provide details of \\firststep methods used in Section~\\ref{subsec:sample-selection-methods}. \n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\subsection{\\Votek \\FirstStep}\n", |
|
"Algorithm~\\ref{alg:vote-k} describes the \\votek \\firststep method introduced in Section~\\ref{sec:sample_selection}. \n", |
|
"\n", |
|
"\\subsection{Greedy Algorithm for Maximizing Facility Location}\n", |
|
"\\label{sec:Submodularity-Based Sample Selection}\n", |
|
"\\citet{Lin2009HowTS} proposed to maximize the facility location objective to optimize representativeness of the selected samples. Since this objective satisfies the submodular property, they applied a greedy algorithm as an approximation. \n", |
|
"Algorithm~\\ref{alg:submodularity} describes the \\firststep method adapted from this greedy algorithm. \n", |
|
"\n", |
|
"\n", |
|
"\\subsection{Embedding Diversity}\n", |
|
"\\label{sec:Embedding Diversity}\n", |
|
"\n", |
|
"This method aims to find diverse samples to annotate using embedding vectors.\n", |
|
"We first compute a vector representation for each \\emph{unlabeled} training instance by Sentence-BERT \\citep{reimers-gurevych-2019-sentence}, which is a variant of BERT \\citep{devlins2019bert}, finetuned to detect paraphrases.\\footnote{\\url{https://huggingface.co/sentence-transformers/all-mpnet-base-v2}.}\n", |
|
"For instance, consider an example from SST-5 sentiment analysis in Table \\ref{tab:datasets_all}:\\textit{A very well-made, funny and entertaining picture}.\n", |
|
"We simply run Sentence-BERT on this text input and average the resulting vectors over the words to obtain a vector representation.\n", |
|
"\n", |
|
"Once embeddings are computed for all training data, we use them to find a diverse set of training instances. \n", |
|
"The intuition here is that a diverse set of annotated examples facilitates the subsequent prompt retrieval step since similar in-context examples can be found for many test instances.\n", |
|
"To find a set of diverse embeddings, we take a simple, iterative approach: in every iteration, we choose an instance furthest from the already chosen ones.\n", |
|
"Specifically, let $\\mathcal{L}$ and $\\mathcal{U}$ denote the sets of already chosen (i.e., labeled) samples and unlabeled samples, respectively.\n", |
|
"Suppose also that $M$ is the target number of labeled examples (i.e., the annotation budget).\n", |
|
"Then, in every iteration, we choose the unlabeled sample that has the largest total cosine distance from $\\mathcal{L}$: $\\argmin_{u \\in \\mathcal{U}}\\sum_{\\ell \\in \\mathcal{L}} cos(u, \\ell)$.\n", |
|
"Here we abuse $u$ and $\\ell$ to mean both the instances and their embedding vectors from Sentence-BERT.\n", |
|
"The first labeled sample is randomly selected from the 3K unlabeled examples (\\S\\ref{sec:datasets_tasks}), and the iterative process continues until $|\\mathcal{L}|\\!=\\!M$.\n", |
|
"\n", |
|
"\\begin{algorithm}[t]\n", |
|
"\\small\n", |
|
"\\caption{\\Votek \\FirstStep}\n", |
|
"\\label{alg:vote-k}\n", |
|
"\\begin{algorithmic}[1]\n", |
|
"\\State \\textbf{Input:} $\\mathcal{X} = \\{x_i\\}_{i=1}^{N}$: a set of unlabeled samples; $M$: the number of samples to be selected; LM: inference language model. \n", |
|
"\\State \\textbf{Initialization:} $\\mathcal{L} = \\varnothing$, $\\mathcal{U} = \\mathcal{X}$. $G=(V, E)$, where $V = \\mathcal{X}$ and $(u, v) \\in E$ if $v$ is one of $u$'s $k$ nearest vertices in terms of the cosine similarity between the embeddings. \n", |
|
"\\While{$|\\mathcal{L}| < M / 10$}\n", |
|
" \\State $u^{*}=\\arg \\max_{u \\in \\mathcal{U}} \\sum_{v \\in \\{v | (v, u) \\in E, v \\in \\mathcal{U}\\}} s (v), \\quad \\text{where} \\ s(v) = \\rho ^{- |\\{\\ell \\in \\mathcal{L}| (v, \\ell) \\in E \\}|}, \\quad \\rho > 1$\n", |
|
" \\State $\\mathcal{L} = \\mathcal{L} \\cup\\left\\{u^{*}\\right\\}$\n", |
|
" \\State $\\mathcal{U} = \\mathcal{U} \\setminus\\left\\{u^{*}\\right\\}$\n", |
|
"\\EndWhile \\label{line:first}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\For{$u$ in $\\mathcal{U}$} \\label{line:lm-start}\n", |
|
"    \\State $\\mathrm{score}(u)$ = $\\frac{1}{|\\textbf{q}|}\\sum_{t} \\textrm{log} p(q_t|\\textbf{q}_{<t},\\textbf{z};\\Theta) $, where $p$ is LM prediction function and $\\Theta$ is LM parameters\n", |
|
"\\EndFor \\label{line:lm-ends}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\For{$j = 1, \\ldots, 9$} \\label{line:it-start}\n", |
|
" \n", |
|
" \\State $\\mathcal{U}_j = \\mathrm{indices}[(j-1)|\\mathcal{U}|/10:j|\\mathcal{U}|/10]$ \n", |
|
" \\For{$i = 1, \\ldots, |\\mathcal{U}_j|$}\n", |
|
" \\State $u^{*}=\\arg \\max_{u \\in \\mathcal{U}_j} \\sum_{v \\in \\{v | (v, u) \\in E, v \\in \\mathcal{U}_j\\}} s (v), \\quad \\text{where} \\ s(v) = \\rho ^{- |\\{\\ell \\in \\mathcal{L}| (v, \\ell) \\in E \\}|}, \\quad \\rho > 1$ \n", |
|
" \n", |
|
" \\State $\\mathcal{L} = \\mathcal{L} \\cup\\left\\{u^{*}\\right\\}$\n", |
|
" \\State $\\mathcal{U}_j = \\mathcal{U}_j \\setminus\\left\\{u^{*}\\right\\}$\n", |
|
" \\EndFor\n", |
|
"\\EndFor\\label{line:it-ends}\n", |
|
"\\State \\textbf{Return:} $\\mathcal{L}$: selected samples.\n", |
|
"\\end{algorithmic}\n", |
|
"\\end{algorithm}\n", |
|
"\n", |
|
"\\begin{algorithm}[t]\n", |
|
"\\small\n", |
|
"\\caption{Greedy Algorithm for Facility Location Objective}\n", |
|
"\\label{alg:submodularity}\n", |
|
"\\begin{algorithmic}[1]\n", |
|
"\\State \\textbf{Input:} $\\mathcal{U} = \\{x_i\\}_{i=1}^{N}$: a set of unlabeled samples; $M$: the number of samples to be selected. \n", |
|
"\\State \\textbf{Initialization:} $\\mathcal{L} = \\varnothing$, $\\mathcal{U} = V$. $\\forall i, \\rho_i = -1$: maximum similarity of $x_i$ to selected samples. \n", |
|
"\\While{$|\\mathcal{L}| < M$}\n", |
|
" \\State $u^{*}=\\arg \\max_{u \\in \\mathcal{U}} \\sum_{i = 1}^{N}\\left(\\max \\left\\{0, \\cos(x_i, x_u) - \\rho_{i}\\right\\}\\right)$\n", |
|
" \\State $\\mathcal{L} = \\mathcal{L} \\cup\\left\\{u^{*}\\right\\}$\n", |
|
" \\State $\\mathcal{U} = \\mathcal{U} \\setminus\\left\\{u^{*}\\right\\}$\n", |
|
" \\State $\\forall i, \\rho_{i}=\\max \\left\\{\\rho_{i}, \\cos(x_i, x_{u^*})\\right\\}$ \\quad // update maximum similarity of each $x_i$ to selected samples\n", |
|
"\\EndWhile\n", |
|
"\\State \\textbf{Return:} $\\mathcal{L}$: selected samples.\n", |
|
"\\end{algorithmic}\n", |
|
"\\end{algorithm}\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\\end{appendices}\n", |
|
"\n", |
|
"\n", |
|
"\\end{document}\n" |
|
], |
|
"del_percentage": 0.05483 |
|
} |
|
} |