Maxwell Lyu committed on
Commit
1e3be86
·
1 Parent(s): aa2789b

add dummy citation

Browse files
Files changed (2) hide show
  1. app.py +5 -0
  2. content.py +24 -1
app.py CHANGED
@@ -210,6 +210,11 @@ with gr.Blocks(theme=gr.themes.Base()) as demo:
210
  ],
211
  outputs=[dataframe],
212
  )
 
 
 
 
 
213
 
214
  if __name__ == "__main__":
215
  demo.launch()
 
210
  ],
211
  outputs=[dataframe],
212
  )
213
+ gr.Code(
214
+ language="markdown",
215
+ label="Citation",
216
+ value=CITATION,
217
+ )
218
 
219
  if __name__ == "__main__":
220
  demo.launch()
content.py CHANGED
@@ -17,4 +17,27 @@ MARKDOWN_REDUCE_FUNCTION = (
17
  "Median: `lambda x: np.median(x)`\n"
18
  "Max: `lambda x: np.max(x)`\n"
19
  "Min: `lambda x: np.min(x)`\n"
20
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  "Median: `lambda x: np.median(x)`\n"
18
  "Max: `lambda x: np.max(x)`\n"
19
  "Min: `lambda x: np.min(x)`\n"
20
+ )
21
+
22
+ CITATION = """\
23
+ @inproceedings{lu-etal-2024-llamax,
24
+ title = "{LL}a{MAX}: Scaling Linguistic Horizons of {LLM} by Enhancing Translation Capabilities Beyond 100 Languages",
25
+ author = "Lu, Yinquan and
26
+ Zhu, Wenhao and
27
+ Li, Lei and
28
+ Qiao, Yu and
29
+ Yuan, Fei",
30
+ editor = "Al-Onaizan, Yaser and
31
+ Bansal, Mohit and
32
+ Chen, Yun-Nung",
33
+ booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
34
+ month = nov,
35
+ year = "2024",
36
+ address = "Miami, Florida, USA",
37
+ publisher = "Association for Computational Linguistics",
38
+ url = "https://aclanthology.org/2024.findings-emnlp.631",
39
+ doi = "10.18653/v1/2024.findings-emnlp.631",
40
+ pages = "10748--10772",
41
+ abstract = "Large Language Models (LLMs) demonstrate remarkable translation capabilities in high-resource language tasks, yet their performance in low-resource languages is hindered by insufficient multilingual data during pre-training. To address this, we conduct extensive multilingual continual pre-training on the LLaMA series models, enabling translation support across more than 100 languages. Through a comprehensive analysis of training strategies, such as vocabulary expansion and data augmentation, we develop LLaMAX. Remarkably, without sacrificing its generalization ability, LLaMAX achieves significantly higher translation performance compared to existing open-source LLMs (by more than 10 spBLEU points) and performs on-par with specialized translation model (M2M-100-12B) on the Flores-101 benchmark. Extensive experiments indicate that LLaMAX can serve as a robust multilingual foundation model. The code and the models are publicly available.",
42
+ }\
43
+ """