Datasets:
botp
/

Modalities:
Text
Formats:
parquet
Languages:
Chinese
ArXiv:
Libraries:
Datasets
Dask
License:
orz99 yentinglin committed on
Commit
aaff348
0 Parent(s):

Duplicate from yentinglin/zh_TW_c4

Browse files

Co-authored-by: Yen-Ting Lin <[email protected]>

Files changed (34) hide show
  1. .gitattributes +55 -0
  2. README.md +193 -0
  3. data/train-00000-of-00031-3f39a102ab83d07c.parquet +3 -0
  4. data/train-00001-of-00031-11e67f9621ccfa66.parquet +3 -0
  5. data/train-00002-of-00031-1f3b5f20c0c525af.parquet +3 -0
  6. data/train-00003-of-00031-ab11b74823d4c40b.parquet +3 -0
  7. data/train-00004-of-00031-05a6f6da6e2a1cc6.parquet +3 -0
  8. data/train-00005-of-00031-cee6464d907be215.parquet +3 -0
  9. data/train-00006-of-00031-f045ece455f409cb.parquet +3 -0
  10. data/train-00007-of-00031-ea485aa4a1c5dcfd.parquet +3 -0
  11. data/train-00008-of-00031-2ca3a7cee5f537d0.parquet +3 -0
  12. data/train-00009-of-00031-57a79b260ac5792f.parquet +3 -0
  13. data/train-00010-of-00031-72af92932dcd2b1d.parquet +3 -0
  14. data/train-00011-of-00031-b2c958186bc55287.parquet +3 -0
  15. data/train-00012-of-00031-41d2620a28387b46.parquet +3 -0
  16. data/train-00013-of-00031-8dff3f220ded66c5.parquet +3 -0
  17. data/train-00014-of-00031-fb36e5c9663861d9.parquet +3 -0
  18. data/train-00015-of-00031-c814d9d7fdc37c63.parquet +3 -0
  19. data/train-00016-of-00031-5e92432cf6edae27.parquet +3 -0
  20. data/train-00017-of-00031-d55ea984cef78067.parquet +3 -0
  21. data/train-00018-of-00031-6adb3dea1d3bfaf9.parquet +3 -0
  22. data/train-00019-of-00031-f58c3c1962d93e05.parquet +3 -0
  23. data/train-00020-of-00031-4f91a0fac98e5564.parquet +3 -0
  24. data/train-00021-of-00031-5f1f7b7a2aa71a83.parquet +3 -0
  25. data/train-00022-of-00031-0dcf59276dec6da4.parquet +3 -0
  26. data/train-00023-of-00031-418d000640b51a6b.parquet +3 -0
  27. data/train-00024-of-00031-b75ee6bc51581ebc.parquet +3 -0
  28. data/train-00025-of-00031-0c56fb12bb38f860.parquet +3 -0
  29. data/train-00026-of-00031-700cb1cc8673fa7c.parquet +3 -0
  30. data/train-00027-of-00031-86df88742d865995.parquet +3 -0
  31. data/train-00028-of-00031-3018fba2d583eff6.parquet +3 -0
  32. data/train-00029-of-00031-e81543082347b540.parquet +3 -0
  33. data/train-00030-of-00031-8e8f8b6eaf81342e.parquet +3 -0
  34. data/validation-00000-of-00001-303d3d5638c1fede.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar filter=lfs diff=lfs merge=lfs -text
30
+ *.tflite filter=lfs diff=lfs merge=lfs -text
31
+ *.tgz filter=lfs diff=lfs merge=lfs -text
32
+ *.wasm filter=lfs diff=lfs merge=lfs -text
33
+ *.xz filter=lfs diff=lfs merge=lfs -text
34
+ *.zip filter=lfs diff=lfs merge=lfs -text
35
+ *.zst filter=lfs diff=lfs merge=lfs -text
36
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
37
+ # Audio files - uncompressed
38
+ *.pcm filter=lfs diff=lfs merge=lfs -text
39
+ *.sam filter=lfs diff=lfs merge=lfs -text
40
+ *.raw filter=lfs diff=lfs merge=lfs -text
41
+ # Audio files - compressed
42
+ *.aac filter=lfs diff=lfs merge=lfs -text
43
+ *.flac filter=lfs diff=lfs merge=lfs -text
44
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
45
+ *.ogg filter=lfs diff=lfs merge=lfs -text
46
+ *.wav filter=lfs diff=lfs merge=lfs -text
47
+ # Image files - uncompressed
48
+ *.bmp filter=lfs diff=lfs merge=lfs -text
49
+ *.gif filter=lfs diff=lfs merge=lfs -text
50
+ *.png filter=lfs diff=lfs merge=lfs -text
51
+ *.tiff filter=lfs diff=lfs merge=lfs -text
52
+ # Image files - compressed
53
+ *.jpg filter=lfs diff=lfs merge=lfs -text
54
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
55
+ *.webp filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ dataset_info:
3
+ features:
4
+ - name: text
5
+ dtype: string
6
+ - name: timestamp
7
+ dtype: string
8
+ - name: url
9
+ dtype: string
10
+ - name: zh_id
11
+ dtype: int64
12
+ splits:
13
+ - name: train
14
+ num_bytes: 15132113806.438198
15
+ num_examples: 5179546
16
+ - name: validation
17
+ num_bytes: 14820238.368907347
18
+ num_examples: 5099
19
+ download_size: 5569491261
20
+ dataset_size: 15146934044.807106
21
+ license: odc-by
22
+ task_categories:
23
+ - text-generation
24
+ language:
25
+ - zh
26
+ pretty_name: Traditional Chinese C4
27
+ size_categories:
28
+ - 1M<n<10M
29
+ duplicated_from: yentinglin/zh_TW_c4
30
+ ---
31
+ # Language Models for Taiwanese Culture
32
+
33
+
34
+ <p align="center">
35
+ ✍️ <a href="https://huggingface.co/spaces/yentinglin/Taiwan-LLaMa2" target="_blank">Online Demo</a>
36
+
37
+ 🤗 <a href="https://huggingface.co/yentinglin" target="_blank">HF Repo</a> • 🐦 <a href="https://twitter.com/yentinglin56" target="_blank">Twitter</a> • 📃 <a href="https://arxiv.org/pdf/2305.13711.pdf" target="_blank">[Paper Coming Soon]</a>
38
+ • 👨️ <a href="https://yentingl.com/" target="_blank">Yen-Ting Lin</a>
39
+ <br/><br/>
40
+ <img src="https://www.csie.ntu.edu.tw/~miulab/taiwan-llama/logo-v2.png" width="100"> <br/>
41
+ <a href="https://github.com/tatsu-lab/stanford_alpaca/blob/main/LICENSE">
42
+ <img src="https://img.shields.io/badge/Code%20License-Apache_2.0-green.svg"></a>
43
+ <a href="https://github.com/tatsu-lab/stanford_alpaca/blob/main/DATA_LICENSE">
44
+ <img src="https://img.shields.io/badge/Data%20License-CC%20By%20NC%204.0-red.svg"></a>
45
+ <br/>
46
+
47
+ </p>
48
+
49
+
50
+
51
+
52
+ ## Overview
53
+ Taiwan-LLaMa is a full parameter fine-tuned model based on LLaMa 2 for Traditional Mandarin applications.
54
+
55
+ **Taiwan-LLaMa v1.0** was pretrained on over 5 billion tokens and instruction-tuned on over 490k conversations, both in Traditional Mandarin.
56
+
57
+ ## Demo
58
+ A live demonstration of the model can be accessed at [Hugging Face Spaces](https://huggingface.co/spaces/yentinglin/Taiwan-LLaMa2).
59
+
60
+ ## Key Features
61
+
62
+ 1. **Traditional Mandarin Support**: The model is fine-tuned to understand and generate text in Traditional Mandarin, making it suitable for Taiwanese culture and related applications.
63
+
64
+ 2. **Instruction-Tuned**: Further fine-tuned on conversational data to offer context-aware and instruction-following responses.
65
+
66
+ 3. **Performance on Vicuna Benchmark**: Taiwan-LLaMa's relative performance on Vicuna Benchmark is measured against models like GPT-4 and ChatGPT. It's particularly optimized for Taiwanese culture.
67
+
68
+ 4. **Flexible Customization**: Advanced options for controlling the model's behavior like system prompt, temperature, top-p, and top-k are available in the demo.
69
+
70
+
71
+ ## Work in progress
72
+ - [ ] **Improved pretraining**: A refined pretraining process (e.g. more data from Taiwan, training strategies) is under development, aiming to enhance model performance and better coverage of Taiwanese culture.
73
+ - [ ] **Extend max length**: Utilizing the RoPE mechanism as described in [the paper](https://arxiv.org/abs/2104.09864), the model's context length will be extended from 4k to 8k.
74
+
75
+
76
+ ## Taiwanese Culture Examples
77
+
78
+ | Question | ChatGPT | **Taiwan LLaMa v1.0** | **Taiwan LLaMa v0.0 (w/o pretraining)** | Llama2_Traditional_Chinese_13b_Chat |
79
+ |:-|:-|:-|:-|:-|
80
+ | 以惡地景觀著名的是高雄哪一個景點? | 以惡地地景著名的是高雄市的旗津。 | 高雄以擁有惡地景觀著名的景點是田寮月世界。✅ | 以惡地景觀著名的是高雄市的旗津島。旗津島是一個小島,... | 高雄市內有多個以惡地景觀著名的景點,其中以下面幾個最為著名: 1. 高雄捷運紅線沿途景點:高雄市政府為了推廣高雄 |
81
+ | 唐鳳是誰? | 唐鳳(Audrey Tang)是台灣的政治家、社會活動家和軟體工程師。她於2016年擔任台灣行政院數位政務委員,是台灣政府歷史上首位沒有政黨背景的政務官。唐鳳在軟體工程領域有豐富的經驗。 ✅ | 唐鳳是政治評論家、資訊社會學研究者,也是台灣第一位擔任政府數位政委的人。 ✅ | 唐鳳(Tang Feng)是一位著名的中國演員和歌手。他出生於1974年12月17日,出生地點在中國北京。唐鳳在1995年開始他的演藝生涯,並在同年發行了他的首張專輯《我不孤單》。此後,他發行了許多受歡迎的歌曲,並在多部電視劇和電影中擔任主角。唐鳳因其精湛的演技和迷人的歌聲而被譽為“亞洲魅力男王”。他的音樂獲得了多個獎項,包括“華表最佳新男演唱獎”、“華表最佳歌曲獎”和“華表最佳舞蹈獎”等。 | 唐鳳是唐朝時期的一位著名文學家,他的代表作品包括《陰騭》、《狂凡子》等。他的文學風格具有強烈的個性和創意,被譽為「唐代文學之父」。 |
82
+ | NTU 在哪? | NTU位於新加坡,具體地址是50 Nanyang Avenue, Singapore 639798。 | NTU 是國立臺灣大學的縮寫。它是臺灣的一所頂尖研究型大學,位於臺北市。 ✅| NTU 是台灣的一所大學,位於台北市。✅ | NTU 的總部位於台灣新北市板橋區,佔地面積約為1,000公頃。|
83
+ | 甚麼是22K? | "22K" 可能指的是金的純度,尤其是在珠寶和金飾方面。 | "22K"是指台灣的大學畢業生的起薪,大約是新台幣22,000元。這個詞被用來形容大學畢業生的起薪偏低,並且在就業市場上對於大學畢業生的需求不足。✅| 22K是指溫度尺度上的22,000K(千度)。1K代表一度,即溫度差距,是攝氏溫標(攝氏度)的一百萬分之一。因此,22K等於-253.15°C(攝氏溫標)或-423.67°F(華氏溫標)。 | 22K是一種簡寫,即22,000。它是一個數字,表示22,000個單位。|
84
+ | 肉圓要不要加香菜? | 這是個個人口味的問題,有些人喜歡在肉圓中加入香菜,因為香菜的清香可以增添風味;而有些人則不喜歡香菜的味道,覺得它會蓋過其他食材的味道。所以要不要加香菜,可以根據個人喜好來決定。 | 這是一個個人喜好的問題,有些人喜歡在肉圓裡加香菜,有些人不喜歡。**在台灣**,肉圓加香菜是一種常見的做法,這是一種在肉圓上加香菜的做法。然而,在其他國這是一個文化差異的問題。😊 | 肉圓加香菜可以增添一些芳香和美味的口感,如果你喜歡香菜的味道,可以試試加入肉圓中。香菜通常在製作過程中炒香,可以增添一些芳香和美味的味道。如果你想要加入香菜,可以在製作過程中炒香一些香菜,然後將其混入肉圓中。肉圓加香菜可以增添一些芳香和美味的口感,如果你喜歡香菜的味道,可以試試加入肉圓中。| 加香菜的肉圓味道更好,但是不加香菜的肉圓也很好吃。如果你喜歡香菜的味道,那麼加上香菜就好了。如果你不喜歡香菜,那麼不加也不會影響肉 |
85
+
86
+
87
+ ## Model
88
+
89
+ We provide a number of model checkpoints that we trained. Please find them on Hugging Face [here](https://huggingface.co/models?search=taiwan-llama). Here are some quick links to the checkpoints that are finetuned from LLaMa 2:
90
+
91
+ | **Model** | **13B** |
92
+ |--------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
93
+ | **Taiwan-LLaMa v1.0** (_better for Taiwanese Culture_) | 🤗 <a href="https://huggingface.co/yentinglin/Taiwan-LLaMa-v1.0" target="_blank">yentinglin/Taiwan-LLaMa-v1.0</a> |
94
+ | Taiwan-LLaMa v0.9 (partial instruction set) | 🤗 <a href="https://huggingface.co/yentinglin/Taiwan-LLaMa-v0.9" target="_blank">yentinglin/Taiwan-LLaMa-v0.9</a> |
95
+ | Taiwan-LLaMa v0.0 (no Traditional Mandarin pretraining) | 🤗 <a href="https://huggingface.co/yentinglin/Taiwan-LLaMa-v0.0" target="_blank">yentinglin/Taiwan-LLaMa-v0.0</a> |
96
+
97
+ ## Data
98
+
99
+ Here are some quick links to the datasets that we used to train the models:
100
+
101
+ | **Dataset** | **Link** |
102
+ |---------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
103
+ | **Instruction-tuning** | 🤗 <a href="https://huggingface.co/datasets/yentinglin/traditional_mandarin_instructions" target="_blank">yentinglin/traditional_mandarin_instructions</a> |
104
+ | Traditional Mandarin Pretraining | 🤗 <a href="https://huggingface.co/datasets/yentinglin/zh_TW_c4" target="_blank">yentinglin/zh_TW_c4</a> |
105
+
106
+
107
+ ## Architecture
108
+ Taiwan-LLaMa is based on LLaMa 2, leveraging transformer architecture, <a href="https://github.com/Dao-AILab/flash-attention" target="_blank">flash attention 2</a>, and bfloat16.
109
+
110
+ It includes:
111
+
112
+ * Pretraining Phase: Pretrained on a vast corpus of over 5 billion tokens, extracted from common crawl in Traditional Mandarin.
113
+ * Fine-tuning Phase: Further instruction-tuned on over 490k multi-turn conversational data to enable more instruction-following and context-aware responses.
114
+
115
+ ## Generic Capabilities on Vicuna Benchmark
116
+
117
+ The data is translated into Traditional Mandarin for evaluating the general capability.
118
+
119
+
120
+ <img src="./images/zhtw_vicuna_bench_chatgptbaseline.png" width="700">
121
+
122
+ The scores are calculated with ChatGPT as the baseline, represented as 100%. The other values show the relative performance of different models compared to ChatGPT.
123
+
124
+ | Language Model | Relative Score (%) |
125
+ |-------------------------------------|--------------------|
126
+ | GPT-4 | 102.59% |
127
+ | ChatGPT | 100.00% |
128
+ | **Taiwan-LLaMa v1.0** | 76.76% |
129
+ | Claude-Instant-1.2 | 74.04% |
130
+ | Llama2_Traditional_Chinese_13b_Chat | 56.21% |
131
+
132
+
133
+
134
+
135
+ ## How to deploy the model on my own machine?
136
+ We recommend hosting models with [🤗 Text Generation Inference](https://github.com/huggingface/text-generation-inference). Please see their [license](https://github.com/huggingface/text-generation-inference/blob/main/LICENSE) for details on usage and limitations.
137
+ ```bash
138
+ bash run_text_generation_inference.sh "yentinglin/Taiwan-LLaMa" NUM_GPUS DIR_TO_SAVE_MODEL PORT MAX_INPUT_LEN MODEL_MAX_LEN
139
+ ```
140
+
141
+ Prompt format follows vicuna-v1.1 template:
142
+ ```
143
+ A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {user} ASSISTANT:
144
+ ```
145
+
146
+ ## Setup development environment
147
+ ```bash
148
+ conda create -n taiwan-llama python=3.10 -y
149
+ conda activate taiwan-llama
150
+ pip install -r requirements.txt
151
+ ```
152
+
153
+
154
+ ## Citations
155
+ If you use our code, data, or models in your research, please cite this repository. You can use the following BibTeX entry:
156
+
157
+ ```bibtex
158
+ @inproceedings{lin-chen-2023-llm,
159
+ title = "{LLM}-Eval: Unified Multi-Dimensional Automatic Evaluation for Open-Domain Conversations with Large Language Models",
160
+ author = "Lin, Yen-Ting and Chen, Yun-Nung",
161
+ booktitle = "Proceedings of the 5th Workshop on NLP for Conversational AI (NLP4ConvAI 2023)",
162
+ month = jul,
163
+ year = "2023",
164
+ address = "Toronto, Canada",
165
+ publisher = "Association for Computational Linguistics",
166
+ url = "https://aclanthology.org/2023.nlp4convai-1.5",
167
+ pages = "47--58"
168
+ }
169
+
170
+ @misc{taiwanllama,
171
+ author={Lin, Yen-Ting and Chen, Yun-Nung},
172
+ title={Taiwanese-Aligned Language Models based on Meta-Llama2},
173
+ year={2023},
174
+ url={https://github.com/adamlin120/Taiwan-LLaMa},
175
+ note={Code and models available at https://github.com/adamlin120/Taiwan-LLaMa},
176
+ }
177
+ ```
178
+
179
+ ## Collaborate With Us
180
+ If you are interested in contributing to the development of Traditional Mandarin language models, exploring new applications, or leveraging Taiwan-LLaMa for your specific needs, please don't hesitate to contact us. We welcome collaborations from academia, industry, and individual contributors.
181
+
182
+ ## License
183
+ The code in this project is licensed under the Apache 2.0 License - see the [LICENSE](LICENSE) file for details.
184
+
185
+ The models included in this project are licensed under the LLAMA 2 Community License. See the [LLAMA2 License](https://github.com/facebookresearch/llama/blob/main/LICENSE) for full details.
186
+
187
+ ## OpenAI Data Acknowledgment
188
+ The data included in this project were generated using OpenAI's models and are subject to OpenAI's Terms of Use. Please review [OpenAI's Terms of Use](https://openai.com/policies/terms-of-use) for details on usage and limitations.
189
+
190
+
191
+ ## Acknowledgements
192
+
193
+ We thank [Meta LLaMA team](https://github.com/facebookresearch/llama) and [Vicuna team](https://github.com/lm-sys/FastChat) for their open-source efforts in democratizing large language models.
data/train-00000-of-00031-3f39a102ab83d07c.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e74082cbf17392f892015d9bcbe20c63de191be6b7988fcba6046856d1e179c
3
+ size 178948774
data/train-00001-of-00031-11e67f9621ccfa66.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a8ad39406d0c8bb5b298ab5f7386bbb052cf18c1f19de9cdfcd2b4cbd3aaea9
3
+ size 178775798
data/train-00002-of-00031-1f3b5f20c0c525af.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:caa84ccdbfba380bc2feef378d12eeee0d1701cde4cbc5b516d896e75d469554
3
+ size 180171863
data/train-00003-of-00031-ab11b74823d4c40b.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:82658cddee24cd7a630ac2753b18c9943b348013b5caea39e6e9b429045dbec2
3
+ size 180312179
data/train-00004-of-00031-05a6f6da6e2a1cc6.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d2dbcb0eff45eb2e1280c456ec535d3ab44373a2f21b72e8c3a9a3a8fed1644
3
+ size 179238086
data/train-00005-of-00031-cee6464d907be215.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:687024fdf2870549818ae9941c691c47b307c8819ab7474bc1ee7851a3ed7567
3
+ size 179796810
data/train-00006-of-00031-f045ece455f409cb.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff302666f2ab309b15368c53ac4ed2d6bd73b5daeb8510dadf0c6cf93756d95f
3
+ size 179357125
data/train-00007-of-00031-ea485aa4a1c5dcfd.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a089943b52653efabb40882871b60861a264c3e32854dbada66860ba429bdc5e
3
+ size 179554494
data/train-00008-of-00031-2ca3a7cee5f537d0.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc2faf10138551508323d1a9c190dfce7971e426ff72a7e50739a2adf20f7351
3
+ size 179104074
data/train-00009-of-00031-57a79b260ac5792f.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49a02953ce91be7460ec2c6ba6260effb161dcd31e038e657d545da519b450b6
3
+ size 178844838
data/train-00010-of-00031-72af92932dcd2b1d.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:537bf30657a35182277068954bfed40f25caf789695e76963a85ba57ad7ca624
3
+ size 179996603
data/train-00011-of-00031-b2c958186bc55287.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8912374fcd4b26341ee86ddceb2ade11eee066340de46b9604aa34eb8cc4452
3
+ size 179031687
data/train-00012-of-00031-41d2620a28387b46.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d25c3f180ca334657ef3ce36ff5bc8fd829f5b60a5604560298056aa32e3bb6
3
+ size 179397473
data/train-00013-of-00031-8dff3f220ded66c5.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2239a78653bf0a6724fde5a034a38c47012aac63f687e6bc2f7853bd2e44423d
3
+ size 178890297
data/train-00014-of-00031-fb36e5c9663861d9.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c2cd499d8ea3b703a95eb0b98a531ea8a57895da1ce2a2acb7d30dc9ce05cce
3
+ size 179843331
data/train-00015-of-00031-c814d9d7fdc37c63.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae91ab0c562cf6e8fa672b1daa2046567796f778da149e11e076279fea510b53
3
+ size 179138484
data/train-00016-of-00031-5e92432cf6edae27.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51f8618cc3b24c18413fd3743c06bf710f51430d0ecdbdf2fb17da403db803e3
3
+ size 179611068
data/train-00017-of-00031-d55ea984cef78067.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0dc93bfc141774ecba7e79cb3d726dc40bd1e47c9d263c21315652b90dc930c6
3
+ size 178877066
data/train-00018-of-00031-6adb3dea1d3bfaf9.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1695e612948e83524e7b4e392975b6302a84a82d341f4797cd40f5b5a799384d
3
+ size 179849643
data/train-00019-of-00031-f58c3c1962d93e05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1502b42af834c053cb62bf14ac0993ab2695ce16e252bb8fbe59f43ee8bcc214
3
+ size 178743150
data/train-00020-of-00031-4f91a0fac98e5564.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8f77a689cca0b5cf957e67cb5d1090f30f2bb7b02a384b8a653bee30f87e060
3
+ size 179384546
data/train-00021-of-00031-5f1f7b7a2aa71a83.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2305e895b4672509529c4f851a8b8b290d4f53162111e19ab5d92b8ae7014c35
3
+ size 179351392
data/train-00022-of-00031-0dcf59276dec6da4.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c3be777bb73b00e9d2e9f6d0ed99b1bebc5ea95327dbb9929c6d688626466e8
3
+ size 179715443
data/train-00023-of-00031-418d000640b51a6b.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0df81831c93b0179f7ae4b2e25da8f18c06efeca2685e9948375e2c6ab918fb9
3
+ size 180192241
data/train-00024-of-00031-b75ee6bc51581ebc.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0ce6eb7414415a7559996062fa1f3f17b5922a404ef41463f21adfc096b6713
3
+ size 179577205
data/train-00025-of-00031-0c56fb12bb38f860.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52ee5d6a39d925145ef5246f88949783f44099a72db8bec8fa541dc3a05d799a
3
+ size 179565369
data/train-00026-of-00031-700cb1cc8673fa7c.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83bc004f6e67d158e52c4e16cd8a88db5e581ed67bd7dcaee315162d8b6a80c6
3
+ size 179563815
data/train-00027-of-00031-86df88742d865995.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a12fc08aeda4921dcbcccd43791777e08159f12698f1b915d40f2be5300944c5
3
+ size 179846473
data/train-00028-of-00031-3018fba2d583eff6.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8e98b7530baf3e676d575e173f370aed676a0dfe115ad242a633c80c5e4e1c9
3
+ size 178971016
data/train-00029-of-00031-e81543082347b540.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b37d02bb68e18fbc86dc1831165576d0546105791a8b87eeec7440af4dc2c45c
3
+ size 180821421
data/train-00030-of-00031-8e8f8b6eaf81342e.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd147faaa200fbb6bf7f34a1a5ba6e907d5f36bf68de7556e976747dcd9f6ae1
3
+ size 179656682
data/validation-00000-of-00001-303d3d5638c1fede.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3496b64058341036528cfd3d5a597a6baab52caa6f7e476962c73ebe87d0511
3
+ size 5362815