init
- dataset_stats.py +30 -0
- stats.csv +7 -0
dataset_stats.py
ADDED
@@ -0,0 +1,30 @@
import os
import json
from os.path import join as pj
from glob import glob
from statistics import mean

import pandas as pd
from datasets import load_dataset


def count_word(text):
    return len(text.split())


if __name__ == '__main__':
    data = ["chemprot", "citation_intent", "hyperpartisan_news", "rct-sample", "sciie"]
    stats = {}
    for d in data:
        _data = load_dataset('', d)
        stats[d] = {
            'word/validation': mean([count_word(k['text']) for k in _data['validation']]),
            'word/test': mean([count_word(k['text']) for k in _data['test']]),
            'word/train': mean([count_word(k['text']) for k in _data['train']]),
            'instance/dev': len(_data['validation']),
            'instance/test': len(_data['test']),
            'instance/train': len(_data['train'])
        }
    pd.DataFrame(stats).astype(int).to_csv('stats.csv')
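For reference, pd.DataFrame(stats) places each dataset configuration in its own column and each metric in its own row, and astype(int) truncates the fractional word averages before the table is written. A minimal sketch with a toy stats dict (the word means here are made-up decimals, only to show the truncation):

import pandas as pd

# Toy version of the stats dict built above; the word means are illustrative decimals.
stats = {
    'chemprot': {'word/train': 31.4, 'instance/train': 4169},
    'sciie': {'word/train': 32.7, 'instance/train': 3219},
}

# Columns = dataset names, rows = metrics; the float means are truncated to int.
print(pd.DataFrame(stats).astype(int))
#                 chemprot  sciie
# word/train            31     32
# instance/train      4169   3219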
stats.csv
ADDED
@@ -0,0 +1,7 @@
,chemprot,citation_intent,hyperpartisan_news,rct-sample,sciie
word/validation,32,40,502,26,32
word/test,32,42,612,26,32
word/train,31,42,536,26,32
instance/dev,2427,114,64,30212,455
instance/test,3469,139,65,30135,974
instance/train,4169,1688,516,500,3219
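When reading the table back, note that to_csv writes the metric names as an unnamed index column, which is why the header row begins with a comma. A minimal sketch for inspecting the file with pandas, assuming stats.csv sits in the working directory:

import pandas as pd

# Metric names sit in the first, unnamed column, so use it as the index.
stats = pd.read_csv('stats.csv', index_col=0)

# Average words per training instance for each dataset configuration.
print(stats.loc['word/train'])

# Total labelled instances per dataset across the three splits.
print(stats.loc[['instance/train', 'instance/dev', 'instance/test']].sum())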