import os
import json
import re
from datasets import load_dataset

os.makedirs("data/tweet_topic", exist_ok=True)
data = load_dataset("cardiffnlp/tweet_topic_multi")
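# tweet_topic marks user mentions as {@display name@}; this pattern captures them for rewriting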
re_user = re.compile(r'{@[^@^}]*@}')


def process(tmp):
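    """Turn one pandas split into JSON-serializable records with cleaned text and a topic condition string."""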
    tmp = [i.to_dict() for _, i in tmp.iterrows()]
    for i in tmp:
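        # drop the integer label field; only label_name is used below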
        i.pop("label")
        text = i['text']
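        # rewrite {@display name@} mentions as plain @handles, replacing spaces with underscores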
        users = re_user.findall(text)
        for u in users:
            text = text.replace(u, u.replace("{@", "@").replace("@}", "").replace(" ", "_"))
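        # map the anonymization placeholders: {{USERNAME}} -> @user and {{URL}} -> {URL}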
        text = text.replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
        i['text'] = text
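        # expose the topic label names as a single human-readable conditioning string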
        i['condition'] = f'Topics: {", ".join([x.replace("_", " ") for x in i.pop("label_name")])}'
    return tmp

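# merge the 2020 and 2021 portions of each split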
train = process(data["train_2020"].to_pandas())
train += process(data["train_2021"].to_pandas())
val = process(data["validation_2020"].to_pandas())
val += process(data["validation_2021"].to_pandas())
test = process(data["test_2021"].to_pandas())
os.makedirs("dataset/topic", exist_ok=True)
with open("dataset/topic/train.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in train]))
with open("dataset/topic/validation.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in val]))
with open("dataset/topic/test.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in test]))