asahi417 committed on
Commit
7ad0deb
1 Parent(s): 26f2c66

fix dataset

data/tweet_topic/test.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:af403aac629588548484961e79539be3b6ec19f5ef90c3cf421a6b726cf33d5d
-size 477695
+oid sha256:4d1fc97512da2c46e2f6a282e68802ea150ca150d59d2c1768a8eb3d63952c1d
+size 464724
data/tweet_topic/train.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d4bd21f70d3a0b9e543ac36c9223d4e8e01d3a56a717ec991db94062537f34fb
-size 1323428
+oid sha256:eb9937d1d9ee111e9f96d618ea863499e61b46a62aac48fe5b4a3197d0ec77ac
+size 1286572
data/tweet_topic/validation.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:101bbcaf17f25cadd00fed901b772cf46b0c3b97978259ef8d110c60dc1dfdfb
-size 165104
+oid sha256:091ed848c3e284e18cb304d636485b4878475c9a2ccbbffe6ad5c9fed6709373
+size 160547
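The three .jsonl splits above are stored with Git LFS, so the diff shows only the updated pointer files (a new oid and a smaller size), not the records themselves. After fetching the LFS objects (e.g. git lfs pull), the updated splits can be sanity-checked with a minimal sketch like the one below; the only assumption is one JSON object per line, which matches how the processing script in this commit writes these files.

import json

def load_jsonl(path):
    # One JSON object per line, matching how process/unify_sp_symbol.py writes files.
    with open(path) as f:
        return [json.loads(line) for line in f if line.strip()]

for split in ["train", "validation", "test"]:
    records = load_jsonl(f"data/tweet_topic/{split}.jsonl")
    print(split, len(records))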
process/unify_sp_symbol.py CHANGED
@@ -3,57 +3,26 @@ import re
 
 from glob import glob
 
-for i in glob("data/tweet_intimacy/*.jsonl"):
+# for i in glob("data/tweet_topic/*.jsonl"):
+#     with open(i) as f:
+#         data = [json.loads(j) for j in f.readlines()]
+#     for d in data:
+#         for c in ['text']:
+#             d[c] = d[c].replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
+#             for t in re.findall(r'{@[^@^}]*@}', d[c]):
+#                 d[c] = d[c].replace(t, t.replace("{@", "@").replace("@}", "").replace(" ", "_"))
+#     with open(i, "w") as f:
+#         f.write("\n".join([json.dumps(j) for j in data]))
+
+for i in glob("data/tweet_ner7/*.jsonl"):
     with open(i) as f:
         data = [json.loads(j) for j in f.readlines()]
     for d in data:
-        for c in ['text']:
-            tmp = d[c]
-            tmp = re.sub(r"\bhttp\b", "{{URL}}", tmp)
-            tmp = re.sub(r"@user", "{{USERNAME}}", tmp)
-            tmp = re.sub(r"(@[\S]+)", r"{\1@}", tmp)
-            d[c] = tmp
-
-    with open(i, "w") as f:
-        f.write("\n".join([json.dumps(j) for j in data]))
-
-for i in glob("data/tweet_qa/*.jsonl"):
-    with open(i) as f:
-        data = [json.loads(j) for j in f.readlines()]
-    for d in data:
-        for c in ['text', "paragraph", "question", "label_str"]:
-            tmp = d[c]
-            tmp = re.sub(r"(@[\S]+)", r"{\1@}", tmp)
-            tmp = tmp.replace(")@}", '@})')
-            d[c] = tmp
-
-    with open(i, "w") as f:
-        f.write("\n".join([json.dumps(j) for j in data]))
-
-for i in glob("data/tweet_similarity/*.jsonl"):
-    with open(i) as f:
-        data = [json.loads(j) for j in f.readlines()]
-    for d in data:
-        for c in ['text_1', "text_2"]:
-            tmp = d[c]
-            # tmp = re.sub(r"(@[\S]+)\b", r"{\1@}", tmp)
-            tmp = tmp.replace("{@user@}", "{{USERNAME}}")
-            d[c] = tmp
-
-    with open(i, "w") as f:
-        f.write("\n".join([json.dumps(j) for j in data]))
-
-
-for i in glob("data/tweet_intimacy/*.jsonl"):
-    with open(i) as f:
-        data = [json.loads(j) for j in f.readlines()]
-    for d in data:
-        for c in ['text']:
-            tmp = d[c].replace("{{URL}}", "@url")
-            # tmp = re.sub(r"\bhttp\b", "{{URL}}", tmp)
-            tmp = re.sub(r"@user", "{{USERNAME}}", tmp)
-            tmp = re.sub(r"(@[\S]+)", r"{\1@}", tmp)
-            d[c] = tmp
-
+        d['text'] = d['text'].replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
+        d['text'] = re.sub(r'\{@([^@].)@}', r'\1', d['text'])
+        for t in re.findall(r'{@[^@^}]*@}', d['text']):
+            t_new = t.replace("{@", "@").replace("@}", "").replace(" ", "_")
+            d['text'] = d['text'].replace(t, t_new)
+        d['text_tokenized'] =
     with open(i, "w") as f:
         f.write("\n".join([json.dumps(j) for j in data]))
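For reference, the new tweet_ner7 loop undoes the masking that the deleted loops applied: {{USERNAME}} is restored to @user, {{URL}} becomes {URL}, and wrapped handles such as {@Elon Musk@} are flattened to @Elon_Musk, while spans of exactly two characters like {@ab@} are unwrapped with no @ prefix. (The diff view truncates the d['text_tokenized'] assignment, so that line is left as shown.) Below is a self-contained sketch of the same transformations; the input string is made up for illustration.

import re

# Made-up input using the masks and the {@ ... @} wrapped-handle convention
# that the script's regexes expect.
text = "{{USERNAME}} shared {{URL}} with {@Elon Musk@} (cc {@ab@})"

# Restore the masks to the tweet_ner7 surface forms.
text = text.replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")

# Unwrap spans of exactly two characters: {@ab@} -> ab.
text = re.sub(r'\{@([^@].)@}', r'\1', text)

# Flatten the remaining wrapped handles: {@Elon Musk@} -> @Elon_Musk.
for t in re.findall(r'{@[^@^}]*@}', text):
    text = text.replace(t, t.replace("{@", "@").replace("@}", "").replace(" ", "_"))

print(text)  # -> @user shared {URL} with @Elon_Musk (cc ab)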