Matthew Franglen committed on
Commit
8e12b39
1 Parent(s): d136bc8

Split up some of the code

Browse files
Files changed (3) hide show
  1. src/convert.py +23 -89
  2. src/data.py +45 -0
  3. src/types.py +30 -0
src/convert.py CHANGED
@@ -1,51 +1,10 @@
1
- import ast
2
- import re
3
- from dataclasses import dataclass
4
- from typing import Optional, TypedDict
5
 
6
  import Levenshtein
7
  import pandas as pd
8
 
9
-
10
- def read_sem_eval_file(file: str) -> pd.DataFrame:
11
- df = pd.read_xml(file)[["text"]]
12
- return df
13
-
14
-
15
- def read_aste_file(file: str) -> pd.DataFrame:
16
- def triple_to_hashable(
17
- triple: tuple[list[int], list[int], str]
18
- ) -> tuple[tuple[int, ...], tuple[int, ...], str]:
19
- aspect_span, opinion_span, sentiment = triple
20
- return tuple(aspect_span), tuple(opinion_span), sentiment
21
-
22
- df = pd.read_csv(
23
- file,
24
- sep="####",
25
- header=None,
26
- names=["text", "triples"],
27
- engine="python",
28
- )
29
-
30
- # There are duplicate rows, some of which have the same triples and some don't
31
- # This deals with that by
32
- # * first dropping the pure duplicates,
33
- # * then parsing the triples and exploding them to one per row
34
- # * then dropping the exploded duplicates (have to convert triples back to string for this)
35
- # * then grouping the triples up again
36
- # * finally sorting the distinct triples
37
-
38
- # df = df.copy()
39
- df = df.drop_duplicates()
40
- df["triples"] = df.triples.apply(ast.literal_eval)
41
- df = df.explode("triples")
42
- df["triples"] = df.triples.apply(triple_to_hashable)
43
- df = df.drop_duplicates()
44
- df = df.groupby("text").agg(list)
45
- df = df.reset_index(drop=False)
46
- df["triples"] = df.triples.apply(set).apply(sorted)
47
-
48
- return df
49
 
50
 
51
  def get_original_text(
@@ -106,25 +65,6 @@ def has_unmapped_non_space(row: pd.Series) -> bool:
106
  return any(index is None for letter, index in letter_and_index if letter != " ")
107
 
108
 
109
- @dataclass(frozen=True)
110
- class WordSpan:
111
- start_index: int
112
- end_index: int # this is the letter after the end
113
-
114
-
115
- class CharacterIndices(TypedDict):
116
- aspect_start_index: int
117
- aspect_end_index: int
118
- aspect_term: str
119
- opinion_start_index: int
120
- opinion_end_index: int
121
- opinion_term: str
122
- sentiment: str
123
-
124
-
125
- word_pattern = re.compile(r"\S+")
126
-
127
-
128
  def row_to_character_indices(row: pd.Series) -> pd.Series:
129
  try:
130
  return pd.Series(
@@ -202,10 +142,7 @@ def to_character_indices(
202
  assert is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"
203
  assert sentiment in {"POS", "NEG", "NEU"}, f"unknown sentiment: {sentiment}"
204
 
205
- word_indices = [
206
- WordSpan(start_index=match.start(), end_index=match.end())
207
- for match in word_pattern.finditer(preprocessed)
208
- ]
209
 
210
  aspect_start_index, aspect_end_index = to_indices(aspect_span)
211
  aspect_term = text[aspect_start_index : aspect_end_index + 1]
@@ -218,15 +155,15 @@ def to_character_indices(
218
  "NEU": "neutral",
219
  }[sentiment]
220
 
221
- return {
222
- "aspect_start_index": aspect_start_index,
223
- "aspect_end_index": aspect_end_index,
224
- "aspect_term": aspect_term,
225
- "opinion_start_index": opinion_start_index,
226
- "opinion_end_index": opinion_end_index,
227
- "opinion_term": opinion_term,
228
- "sentiment": nice_sentiment,
229
- }
230
 
231
 
232
  def convert_sem_eval_text(
@@ -304,10 +241,7 @@ def aste_to_character_indices(
304
  assert is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"
305
  assert sentiment in {"POS", "NEG", "NEU"}, f"unknown sentiment: {sentiment}"
306
 
307
- word_indices = [
308
- WordSpan(start_index=match.start(), end_index=match.end())
309
- for match in word_pattern.finditer(text)
310
- ]
311
 
312
  aspect_start_index, aspect_end_index = to_indices(aspect_span)
313
  aspect_term = text[aspect_start_index : aspect_end_index + 1]
@@ -320,12 +254,12 @@ def aste_to_character_indices(
320
  "NEU": "neutral",
321
  }[sentiment]
322
 
323
- return {
324
- "aspect_start_index": aspect_start_index,
325
- "aspect_end_index": aspect_end_index,
326
- "aspect_term": aspect_term,
327
- "opinion_start_index": opinion_start_index,
328
- "opinion_end_index": opinion_end_index,
329
- "opinion_term": opinion_term,
330
- "sentiment": nice_sentiment,
331
- }
 
1
+ from typing import Optional
 
 
 
2
 
3
  import Levenshtein
4
  import pandas as pd
5
 
6
+ from .data import read_aste_file, read_sem_eval_file
7
+ from .types import CharacterIndices, WordSpan
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
 
10
  def get_original_text(
 
65
  return any(index is None for letter, index in letter_and_index if letter != " ")
66
 
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  def row_to_character_indices(row: pd.Series) -> pd.Series:
69
  try:
70
  return pd.Series(
 
142
  assert is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"
143
  assert sentiment in {"POS", "NEG", "NEU"}, f"unknown sentiment: {sentiment}"
144
 
145
+ word_indices = WordSpan.to_spans(preprocessed)
 
 
 
146
 
147
  aspect_start_index, aspect_end_index = to_indices(aspect_span)
148
  aspect_term = text[aspect_start_index : aspect_end_index + 1]
 
155
  "NEU": "neutral",
156
  }[sentiment]
157
 
158
+ return CharacterIndices(
159
+ aspect_start_index=aspect_start_index,
160
+ aspect_end_index=aspect_end_index,
161
+ aspect_term=aspect_term,
162
+ opinion_start_index=opinion_start_index,
163
+ opinion_end_index=opinion_end_index,
164
+ opinion_term=opinion_term,
165
+ sentiment=nice_sentiment,
166
+ )
167
 
168
 
169
  def convert_sem_eval_text(
 
241
  assert is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"
242
  assert sentiment in {"POS", "NEG", "NEU"}, f"unknown sentiment: {sentiment}"
243
 
244
+ word_indices = WordSpan.to_spans(text)
 
 
 
245
 
246
  aspect_start_index, aspect_end_index = to_indices(aspect_span)
247
  aspect_term = text[aspect_start_index : aspect_end_index + 1]
 
254
  "NEU": "neutral",
255
  }[sentiment]
256
 
257
+ return CharacterIndices(
258
+ aspect_start_index=aspect_start_index,
259
+ aspect_end_index=aspect_end_index,
260
+ aspect_term=aspect_term,
261
+ opinion_start_index=opinion_start_index,
262
+ opinion_end_index=opinion_end_index,
263
+ opinion_term=opinion_term,
264
+ sentiment=nice_sentiment,
265
+ )
src/data.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ from pathlib import Path
3
+
4
+ import pandas as pd
5
+
6
+
7
def read_sem_eval_file(file: str | Path) -> pd.DataFrame:
    """Load a SemEval XML file, keeping only the sentence text column."""
    return pd.read_xml(file).loc[:, ["text"]]
10
+
11
+
12
+ def read_aste_file(file: str | Path) -> pd.DataFrame:
13
+ df = pd.read_csv(
14
+ file,
15
+ sep="####",
16
+ header=None,
17
+ names=["text", "triples"],
18
+ engine="python",
19
+ )
20
+
21
+ # There are duplicate rows, some of which have the same triples and some don't
22
+ # This deals with that by
23
+ # * first dropping the pure duplicates,
24
+ # * then parsing the triples and exploding them to one per row
25
+ # * then dropping the exploded duplicates (have to convert triples back to string for this)
26
+ # * then grouping the triples up again
27
+ # * finally sorting the distinct triples
28
+
29
+ df = df.drop_duplicates()
30
+ df["triples"] = df.triples.apply(ast.literal_eval)
31
+ df = df.explode("triples")
32
+ df["triples"] = df.triples.apply(_triple_to_hashable)
33
+ df = df.drop_duplicates()
34
+ df = df.groupby("text").agg(list)
35
+ df = df.reset_index(drop=False)
36
+ df["triples"] = df.triples.apply(set).apply(sorted)
37
+
38
+ return df
39
+
40
+
41
+ def _triple_to_hashable(
42
+ triple: tuple[list[int], list[int], str]
43
+ ) -> tuple[tuple[int, ...], tuple[int, ...], str]:
44
+ aspect_span, opinion_span, sentiment = triple
45
+ return tuple(aspect_span), tuple(opinion_span), sentiment
src/types.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import re
4
+ from dataclasses import dataclass
5
+
6
+ word_pattern = re.compile(r"\S+")
7
+
8
+
9
@dataclass(frozen=True)
class WordSpan:
    """Character span of a single whitespace-delimited word."""

    start_index: int
    end_index: int  # index of the character just after the word

    @staticmethod
    def to_spans(text: str) -> "list[WordSpan]":
        """Return the span of every whitespace-separated word in *text*."""
        spans: "list[WordSpan]" = []
        for match in re.finditer(r"\S+", text):
            spans.append(WordSpan(match.start(), match.end()))
        return spans
20
+
21
+
22
@dataclass(frozen=True)
class CharacterIndices:
    """Character-level location of one (aspect, opinion, sentiment) triple.

    Start/end indices refer to positions in the original sentence text;
    the callers slice with ``text[start : end + 1]``, so both ends are
    inclusive.
    """

    # first/last (inclusive) character of the aspect term, plus its text
    aspect_start_index: int
    aspect_end_index: int
    aspect_term: str
    # first/last (inclusive) character of the opinion term, plus its text
    opinion_start_index: int
    opinion_end_index: int
    opinion_term: str
    # "positive" / "negative" / "neutral" (mapped from POS/NEG/NEU upstream)
    sentiment: str