Datasets:
Matthew Franglen
committed on
Commit
•
59da9af
1
Parent(s):
c986f65
Create an entrypoint and split code up
Browse files
The aste file produced by this matches the notebook output
- src/alignment.py +165 -0
- src/convert.py +11 -0
- src/main.py +44 -0
- src/sentiment.py +9 -0
- src/types.py +23 -3
src/alignment.py
ADDED
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dataclasses import asdict
|
2 |
+
from typing import Optional
|
3 |
+
|
4 |
+
import Levenshtein
|
5 |
+
import pandas as pd
|
6 |
+
|
7 |
+
from .types import CharacterIndices, Triplet, WordSpans
|
8 |
+
|
9 |
+
|
10 |
+
def find_closest_text(
    *,
    original: pd.Series,
    replacement: pd.Series,
) -> pd.Series:
    """Return the replacement values aligned to the original values.

    Rows are first matched by comparing texts with all spaces removed; any
    rows that still fail to match are paired with the replacement text that
    has the smallest Levenshtein distance. At most 5% of the rows may need
    the fuzzy fallback, otherwise an AssertionError is raised.
    """
    no_space_replacements = {text.replace(" ", ""): text for text in replacement}
    result = original.str.replace(" ", "").map(no_space_replacements)
    non_perfect_matches = result.isna().sum()

    assert non_perfect_matches / len(original) <= 0.05, (
        "Poor alignment with replacement text. "
        # fixed: "{len(original),}" formatted a one-element tuple (e.g. "(2,)");
        # ":," is the thousands-separator format spec that was intended
        f"{non_perfect_matches:,} of {len(original):,} rows did not match well"
    )

    def closest(text: str) -> str:
        # Fuzzy fallback: pick the replacement with the smallest edit distance.
        distances = replacement.apply(
            lambda comparison: Levenshtein.distance(text, comparison)
        )
        return replacement.iloc[distances.argmin()]

    result.loc[result.isna()] = result[result.isna()].apply(closest)
    return result
|
33 |
+
|
34 |
+
|
35 |
+
def to_character_indices_series(row: pd.Series) -> pd.Series:
    """Row-wise adapter: convert a row's word-index triple into a Series of
    character-index fields, suitable for pd.DataFrame.apply(axis="columns")."""
    indices = to_character_indices(triplet=row.triples, text=row.text)
    return pd.Series(asdict(indices))
|
38 |
+
|
39 |
+
|
40 |
+
def to_character_indices(
    *,
    triplet: Triplet,
    text: str,
) -> CharacterIndices:
    """Convert a word-index triplet into character indices within ``text``.

    Both the aspect and opinion word spans must be sequential runs of word
    indices. End indices are inclusive.
    """
    aspect_span, opinion_span, _ = triplet
    assert _is_sequential(aspect_span), f"aspect span not sequential: {aspect_span}"
    assert _is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"

    spans = WordSpans.make(text)

    def term(start: int, end: int) -> str:
        # inclusive character slice
        return text[start : end + 1]

    aspect_start, aspect_end = spans.to_indices(aspect_span)
    opinion_start, opinion_end = spans.to_indices(opinion_span)

    return CharacterIndices(
        aspect_start_index=aspect_start,
        aspect_end_index=aspect_end,
        aspect_term=term(aspect_start, aspect_end),
        opinion_start_index=opinion_start,
        opinion_end_index=opinion_end,
        opinion_term=term(opinion_start, opinion_end),
    )
|
64 |
+
|
65 |
+
|
66 |
+
def to_aligned_character_indices(
    *,
    original: str,
    replacement: str,
    original_indices: CharacterIndices,
) -> CharacterIndices:
    """Map character indices computed against ``original`` onto the
    ``replacement`` text, using a Levenshtein character alignment."""
    indices = _aligned_character_indices(original=original, replacement=replacement)

    def realign(start: int, end: int) -> tuple[int, int, str]:
        # Translate one inclusive [start, end] span into replacement space
        # and extract the corresponding term text.
        new_start = _aligned_start_index(
            text=replacement, original_index=start, indices=indices
        )
        new_end = _aligned_end_index(
            text=replacement, original_index=end, indices=indices
        )
        return new_start, new_end, replacement[new_start : new_end + 1]

    aspect_start, aspect_end, aspect_term = realign(
        original_indices.aspect_start_index, original_indices.aspect_end_index
    )
    opinion_start, opinion_end, opinion_term = realign(
        original_indices.opinion_start_index, original_indices.opinion_end_index
    )

    return CharacterIndices(
        aspect_start_index=aspect_start,
        aspect_end_index=aspect_end,
        aspect_term=aspect_term,
        opinion_start_index=opinion_start,
        opinion_end_index=opinion_end,
        opinion_term=opinion_term,
    )
|
106 |
+
|
107 |
+
|
108 |
+
def _is_sequential(span: tuple[int, ...]) -> bool:
|
109 |
+
return all(span[index + 1] - span[index] == 1 for index in range(len(span) - 1))
|
110 |
+
|
111 |
+
|
112 |
+
def _aligned_character_indices(original: str, replacement: str) -> list[Optional[int]]:
    """Map each character position of ``replacement`` back to the index of the
    ``original`` character it aligns with, or None where no original character
    survives (inserted or replaced characters).
    """
    # Start from the identity mapping over the original text, then replay the
    # Levenshtein edit script so positions end up in replacement space.
    indices: list[Optional[int]] = list(range(len(original)))
    # NOTE(review): this assumes editops yields operations in an order where
    # each destination_position indexes correctly into the partially-edited
    # list — confirm against the python-Levenshtein editops contract.
    for operation, _source_position, destination_position in Levenshtein.editops(
        original, replacement
    ):
        if operation == "replace":
            # character changed: no original character to point back to
            indices[destination_position] = None
        elif operation == "insert":
            indices.insert(destination_position, None)
        elif operation == "delete":
            del indices[destination_position]
    return indices
|
124 |
+
|
125 |
+
|
126 |
+
def _aligned_start_index(
|
127 |
+
text: str, original_index: int, indices: list[Optional[int]]
|
128 |
+
) -> int:
|
129 |
+
closest_after = min(
|
130 |
+
index for index in indices if index is not None and index >= original_index
|
131 |
+
)
|
132 |
+
index = indices.index(closest_after)
|
133 |
+
|
134 |
+
# Not every character in the original text is aligned to a character in the
|
135 |
+
# replacement text. The replacement text may have deleted it, or replaced
|
136 |
+
# it. Can step back through each letter until the word boundary is found or
|
137 |
+
# an aligned character is found.
|
138 |
+
while index > 0:
|
139 |
+
if indices[index - 1] is not None:
|
140 |
+
break
|
141 |
+
if text[index - 1] == " ":
|
142 |
+
break
|
143 |
+
index -= 1
|
144 |
+
return index
|
145 |
+
|
146 |
+
|
147 |
+
def _aligned_end_index(
|
148 |
+
text: str, original_index: int, indices: list[Optional[int]]
|
149 |
+
) -> int:
|
150 |
+
closest_before = min(
|
151 |
+
index for index in indices if index is not None and index <= original_index
|
152 |
+
)
|
153 |
+
index = indices.index(closest_before)
|
154 |
+
|
155 |
+
# Not every character in the original text is aligned to a character in the
|
156 |
+
# replacement text. The replacement text may have deleted it, or replaced
|
157 |
+
# it. Can step back through each letter until the word boundary is found or
|
158 |
+
# an aligned character is found.
|
159 |
+
while index < len(indices) - 1:
|
160 |
+
if indices[index + 1] is not None:
|
161 |
+
break
|
162 |
+
if text[index + 1] == " ":
|
163 |
+
break
|
164 |
+
index += 1
|
165 |
+
return index
|
src/convert.py
CHANGED
@@ -263,3 +263,14 @@ def aste_to_character_indices(
|
|
263 |
opinion_term=opinion_term,
|
264 |
sentiment=nice_sentiment,
|
265 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
263 |
opinion_term=opinion_term,
|
264 |
sentiment=nice_sentiment,
|
265 |
)
|
266 |
+
|
267 |
+
|
268 |
+
# Mapping from raw ASTE sentiment labels to readable sentiment names.
# NOTE(review): this duplicates src/sentiment.py — consider importing from there.
label_to_sentiment = {
    "POS": "positive",
    "NEG": "negative",
    "NEU": "neutral",
}


def to_nice_sentiment(label: str) -> str:
    """Translate a raw label ("POS", "NEG" or "NEU") to a readable sentiment.

    Raises KeyError for unknown labels.
    """
    # fixed: previously looked up the undefined name `sentiment`, which raised
    # NameError on every call; the parameter is `label`.
    return label_to_sentiment[label]
|
src/main.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pathlib import Path
|
2 |
+
from typing import Annotated
|
3 |
+
|
4 |
+
import typer
|
5 |
+
|
6 |
+
from .alignment import to_character_indices_series
|
7 |
+
from .data import read_aste_file
|
8 |
+
from .sentiment import to_nice_sentiment
|
9 |
+
|
10 |
+
# Typer application object; the commands below register themselves on it
# via @app.command().
app = typer.Typer()
|
11 |
+
|
12 |
+
|
13 |
+
@app.command()
def aste(
    aste_file: Annotated[Path, typer.Option()],
    output_file: Annotated[Path, typer.Option()],
) -> None:
    """Convert an ASTE file into a parquet file of character-indexed triples."""
    frame = read_aste_file(aste_file)
    # One row per triple, keeping the source sentence index as a column.
    frame = frame.explode("triples").reset_index(drop=False)
    character_indices = frame.apply(to_character_indices_series, axis="columns")
    frame = frame.merge(character_indices, left_index=True, right_index=True)
    frame["sentiment"] = frame.triples.apply(lambda triple: to_nice_sentiment(triple[2]))
    frame = frame.drop(columns=["triples"])

    # quick visual sanity check of the output
    print(frame.sample(3))

    frame.to_parquet(output_file, compression="gzip")
|
32 |
+
|
33 |
+
|
34 |
+
@app.command()
def sem_eval(
    aste_file: Annotated[Path, typer.Option()],
    sem_eval_file: Annotated[Path, typer.Option()],
    output_file: Annotated[Path, typer.Option()],
) -> None:
    # TODO: not yet implemented — presumably intended to combine the ASTE file
    # with the SemEval file and write the merged result to output_file.
    pass
|
41 |
+
|
42 |
+
|
43 |
+
# Script entry point: dispatch to the typer CLI when run directly.
if __name__ == "__main__":
    app()
|
src/sentiment.py
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Mapping from the raw three-letter sentiment labels used in ASTE data to
# their human-readable equivalents.
label_to_sentiment = dict(
    POS="positive",
    NEG="negative",
    NEU="neutral",
)


def to_nice_sentiment(label: str) -> str:
    """Translate a raw label ("POS", "NEG" or "NEU") to a readable sentiment.

    Raises KeyError for unknown labels.
    """
    return label_to_sentiment[label]
|
src/types.py
CHANGED
@@ -2,6 +2,12 @@ from __future__ import annotations
|
|
2 |
|
3 |
import re
|
4 |
from dataclasses import dataclass
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
word_pattern = re.compile(r"\S+")
|
7 |
|
@@ -11,12 +17,27 @@ class WordSpan:
|
|
11 |
start_index: int
|
12 |
end_index: int # this is the letter after the end
|
13 |
|
|
|
|
|
|
|
|
|
|
|
14 |
@staticmethod
|
15 |
-
def
|
16 |
-
|
17 |
WordSpan(start_index=match.start(), end_index=match.end())
|
18 |
for match in word_pattern.finditer(text)
|
19 |
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
|
22 |
@dataclass(frozen=True)
|
@@ -27,4 +48,3 @@ class CharacterIndices:
|
|
27 |
opinion_start_index: int
|
28 |
opinion_end_index: int
|
29 |
opinion_term: str
|
30 |
-
sentiment: str
|
|
|
2 |
|
3 |
import re
|
4 |
from dataclasses import dataclass
|
5 |
+
from typing import Literal
|
6 |
+
|
7 |
+
# Type aliases describing one ASTE triple: the aspect and opinion are given as
# tuples of word indices into the sentence, plus a raw sentiment label.
AspectWordIndices = tuple[int, ...]
OpinionWordIndices = tuple[int, ...]
Sentiment = Literal["NEG", "NEU", "POS"]
Triplet = tuple[AspectWordIndices, OpinionWordIndices, Sentiment]
|
11 |
|
12 |
word_pattern = re.compile(r"\S+")
|
13 |
|
|
|
17 |
start_index: int
|
18 |
end_index: int # this is the letter after the end
|
19 |
|
20 |
+
|
21 |
+
@dataclass
class WordSpans:
    """The character spans of every word in a text, in word order."""

    spans: list[WordSpan]

    @staticmethod
    def make(text: str) -> WordSpans:
        """Build the word spans for ``text`` by matching runs of non-space
        characters."""
        matches = word_pattern.finditer(text)
        return WordSpans(
            [WordSpan(start_index=m.start(), end_index=m.end()) for m in matches]
        )

    def to_indices(self, span: tuple[int, ...]) -> tuple[int, int]:
        """Convert a span of word indices to inclusive character indices of
        its first and last characters."""
        first_word = self.spans[span[0]]
        last_word = self.spans[span[-1]]
        # end_index is exclusive on WordSpan, so subtract one to make the
        # returned character index inclusive
        return first_word.start_index, last_word.end_index - 1
|
41 |
|
42 |
|
43 |
@dataclass(frozen=True)
|
|
|
48 |
opinion_start_index: int
|
49 |
opinion_end_index: int
|
50 |
opinion_term: str
|
|