# aste-v2/src/convert.py
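"""Convert ASTE triple files into dataframes of character-indexed triples.

Word-level aspect and opinion spans are mapped onto character start and end
indices, either against the ASTE text itself or against the original SemEval
text.
"""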
from typing import Optional
import Levenshtein
import pandas as pd
from .data import read_aste_file, read_sem_eval_file
from .types import CharacterIndices, WordSpan
def get_original_text(
aste_file: str,
sem_eval_file: str,
debug: bool = False,
) -> pd.DataFrame:
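    """Recover the original SemEval sentence for every row of the ASTE file.

    The ASTE text has been preprocessed, so rows are matched back to the
    SemEval sentences by comparing the space-stripped text. When there is no
    exact match, the closest SemEval sentence by Levenshtein distance is
    used instead.
    """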
approximate_matches = 0
def best_match(text: str) -> str:
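        """Return the SemEval sentence whose space-stripped form best matches."""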
comparison = text.replace(" ", "")
if comparison in comparison_to_text:
return comparison_to_text[comparison]
nonlocal approximate_matches
approximate_matches += 1
distances = sem_eval_comparison.apply(
lambda se_comparison: Levenshtein.distance(comparison, se_comparison)
)
best = sem_eval_df.iloc[distances.argmin()].text
return best
sem_eval_df = read_sem_eval_file(sem_eval_file)
sem_eval_comparison = sem_eval_df.text.str.replace(" ", "")
comparison_to_text = dict(zip(sem_eval_comparison, sem_eval_df.text))
aste_df = read_aste_file(aste_file)
aste_df = aste_df.rename(columns={"text": "preprocessed_text"})
aste_df["text"] = aste_df.preprocessed_text.apply(best_match)
if debug:
print(f"Read {len(aste_df):,} rows")
print(f"Had to use {approximate_matches:,} approximate matches")
return aste_df[["text", "preprocessed_text", "triples"]]
def edit(original: str, preprocessed: str) -> list[Optional[int]]:
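    """Map each character of ``original`` back to its index in ``preprocessed``.

    Returns a list parallel to ``original`` whose entries are indices into
    ``preprocessed``, with None wherever Levenshtein.editops replaced or
    inserted a character.
    """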
indices: list[Optional[int]] = list(range(len(preprocessed)))
for operation, _source_position, destination_position in Levenshtein.editops(
preprocessed, original
):
if operation == "replace":
indices[destination_position] = None
elif operation == "insert":
indices.insert(destination_position, None)
elif operation == "delete":
del indices[destination_position]
return indices
def has_unmapped(indices: list[Optional[int]]) -> bool:
    """Check if any character index could not be mapped."""
    return any(index is None for index in indices)
def has_unmapped_non_space(row: pd.Series) -> bool:
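    """Check if any non-space character of the text lacks a mapped index."""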
letter_and_index: list[tuple[str, Optional[int]]] = list(
zip(row.text, row.text_indices)
)
return any(index is None for letter, index in letter_and_index if letter != " ")
def row_to_character_indices(row: pd.Series) -> pd.Series:
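    """Convert a single exploded row, printing the row if conversion fails."""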
try:
return pd.Series(
to_character_indices(
triplet=row.triples,
preprocessed=row.preprocessed_text,
text=row.text,
text_indices=row.text_indices,
)
)
    except Exception:
print(f"failed to process row {row.name}")
print(row)
raise
def to_character_indices(
*,
    triplet: tuple[tuple[int, ...], tuple[int, ...], str],
preprocessed: str,
text: str,
text_indices: list[Optional[int]],
) -> CharacterIndices:
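    """Convert a word-level triplet into character indices on the original text.

    The triplet spans refer to words of the preprocessed text; ``text_indices``
    (as produced by ``edit``) translates those positions into character
    offsets within the original text.
    """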
def find_start_index(span: WordSpan) -> int:
# the starting letter in the lookup can be missing or None
# this would cause a lookup failure
# to recover from this we can find the following letter index and backtrack
for index in range(span.start_index, span.end_index):
try:
text_index = text_indices.index(index)
for _ in range(index - span.start_index):
                    # stop once the start of the text is reached
                    if text_index - 1 < 0:
break
if text_indices[text_index - 1] is not None:
break
text_index -= 1
return text_index
except ValueError:
pass
# not present in list
raise ValueError(f"cannot find any part of {span}")
def find_end_index(span: WordSpan) -> int:
        # the ending letter in the lookup can be missing or None
        # this would cause a lookup failure
        # to recover from this we can find the preceding letter index and walk forwards
for index in range(span.end_index - 1, span.start_index - 1, -1):
try:
text_index = text_indices.index(index)
for _ in range(span.end_index - index):
if text_index + 1 >= len(text_indices):
break
if text_indices[text_index + 1] is not None:
break
text_index += 1
return text_index
except ValueError:
pass
# not present in list
raise ValueError(f"cannot find any part of {span}")
    def to_indices(span: tuple[int, ...]) -> tuple[int, int]:
        # map the first and last word of the span to character indices
word_start = span[0]
word_start_span = word_indices[word_start]
word_end = span[-1]
word_end_span = word_indices[word_end]
start_index = find_start_index(word_start_span)
end_index = find_end_index(word_end_span)
return start_index, end_index
aspect_span, opinion_span, sentiment = triplet
assert is_sequential(aspect_span), f"aspect span not sequential: {aspect_span}"
assert is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"
assert sentiment in {"POS", "NEG", "NEU"}, f"unknown sentiment: {sentiment}"
word_indices = WordSpan.to_spans(preprocessed)
aspect_start_index, aspect_end_index = to_indices(aspect_span)
aspect_term = text[aspect_start_index : aspect_end_index + 1]
opinion_start_index, opinion_end_index = to_indices(opinion_span)
opinion_term = text[opinion_start_index : opinion_end_index + 1]
    nice_sentiment = to_nice_sentiment(sentiment)
return CharacterIndices(
aspect_start_index=aspect_start_index,
aspect_end_index=aspect_end_index,
aspect_term=aspect_term,
opinion_start_index=opinion_start_index,
opinion_end_index=opinion_end_index,
opinion_term=opinion_term,
sentiment=nice_sentiment,
)
def convert_sem_eval_text(
aste_file: str,
sem_eval_file: str,
debug: bool = False,
) -> pd.DataFrame:
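    """Convert an ASTE file into a dataframe of character-indexed triples.

    Each triple becomes one row, with aspect and opinion indices referring
    to the original SemEval text rather than the preprocessed ASTE text.

    A minimal usage sketch (the file paths here are hypothetical):

        df = convert_sem_eval_text(
            aste_file="train_triplets.txt",
            sem_eval_file="sem_eval_train.xml",
        )
    """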
df = get_original_text(
aste_file=aste_file,
sem_eval_file=sem_eval_file,
debug=debug,
)
df = df.explode("triples")
df = df.reset_index(drop=False)
df["text_indices"] = df.apply(
lambda row: edit(original=row.text, preprocessed=row.preprocessed_text),
axis="columns",
)
df = df.merge(
df.apply(row_to_character_indices, axis="columns"),
left_index=True,
right_index=True,
)
df = df.drop(columns=["preprocessed_text", "triples", "text_indices"])
return df
def convert_aste_text(aste_file: str) -> pd.DataFrame:
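    """Convert an ASTE file into a dataframe of character-indexed triples.

    Unlike convert_sem_eval_text, the indices refer to the ASTE text itself,
    so no mapping back to the original SemEval text is needed.
    """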
df = read_aste_file(aste_file)
df = df.explode("triples")
df = df.reset_index(drop=False)
df = df.merge(
df.apply(aste_row_to_character_indices, axis="columns"),
left_index=True,
right_index=True,
)
df = df.drop(columns=["triples"])
return df
def aste_row_to_character_indices(row: pd.Series) -> pd.Series:
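    """Convert a single exploded ASTE row, printing the row if conversion fails."""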
try:
return pd.Series(
aste_to_character_indices(
triplet=row.triples,
text=row.text,
)
)
    except Exception:
print(f"failed to process row {row.name}")
print(row)
raise
def is_sequential(span: tuple[int, ...]) -> bool:
    """Check that the word indices in the span are consecutive."""
    return all(span[index + 1] - span[index] == 1 for index in range(len(span) - 1))
def aste_to_character_indices(
*,
    triplet: tuple[tuple[int, ...], tuple[int, ...], str],
text: str,
) -> CharacterIndices:
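    """Convert a word-level triplet into character indices on the ASTE text."""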
    def to_indices(span: tuple[int, ...]) -> tuple[int, int]:
        # WordSpan.end_index is exclusive, so the inclusive character index
        # of the last word is end_index - 1
word_start = span[0]
word_start_span = word_indices[word_start]
word_end = span[-1]
word_end_span = word_indices[word_end]
return word_start_span.start_index, word_end_span.end_index - 1
aspect_span, opinion_span, sentiment = triplet
assert is_sequential(aspect_span), f"aspect span not sequential: {aspect_span}"
assert is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"
assert sentiment in {"POS", "NEG", "NEU"}, f"unknown sentiment: {sentiment}"
word_indices = WordSpan.to_spans(text)
aspect_start_index, aspect_end_index = to_indices(aspect_span)
aspect_term = text[aspect_start_index : aspect_end_index + 1]
opinion_start_index, opinion_end_index = to_indices(opinion_span)
opinion_term = text[opinion_start_index : opinion_end_index + 1]
    nice_sentiment = to_nice_sentiment(sentiment)
return CharacterIndices(
aspect_start_index=aspect_start_index,
aspect_end_index=aspect_end_index,
aspect_term=aspect_term,
opinion_start_index=opinion_start_index,
opinion_end_index=opinion_end_index,
opinion_term=opinion_term,
sentiment=nice_sentiment,
)
label_to_sentiment = {
"POS": "positive",
"NEG": "negative",
"NEU": "neutral",
}
def to_nice_sentiment(label: str) -> str:
    """Translate a sentiment label (e.g. POS) into its readable form."""
    return label_to_sentiment[label]