from dataclasses import asdict
from typing import Optional

import Levenshtein
import pandas as pd

from .types import CharacterIndices, Triplet, WordSpans


def find_closest_text(
    *,
    original: pd.Series,
    replacement: pd.Series,
) -> pd.Series:
    """Return a Series of the replacement values aligned to the original values."""
    no_space_replacements = {text.replace(" ", ""): text for text in replacement}
    original_text = original.str.replace(" ", "")
    result = original_text.map(no_space_replacements)
    non_perfect_matches = result.isna().sum()

    assert non_perfect_matches / len(original) <= 0.20, (
        "Poor alignment with replacement text. "
        f"{non_perfect_matches:,} of {len(original),} rows did not match well"
    )

    def closest(text: str) -> str:
        distances = replacement.apply(
            lambda comparison: Levenshtein.distance(text, comparison)
        )
        return replacement.iloc[distances.argmin()]

    result.loc[result.isna()] = original_text[result.isna()].apply(closest)
    result = result.str.strip()
    return result
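
# A minimal usage sketch for find_closest_text with hypothetical data. Here the
# replacement sentences differ from the originals only by whitespace, so every
# row matches once spaces are stripped and the Levenshtein fallback is skipped:
#
#     original = pd.Series(["the food was great", "service was slow"])
#     replacement = pd.Series(["the food  was great ", "service was slow"])
#     find_closest_text(original=original, replacement=replacement)
#     # row 0 -> "the food  was great" (stripped), row 1 -> "service was slow"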


def to_character_indices_series(row: pd.Series) -> pd.Series:
    result = to_character_indices(triplet=row.triples, text=row.text)
    return pd.Series(asdict(result))


def to_character_indices(
    *,
    triplet: Triplet,
    text: str,
) -> CharacterIndices:
    aspect_span, opinion_span, _ = triplet
    assert _is_sequential(aspect_span), f"aspect span not sequential: {aspect_span}"
    assert _is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"

    spans = WordSpans.make(text)

    aspect_start_index, aspect_end_index = spans.to_indices(aspect_span)
    aspect_term = text[aspect_start_index : aspect_end_index + 1]
    opinion_start_index, opinion_end_index = spans.to_indices(opinion_span)
    opinion_term = text[opinion_start_index : opinion_end_index + 1]

    return CharacterIndices(
        aspect_start_index=aspect_start_index,
        aspect_end_index=aspect_end_index,
        aspect_term=aspect_term,
        opinion_start_index=opinion_start_index,
        opinion_end_index=opinion_end_index,
        opinion_term=opinion_term,
    )
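
# A hedged illustration (assumed semantics): Triplet unpacks to
# (aspect_span, opinion_span, sentiment), the spans are tuples of word
# positions, and WordSpans.to_indices maps them to inclusive character
# indices. For text = "the food was great", a triplet of ((1,), (3,), "POS")
# would then yield aspect_term "food" (characters 4-7) and opinion_term
# "great" (characters 13-17):
#
#     to_character_indices(triplet=((1,), (3,), "POS"), text="the food was great")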


def to_aligned_character_indices_series(row: pd.Series) -> pd.Series:
    indices = to_character_indices(triplet=row.triples, text=row.original)
    result = to_aligned_character_indices(
        original=row.original,
        replacement=row.text,
        original_indices=indices,
    )
    return pd.Series(asdict(result))


def to_aligned_character_indices(
    *,
    original: str,
    replacement: str,
    original_indices: CharacterIndices,
) -> CharacterIndices:
    indices = _aligned_character_indices(original=original, replacement=replacement)

    aspect_start_index = _aligned_start_index(
        text=replacement,
        original_index=original_indices.aspect_start_index,
        indices=indices,
    )
    aspect_end_index = _aligned_end_index(
        text=replacement,
        original_index=original_indices.aspect_end_index,
        indices=indices,
    )
    aspect_term = replacement[aspect_start_index : aspect_end_index + 1]

    opinion_start_index = _aligned_start_index(
        text=replacement,
        original_index=original_indices.opinion_start_index,
        indices=indices,
    )
    opinion_end_index = _aligned_end_index(
        text=replacement,
        original_index=original_indices.opinion_end_index,
        indices=indices,
    )
    opinion_term = replacement[opinion_start_index : opinion_end_index + 1]

    return CharacterIndices(
        aspect_start_index=aspect_start_index,
        aspect_end_index=aspect_end_index,
        aspect_term=aspect_term,
        opinion_start_index=opinion_start_index,
        opinion_end_index=opinion_end_index,
        opinion_term=opinion_term,
    )
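
# A hedged sketch, reusing the CharacterIndices fields constructed above. If the
# original text "kool place" was corrected to "cool place", the aspect "kool"
# (characters 0-3) is re-anchored onto "cool" in the replacement, while the
# untouched opinion "place" keeps its indices:
#
#     to_aligned_character_indices(
#         original="kool place",
#         replacement="cool place",
#         original_indices=CharacterIndices(
#             aspect_start_index=0, aspect_end_index=3, aspect_term="kool",
#             opinion_start_index=5, opinion_end_index=9, opinion_term="place",
#         ),
#     )  # aspect_term -> "cool", opinion indices unchanged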


def _is_sequential(span: tuple[int, ...]) -> bool:
    return all(span[index + 1] - span[index] == 1 for index in range(len(span) - 1))
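
# For example, _is_sequential((3, 4, 5)) is True, _is_sequential((3, 5)) is
# False, and a single-element span is trivially sequential.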


def _aligned_character_indices(original: str, replacement: str) -> list[Optional[int]]:
    indices: list[Optional[int]] = list(range(len(original)))
    for operation, _source_position, destination_position in Levenshtein.editops(
        original, replacement
    ):
        if operation == "replace":
            indices[destination_position] = None
        elif operation == "insert":
            indices.insert(destination_position, None)
        elif operation == "delete":
            del indices[destination_position]
    return indices
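
# For example, aligning "kool place" with "cool place" involves a single
# substitution at position 0, so the result should be
# [None, 1, 2, 3, 4, 5, 6, 7, 8, 9]: every replacement character except the
# new "c" maps back to its index in the original text.
#
#     _aligned_character_indices("kool place", "cool place")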


def _aligned_start_index(
    text: str, original_index: int, indices: list[Optional[int]]
) -> int:
    closest_after = min(
        index for index in indices if index is not None and index >= original_index
    )
    index = indices.index(closest_after)

    # Not every character in the original text is aligned to a character in
    # the replacement text; it may have been deleted or replaced. Step back
    # through the characters until a word boundary or an aligned character
    # is found.
    while index > 0:
        if indices[index - 1] is not None:
            break
        if text[index - 1] == " ":
            break
        index -= 1
    return index
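
# Continuing the "kool place" -> "cool place" example: original index 0 ("k")
# has no aligned counterpart, so the closest aligned index is 1. Stepping back
# over the unaligned "c" recovers replacement index 0, i.e. the whole word
# "cool":
#
#     _aligned_start_index(
#         text="cool place",
#         original_index=0,
#         indices=[None, 1, 2, 3, 4, 5, 6, 7, 8, 9],
#     )  # -> 0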


def _aligned_end_index(
    text: str, original_index: int, indices: list[Optional[int]]
) -> int:
    closest_before = max(
        index for index in indices if index is not None and index <= original_index
    )
    index = indices.index(closest_before)

    # Not every character in the original text is aligned to a character in
    # the replacement text; it may have been deleted or replaced. Step forward
    # through the characters until a word boundary or an aligned character
    # is found.
    while index < len(indices) - 1:
        if indices[index + 1] is not None:
            break
        if text[index + 1] == " ":
            break
        index += 1
    return index
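
# For example, if the trailing character of the original term has no aligned
# counterpart (say "it was goodd" was corrected to "it was good!"), the end
# index snaps to the last aligned character and then steps forward over the
# unaligned tail:
#
#     _aligned_end_index(
#         text="it was good!",
#         original_index=11,
#         indices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, None],
#     )  # -> 11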