import re
import emoji

def normalize(sentence: str) -> str:
    """
    This function should be used before tokenizing the input string.

    Normalizes the input string in the following ways:
    -> Converts from ş to ș, ţ to ț, etc.
    -> Converts @mention to USER, #hashtag to HASHTAG, http... and www... to HTTPURL
    -> Converts emoticons to :emoji_with_long_name:
    -> Replaces :emoji_with_long_name: with emoji_with_long_name and replaces _, : and - with empty string
    -> Removes multiple whitespaces with a single whitespace
    Args:
        `sentence` (str): The input string.
    Returns:
        str: The normalized string.
    """

    # Make sure it's a string
    sentence = str(sentence)

    # Convert the legacy cedilla forms (ş, ţ) to the correct
    # Romanian comma-below forms (ș, ț)
    sentence = sentence.translate(str.maketrans('şŞţŢ', 'șȘțȚ'))

    # Convert @mentions to USER, #hashtags to HASHTAG, http... and www... to HTTPURL
    sentence = re.sub(r'@\S+', 'USER', sentence)
    sentence = re.sub(r'#\S+', 'HASHTAG', sentence)
    sentence = re.sub(r'http\S+', 'HTTPURL', sentence)
    sentence = re.sub(r'www\S+', 'HTTPURL', sentence)
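    # e.g. '@ion vezi #știri pe www.example.com' -> 'USER vezi HASHTAG pe HTTPURL'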

    # Convert emoji to :emoji_with_long_name: tokens (demojize handles Unicode emoji)
    sentence = emoji.demojize(sentence, delimiters=(' :', ': '))
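    # e.g. '🙂' becomes ' :slightly_smiling_face: ' with these delimiters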

    # Strip ':', '_' and '-' from each emoji token, leaving e.g. 'slightlysmilingface'
    sentence = re.sub(r':\S+:', lambda x: x.group(0).replace('_', '').replace(':', '').replace('-', ''), sentence)

    # Collapse runs of whitespace into a single space and trim the ends
    sentence = re.sub(r'\s+', ' ', sentence).strip()

    return sentence
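

# Minimal usage sketch (assumes the third-party `emoji` package is installed,
# e.g. via `pip install emoji`); the sample tweet below is illustrative only.
if __name__ == '__main__':
    sample = '@ion Vezi ştirea pe https://example.com #ştiri 🙂'
    print(normalize(sample))
    # -> 'USER Vezi știrea pe HTTPURL HASHTAG slightlysmilingface'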