""" |
|
Removes near-duplicates and tweets from top pct. of users. |
|
|
|
original -> https://github.com/cardiffnlp/timelms/blob/main/scripts/preprocess.py |
|
|
|
optional arguments: |
|
-h, --help show this help message and exit |
|
--src SRC Path to set of input tweets (.jl). |
|
--out OUT Path to output from preprocessing (.jl). |
|
--blacklist_pct BLACKLIST_PCT |
|
Percent of most frequent users to ignore. |
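
Notes:
    Each line of --src is expected to be a JSON object with at least 'username' and 'text'
    fields (field names taken from the code below), e.g.:
        {"username": "some_user", "text": "an example tweet https://t.co/xyz"}
    A file named verified_users.v091122.txt (one verified username per line, without the
    leading '@') must be present in the working directory.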
|
Example:
    python timelm_preprocessor.py --src /mnt/share/daniel_tweet_dump/2018.raw.jl --out dataset/tweets/2018.jsonline
    python timelm_preprocessor.py --src /mnt/share/daniel_tweet_dump/2019.raw.jl --out dataset/tweets/2019.jsonline
    python timelm_preprocessor.py --src /mnt/share/daniel_tweet_dump/2020.raw.jl --out dataset/tweets/2020.jsonline
    python timelm_preprocessor.py --src /mnt/share/daniel_tweet_dump/2021.raw.jl --out dataset/tweets/2021.jsonline
    python timelm_preprocessor.py --src /mnt/share/daniel_tweet_dump/2022.raw.jl --out dataset/tweets/2022.jsonline
"""

import argparse
import json
import logging
import os
import re
import string
from collections import Counter

from datasketch import MinHash, LeanMinHash
import xxhash

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')

re_url = re.compile(r'https?:\/\/[\w\.\/\?\=\d&#%_:/-]+')
re_user = re.compile(r'@\w+')
|
# verified_users.v091122.txt: one verified username per line, stored without the leading '@'.
# Mentions of these accounts are kept verbatim by clean_text; all other mentions become '@user'.
with open('verified_users.v091122.txt') as f:
    verified_users = set([f"@{i}" for i in f.read().split('\n') if len(i)])


def clean_text(text):
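    """Replace newlines/tabs with spaces, mask URLs as {URL} and non-verified @mentions as @user."""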
|
    text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
    text = re_url.sub('{URL}', text)
    users = re_user.findall(text)
    for user in users:
        if user not in verified_users:
            text = text.replace(user, '@user')
    return text


def hash_tweet(target_tweet, num_perm=16):
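    """Return a LeanMinHash signature of the tweet text (lower-cased, punctuation removed, split on whitespace)."""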
|
    def normalize_text(text):
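        """Strip punctuation and lower-case the text before tokenization."""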
|
        text = text.translate(str.maketrans('', '', string.punctuation))
        text = text.lower()
        return text

    def minhash(seq):
        # MinHash over the token sequence, using xxhash as the underlying hash function; the
        # result is converted to a LeanMinHash, which is hashable and can be kept in a set.
        m = MinHash(num_perm=num_perm, hashfunc=xxhash.xxh64_intdigest)
        for s in seq:
            m.update(s.encode('utf8'))
        return LeanMinHash(m)

    tokens = normalize_text(target_tweet['text']).split()
    return minhash(tokens)


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Removes near-duplicates and tweets from top pct. of users.')
    parser.add_argument('--src', type=str, required=True, help='Path to set of input tweets (.jl).')
    parser.add_argument('--out', type=str, required=True, help='Path to output from preprocessing (.jl).')
    parser.add_argument('--blacklist_pct', type=float, required=False, default=0.01,
                        help='Fraction of most frequent users to ignore (default 0.01 skips the top 1 percent).')
    args = parser.parse_args()
    os.makedirs(os.path.dirname(args.out), exist_ok=True)

    # 1st pass: count tweets per user so the most active accounts can be blacklisted.
    logging.info('1st pass - Collecting username counts ...')
    n_input_tweets = 0
    user_counter = Counter()
    with open(args.src) as in_tweets_f:
        for idx, jl_str in enumerate(in_tweets_f):
            if idx % 1e6 == 0:
                logging.info('1st pass - at idx %d' % idx)
            tweet = json.loads(jl_str)
            user_counter[tweet['username']] += 1
            n_input_tweets += 1

    logging.info('1st pass - Completed, found %d tweets' % n_input_tweets)
    logging.info('1st pass - Found %d users' % len(user_counter.keys()))

    # Blacklist the top --blacklist_pct fraction of users, ranked by tweet count.
    top_users = [user for user, _ in user_counter.most_common()]
    n_blacklisted_users = int(len(top_users) * args.blacklist_pct)
    blacklisted_users = set(top_users[:n_blacklisted_users])
    n_users = len(user_counter.keys())
    pct_blacklisted_users = round((n_blacklisted_users / n_users) * 100, 2)
    n_blacklisted_tweets = sum([user_counter[u] for u in blacklisted_users])
    pct_blacklisted_tweets = round((n_blacklisted_tweets / sum(user_counter.values())) * 100, 2)
    logging.info(
        f"1st pass - Blacklisted {len(blacklisted_users)} users ({pct_blacklisted_users}%), "
        f"ignoring {n_blacklisted_tweets} tweets ({pct_blacklisted_tweets}%)"
    )

    # 2nd pass: clean each tweet, then drop it if its author is blacklisted or if its MinHash
    # signature was already written (near-duplicate); everything else goes to --out.
    logging.info('2nd pass - Hashing and writing valid tweets ...')
    written_hashes = set()
    n_written = 0
    n_ignored_by_user = 0
    n_ignored_by_hash = 0
    with open(args.src) as in_tweets_f:
        with open(args.out, 'w') as out_tweets_f:
            for idx, jl_str in enumerate(in_tweets_f):
                if idx % 1e5 == 0:
                    logging.info('2nd pass - at idx %d' % idx)
                tweet = json.loads(jl_str)
                tweet['text'] = clean_text(tweet['text'])
                tweet_hash = hash_tweet(tweet)
                if tweet['username'] in blacklisted_users:
                    n_ignored_by_user += 1
                elif tweet_hash in written_hashes:
                    n_ignored_by_hash += 1
                else:
                    out_tweets_f.write(json.dumps(tweet) + '\n')
                    n_written += 1
                    written_hashes.add(tweet_hash)

logging.info(f"2nd pass - Completed, wrote {n_written} tweets.") |
|
if n_ignored_by_user > 0: |
|
logging.info(f"\tignored {n_ignored_by_user} by user blacklist") |
|
if n_ignored_by_hash > 0: |
|
logging.info(f"\tignored {n_ignored_by_hash} by hash collision") |
|
logging.info("Done") |
|
|