#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
from pathlib import Path

import pandas as pd

from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        default="examples/preprocess/data/tweets_with_emoji",
        type=str,
    )
    parser.add_argument(
        "--output_file",
        default="data/tweets_with_emoji.jsonl",
        type=str,
    )
    args = parser.parse_args()
    return args


def print_examples():
    """Print a Markdown table with the sample count and two example texts per category."""
    args = get_args()

    data_dir = project_path / args.data_dir

    print("| sample count | category | example 1 | example 2 |")
    print("| --- | --- | --- | --- |")
    for filename in data_dir.glob("*.csv"):
        df = pd.read_csv(filename)
        length = len(df)
        # each CSV file holds one category; the file stem is the category name
        category = filename.stem

        text_list = list()
        for i, row in df.iterrows():
            row = dict(row)
            text = row["Text"]
            text = str(text).strip()
            # collapse line breaks so each example stays on a single table row
            text = text.replace("\n", " ")
            text = text.replace("\r", "")
            text_list.append(text)
            if i >= 2:
                break

        row = "| {} | {} | {} | {} |".format(length, category, text_list[0], text_list[1])
        print(row)
    return


def main():
    """Convert all category CSV files into a single JSON Lines file."""
    args = get_args()

    data_dir = project_path / args.data_dir
    output_file = project_path / args.output_file

    with open(output_file, "w", encoding="utf-8") as fout:
        for filename in data_dir.glob("*.csv"):
            df = pd.read_csv(filename)
            for i, row in df.iterrows():
                row = dict(row)
                text = row["Text"]

                row_ = {
                    "text": text,
                    "category": filename.stem
                }
                row_ = json.dumps(row_, ensure_ascii=False)
                row_ = "{}\n".format(row_)
                fout.write(row_)
    return


if __name__ == '__main__':
    # print_examples() only previews the data; call main() to write the JSONL file.
    print_examples()