# Incremental converter: packs Common Crawl ("CC-MAIN-*") parquet shards into
# per-bucket .jsonl.lzma archives.
#
# Usage: python <script> <shard_index> [fast]
#   shard_index : which window of `offset` buckets this worker handles
#   fast        : optional; only (re)process parquet files newer than the
#                 existing archive instead of rescanning it
import glob
import gzip  # not used in the visible code; kept — file may be chunked
import json
import lzma
import os
import sys
import time  # not used in the visible code; kept — file may be chunked

import pandas

# Explicit length check instead of the original bare `except:`, which also
# swallowed SystemExit/KeyboardInterrupt just to detect a missing argument.
fast_mode = len(sys.argv) > 2 and sys.argv[2] == "fast"

# Each worker processes a contiguous window of `offset` buckets;
# sys.argv[1] selects the window (worker shard index).
offset = 10
index = int(sys.argv[1]) * offset
|
|
# For every dump, take this worker's window of buckets and append each
# bucket's parquet rows (as {"text", "id"} JSON lines) to an lzma archive.
# Archives are incremental: already-present file_ids are skipped.
dumps = glob.glob("CC-MAIN-*")
for dump in dumps:
    all_buckets = glob.glob(f"{dump}/CC-MAIN-*")
    # This worker only handles its own slice of the bucket list.
    buckets = all_buckets[index : index + offset]
    print("\n\n- - - - -\n", dump, "has", len(all_buckets), flush=True)
    print(fast_mode, index, offset, buckets, flush=True)
    for bucket in buckets:
        files = glob.glob(f"{bucket}/*.parquet")
        # Flatten "dump/bucket" into a single flat archive-name component.
        bucket = bucket.replace(f"{dump}/", f"{dump}_")
        bucket = bucket.replace("CC-MAIN-", "")
        output_file = f"jsonl/{bucket}.jsonl.lzma"
        print(output_file, flush=True)

        added_file_ids = set()
        if os.path.exists(output_file):
            ti_mcur = os.path.getmtime(output_file)
            ti_mmax = 0
            remains = []
            for file in files:
                ti_m = os.path.getmtime(file)
                if ti_m > ti_mmax:
                    ti_mmax = ti_m
                if fast_mode and ti_m > ti_mcur:
                    remains.append(file)

            # Archive is newer than every input file -> nothing to do.
            if ti_mcur > ti_mmax:
                continue

            if fast_mode:
                # Only reprocess parquet files modified after the archive.
                files = remains
            else:
                # Scan the existing archive once to learn which file_ids it
                # already contains.  skip_token short-circuits the string
                # split for consecutive lines of the same file; the initial
                # value is a placeholder that matches no real line.
                skip_token = '"id": "000000000."'
                with lzma.open(output_file, "rt") as fin:
                    for count, line in enumerate(fin):
                        if skip_token not in line:
                            file_id = line.split('"id": "')[1].split(".")[0]
                            skip_token = f'"id": "{file_id}.'
                            # A file_id reappearing non-contiguously means a
                            # corrupt archive; raise explicitly instead of
                            # `assert`, which is stripped under `python -O`.
                            if file_id in added_file_ids:
                                raise RuntimeError(
                                    f"duplicate file id {file_id!r} in {output_file}"
                                )
                            added_file_ids.add(file_id)

        # "at" append mode: lzma transparently writes an additional
        # compressed stream after the existing data.
        with lzma.open(output_file, "at") as fout:
            for file in files:
                # basename instead of split("/") so Windows-style separators
                # from glob also work.
                file_id = os.path.basename(file).replace(".parquet", "")
                if file_id in added_file_ids:
                    continue
                print(f"Adding {file_id} to {output_file}...", flush=True)
                df = pandas.read_parquet(file)
                for line_count, row in df.iterrows():
                    idd = f"{file_id}.{line_count}"
                    # .iloc[0] = positional access to the first column;
                    # `row[0]` relied on the deprecated integer-label
                    # fallback removed in pandas 3.0.
                    ss = json.dumps(
                        {"text": row.iloc[0], "id": idd}, ensure_ascii=False
                    )
                    fout.write(ss + "\n")

print("fast_mode", fast_mode)
|
|