|
import os |
|
import json |
|
import tiktoken |
|
|
|
# Tokenizer used by token_count(); cl100k_base is the GPT-4/ChatGPT encoding.
encoding = tiktoken.get_encoding("cl100k_base")


# Output path for the plain-text merged dataset.
merged_md_file_path = "hermes-toth.txt"

# NOTE(review): despite the .json extension, this file is written in JSON Lines
# format (one JSON object per line) by merge_to_jsonl().
merged_jsonl_file_path = "hermes-toth.json"


# Base names that must never be merged into the dataset (the merged output
# itself, and the repository README).
excluded = (merged_md_file_path, "README.md")
|
|
|
|
|
def merge_to_md():
    """
    Merge every .md file under the current directory into a single text file.

    Walks the tree rooted at ".", skipping any file whose base name appears
    in ``excluded``. Documents are separated by three newlines in the output
    file ``merged_md_file_path``.
    """
    # Fix: write with an explicit UTF-8 encoding. The inputs are read as
    # UTF-8, so relying on the platform default here (e.g. cp1252 on
    # Windows) could raise UnicodeEncodeError or corrupt non-ASCII text.
    with open(merged_md_file_path, "w", encoding="utf-8") as merged_file:
        first_file = True
        for root, _, files in os.walk("."):
            for file in files:
                if file.endswith(".md") and file not in excluded:
                    print(f"Merging file: {file} into {merged_md_file_path}")
                    file_path = os.path.join(root, file)
                    with open(file_path, "r", encoding="utf-8") as f:
                        contents = f.read()
                        # Separator goes before every document except the
                        # first, so the output has no leading blank lines.
                        if not first_file:
                            merged_file.write("\n\n\n")
                        merged_file.write(contents)
                        first_file = False
|
|
|
|
|
def merge_to_jsonl():
    """
    Merge every .md file under the current directory into a JSONL file.

    Walks the tree rooted at ".", skipping any file whose base name appears
    in ``excluded``. Each document becomes one line in
    ``merged_jsonl_file_path``, serialized as ``{"text": <contents>}``.
    """
    # Fix: write with an explicit UTF-8 encoding. json.dump is called with
    # ensure_ascii=False, so non-ASCII characters are written verbatim and
    # the platform-default codec (e.g. cp1252 on Windows) could raise
    # UnicodeEncodeError on them.
    with open(merged_jsonl_file_path, "w", encoding="utf-8") as merged_file:
        first_file = True
        for root, _, files in os.walk("."):
            for file in files:
                if file.endswith(".md") and file not in excluded:
                    print(f"Merging file: {file} into {merged_jsonl_file_path}")
                    file_path = os.path.join(root, file)
                    with open(file_path, "r", encoding="utf-8") as f:
                        contents = f.read()
                        # Newline separator before every record except the
                        # first keeps one JSON object per line.
                        if not first_file:
                            merged_file.write("\n")
                        data = {"text": contents}
                        json.dump(data, merged_file, ensure_ascii=False)
                        first_file = False
|
|
|
|
|
def token_count():
    """
    Report how many tokens the merged text dataset contains.

    Reads ``merged_md_file_path``, tokenizes it with the module-level
    tiktoken ``encoding``, and prints the token count.
    """
    with open(merged_md_file_path, "r", encoding="utf-8") as dataset:
        text = dataset.read()
    total = len(encoding.encode(text))
    print(f"Merged dataset has: {total} tokens.")
|
|
|
|
|
if __name__ == "__main__":
    # Build both dataset formats first, then report the token count of the
    # plain-text merge (token_count reads the file merge_to_md produced).
    for step in (merge_to_md, merge_to_jsonl, token_count):
        step()
|
|