---
license: mit
language:
- en
tags:
- not-for-all-audiences
- chemistry
- biology
- finance
- legal
- music
- art
- code
- climate
- medical
pretty_name: Easy Reddit
size_categories:
- 100M<n<1B
task_categories:
- question-answering
---
|
|
|
|
|
![image/png](https://cdn-uploads.huggingface.co/production/uploads/62a3bb1cd0d8c2c2169f0b88/N_RqZSJ32MDIrRGbLcPqm.png) |
|
|
|
# 🙋🏻♂️Welcome to 🧑🏻🚀Tonic's🚀🚰Easy🔴Reddit🔥! |
|
|
|
This is every shard of "reddit_question_best_answers" appended together and reformatted according to the following JSONL template:
|
|
|
```json |
|
{"prompt": "This is the first prompt", "completion": "This is the first completion"} |
|
{"prompt": "This is the second prompt", "completion": "This is the second completion"} |
|
``` |
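Because every shard is plain JSONL in this shape, one straightforward way to read it is the 🤗 `datasets` JSON loader. This is a minimal sketch that assumes a shard has already been downloaded locally; `shard_1.jsonl` is just a placeholder file name:

```python
from datasets import load_dataset

# Minimal sketch: load one locally downloaded shard with the generic JSON/JSONL loader.
# "shard_1.jsonl" is a placeholder path; point it at whichever shard file you have.
dataset = load_dataset("json", data_files="shard_1.jsonl", split="train")

print(dataset[0]["prompt"])
print(dataset[0]["completion"])
```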
|
|
|
|
|
- 🌟 This dataset is internally consistent: every row follows the same `{"prompt": ..., "completion": ...}` schema (a quick check is sketched below)
|
|
|
|
|
🤔The point is to make it easy to train models with a single, correctly formatted dataset of:

- 54,367,153 rows
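If you want to verify that consistency yourself, a quick pass over a shard confirms that every line parses and carries both keys. A minimal sketch, assuming a locally downloaded `shard_1.jsonl` (placeholder name):

```python
import json

# Minimal sketch: check that every line of a shard is valid JSON with both expected keys.
# "shard_1.jsonl" is a placeholder for any locally downloaded shard file.
total, bad = 0, 0
with open("shard_1.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        total += 1
        try:
            row = json.loads(line)
        except json.JSONDecodeError:
            bad += 1
            continue
        if "prompt" not in row or "completion" not in row:
            bad += 1

print(f"Checked {total} rows, found {bad} problems")
```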
|
|
|
# Original Dataset
|
|
|
[nreimers/reddit_question_best_answers](https://huggingface.co/datasets/nreimers/reddit_question_best_answers) |
|
|
|
# How To Use
|
|
|
Combine random shards in random quantities to produce a high-quality conversational training dataset for fine-tuning. The snippet below only picks which shard to use and how many times to sample it; actually reading the selected shards is shown in the sketch that follows it:
|
|
|
```python
import random

# Define the shard names (shard_1 through shard_33)
shards = [f"shard_{i}" for i in range(1, 34)]

# Pick a random shard and a random quantity (selection only; no files are read here)
def combine_shards():
    # Select a random shard
    selected_shard = random.choice(shards)

    # Select a random quantity (1-5)
    quantity = random.randint(1, 5)

    return selected_shard, quantity

# Example usage
for _ in range(10):  # Combine 10 times as an example
    shard, qty = combine_shards()
    print(f"Combined {qty} of {shard}")
```
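To turn those selections into actual training data, the shard files still have to be read. A minimal sketch, assuming the shards have been downloaded locally as `shard_1.jsonl` through `shard_33.jsonl` (placeholder names) and using the 🤗 `datasets` JSON loader:

```python
import random
from datasets import load_dataset

# Minimal sketch: load a few randomly chosen shards and shuffle them together.
# Assumes locally downloaded files named shard_1.jsonl ... shard_33.jsonl (placeholders).
shards = [f"shard_{i}.jsonl" for i in range(1, 34)]
selected_files = random.sample(shards, k=3)  # e.g. pick three distinct shards

combined = load_dataset("json", data_files=selected_files, split="train").shuffle(seed=42)
print(combined)
```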
|
|
|
Or try combining rows line by line to keep memory use down (each selected shard is still read fully before sampling; a streaming variant is sketched after the example):
|
|
|
```python
# see selectbyline.py

import os
import random

# Directory containing the shard JSONL files
shard_directory = "/path/to/shard/directory"

# Get a list of all JSONL files in the directory
shard_files = [f for f in os.listdir(shard_directory) if f.endswith('.jsonl')]

# Read a random number of lines (between min_lines and max_lines) from a file
def read_random_lines(filename, min_lines, max_lines):
    num_lines = random.randint(min_lines, max_lines)

    with open(filename, 'r') as file:
        lines = list(file)

    # If the shard has fewer lines than requested, return it whole
    if len(lines) <= num_lines:
        return lines

    return random.sample(lines, num_lines)

# Combine random samples from random shards into a single output file
def combine_shards(output_filename, num_combinations):
    with open(output_filename, 'w') as output_file:
        for _ in range(num_combinations):
            selected_shard_file = random.choice(shard_files)
            lines = read_random_lines(os.path.join(shard_directory, selected_shard_file), 5000, 10000)
            output_file.writelines(lines)

# Example usage
combine_shards("/path/to/output/combined_shards.jsonl", 10)
```
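If even a single shard is too large to read into memory at once, reservoir sampling draws the same kind of random subset while keeping only the sampled lines in memory. This is a sketch of that alternative, not part of the original script:

```python
import random

# Sketch of a reservoir-sampling variant of read_random_lines: it streams the file
# line by line and never holds more than num_lines lines in memory.
def read_random_lines_streaming(filename, min_lines, max_lines):
    num_lines = random.randint(min_lines, max_lines)
    reservoir = []
    with open(filename, 'r') as file:
        for i, line in enumerate(file):
            if i < num_lines:
                reservoir.append(line)
            else:
                # Replace an existing sample with decreasing probability (Algorithm R)
                j = random.randint(0, i)
                if j < num_lines:
                    reservoir[j] = line
    return reservoir
```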
|
|
|
# Pre-Processing |
|
|
|
```python
import json
import os
import gzip
import logging
import re
import random

# Setup basic logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

def clean_string(s):
    """Remove special characters, keeping only alphanumeric characters and spaces."""
    if isinstance(s, list):
        # Extract text from each dictionary in the list and join into a single string
        s = " ".join([d.get("body", "") if isinstance(d, dict) else str(d) for d in s])
    return re.sub(r'[^A-Za-z0-9 ]+', '', s)

def process_file(input_file, output_file):
    try:
        dataset = []
        with gzip.open(input_file, 'rt') as infile:
            for line in infile:
                # Parse the JSON line
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    logging.error(f"Invalid JSON format in {input_file}: {line}")
                    continue

                # Extract and clean the 'body' and 'answers' fields
                prompt = clean_string(data.get("body", ""))
                completion = clean_string(data.get("answers", ""))

                # If 'body' is a list, make a new row per body and duplicate the completion for it
                if isinstance(data.get("body", ""), list):
                    for body in data.get("body", []):
                        cleaned_body = clean_string(body)
                        dataset.append({"prompt": cleaned_body, "completion": completion})
                else:
                    dataset.append({"prompt": prompt, "completion": completion})

        # Shuffle the dataset
        random.shuffle(dataset)

        # Append the shuffled dataset to the single output file
        with open(output_file, 'a') as outfile:
            for item in dataset:
                json.dump(item, outfile)
                outfile.write('\n')

        logging.info(f"Processed file: {input_file}")

    except Exception as e:
        logging.error(f"Error processing file {input_file}: {e}")

def process_files(file_list, output_dir):
    # Ensure the output directory exists
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Create a single output file path
    output_file = os.path.join(output_dir, 'synthesized_dataset.jsonl')

    for input_file in file_list:
        process_file(input_file, output_file)

# Update with your list of .gz file paths
file_list = [r'C:\Users\MeMyself\FILES', r'C:\Users\MeMyself\FILES']
output_dir = r'C:\Users\MeMyself\reddit_question_best_answers\processed'
process_files(file_list, output_dir)
```
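Before training on the result, it is worth a quick sanity check of the generated file. A small sketch that counts rows and prints the first example from the `synthesized_dataset.jsonl` produced above (adjust the path to your own output directory):

```python
import json
import os

# Quick sanity check of the preprocessed output: count rows and eyeball the first example.
# The path mirrors the output location used in the script above; adjust as needed.
output_path = os.path.join(r'C:\Users\MeMyself\reddit_question_best_answers\processed',
                           'synthesized_dataset.jsonl')

count = 0
first_row = None
with open(output_path, 'r') as f:
    for line in f:
        row = json.loads(line)
        if first_row is None:
            first_row = row
        count += 1

print(f"{count} rows written")
print(first_row)
```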
|
|
|
#### **Sharding script**:
- [shard.py](https://huggingface.co/datasets/Tonic/WellReddit/blob/main/shard.py) (a rough sketch of the idea follows below)
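The linked `shard.py` is not reproduced here. For orientation, a sharding step along these lines would split the combined JSONL into fixed-size pieces; this is a hypothetical sketch, and the actual script may differ:

```python
import os

# Hypothetical sketch of a sharding step: split one large JSONL file into
# fixed-size shard_<n>.jsonl pieces. The real shard.py linked above may differ.
def shard_jsonl(input_path, output_dir, lines_per_shard=2_000_000):
    os.makedirs(output_dir, exist_ok=True)
    shard_index = 1
    out = open(os.path.join(output_dir, f"shard_{shard_index}.jsonl"), "w")
    with open(input_path, "r") as infile:
        for i, line in enumerate(infile):
            if i > 0 and i % lines_per_shard == 0:
                out.close()
                shard_index += 1
                out = open(os.path.join(output_dir, f"shard_{shard_index}.jsonl"), "w")
            out.write(line)
    out.close()

# Example usage (paths are placeholders)
shard_jsonl("synthesized_dataset.jsonl", "shards")
```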
|
|
|
|
|
|
|
🌟 Probably there's a **big problem with the token count** on the longest answers 😉; a simple length filter is sketched below.
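One way to cope is to drop (or truncate) rows whose prompt plus completion is extremely long before training. The sketch below uses a crude whitespace token count; the 2048 budget and the file names are placeholders, not recommendations from the dataset:

```python
import json

# Rough length filter: drop rows whose prompt + completion exceeds a crude
# whitespace-token budget. The 2048 threshold and file names are placeholders.
MAX_TOKENS = 2048

with open("combined_shards.jsonl", "r") as infile, open("filtered.jsonl", "w") as outfile:
    for line in infile:
        row = json.loads(line)
        n_tokens = len(row["prompt"].split()) + len(row["completion"].split())
        if n_tokens <= MAX_TOKENS:
            outfile.write(line)
```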
|
|
|
🌟**Good luck**! 🧑🏻🚀🚀