---
license: mit
language:
- en
tags:
- not-for-all-audiences
- chemistry
- biology
- finance
- legal
- music
- art
- code
- climate
- medical
pretty_name: Easy Reddit
size_categories:
- 100M<n<1B
task_categories:
- question-answering
---


![image/png](https://cdn-uploads.huggingface.co/production/uploads/62a3bb1cd0d8c2c2169f0b88/N_RqZSJ32MDIrRGbLcPqm.png)

# 🙋🏻‍♂️Welcome to 🧑🏻‍🚀Tonic's🚀🚰Easy🔴Reddit🔥!

This is every shard of "reddit_question_best_answers" appended together and reformatted according to the following template :

```json
{"prompt": "This is the first prompt", "completion": "This is the first completion"}
{"prompt": "This is the second prompt", "completion": "This is the second completion"}
```
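
Each line is standalone JSON, so a shard can be read with nothing more than the standard library (a minimal sketch; the shard filename below is a placeholder for any shard file in this repository):

```python
import json

# Read one shard line by line; "shard_1.jsonl" is a placeholder filename.
with open("shard_1.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        print(row["prompt"], "->", row["completion"][:80])
        break
```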


- 🌟 This dataset is internally consistent : every row follows the same prompt/completion schema


🤔The point is to make it easy to train models with a single correctly formatted dataset of

- 54,367,153 rows

# Original Dataset :

[nreimers/reddit_question_best_answers](https://huggingface.co/datasets/nreimers/reddit_question_best_answers)

# How To Use : 

Combine random shards in random quantities to produce a high quality conversational training dataset for fine tuning. The first snippet only illustrates picking shards and quantities at random; the line-by-line version further down actually reads and writes rows:

```python

import random

# Shard identifiers; adjust the range to match the number of shard files you have
shards = [f"shard_{i}" for i in range(1, 34)]

# Pick one shard at random together with a random quantity (1-5)
def pick_random_shard():
    selected_shard = random.choice(shards)
    quantity = random.randint(1, 5)
    return selected_shard, quantity

# Example usage: draw 10 random (shard, quantity) pairs
for _ in range(10):
    shard, qty = pick_random_shard()
    print(f"Selected {qty} of {shard}")

```

Or try combining rows line by line to save memory :

```python

# see selectbyline.py

import os
import random

# Directory containing the shard JSONL files
shard_directory = "/path/to/shard/directory"

# Get a list of all JSONL files in the directory
shard_files = [f for f in os.listdir(shard_directory) if f.endswith('.jsonl')]

# Function to read a random number of lines (between min_lines and max_lines) from a file
def read_random_lines(filename, min_lines, max_lines):
    selected_lines = []
    num_lines = random.randint(min_lines, max_lines)

    with open(filename, 'r') as file:
        lines = list(file)
        if len(lines) <= num_lines:
            return lines
        selected_lines = random.sample(lines, num_lines)

    return selected_lines

# Function to combine shards
def combine_shards(output_filename, num_combinations):
    with open(output_filename, 'w') as output_file:
        for _ in range(num_combinations):
            selected_shard_file = random.choice(shard_files)
            lines = read_random_lines(os.path.join(shard_directory, selected_shard_file), 5000, 10000)
            output_file.writelines(lines)

# Example usage
combine_shards("/path/to/output/combined_shards.jsonl", 10)

```
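
Once a combined file exists, it can be loaded back for fine tuning (a minimal sketch using the `datasets` library; the path is the example output path from above):

```python
from datasets import load_dataset

# Load the combined JSONL produced above into a Hugging Face Dataset.
combined = load_dataset(
    "json",
    data_files="/path/to/output/combined_shards.jsonl",
    split="train",
)
print(combined)      # columns: prompt, completion
print(combined[0])
```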

# Pre-Processing

```python
import json
import os
import gzip
import logging
import re
import random

# Setup basic logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

def clean_string(s):
    """Remove special characters, keeping only alphanumeric characters and spaces."""
    if isinstance(s, list):
        # Extract text from each dictionary in the list and join into a single string
        s = " ".join([d.get("body", "") if isinstance(d, dict) else str(d) for d in s])
    return re.sub(r'[^A-Za-z0-9 ]+', '', s)

def process_file(input_file, output_file):
    try:
        dataset = []
        with gzip.open(input_file, 'rt') as infile:
            for line in infile:
                # Parse the JSON line
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    logging.error(f"Invalid JSON format in {input_file}: {line}")
                    continue

                # Extract and clean the 'body' and 'answers' fields
                prompt = clean_string(data.get("body", ""))
                completion = clean_string(data.get("answers", ""))

                # If 'body' is a list, create one row per entry, reusing the same completion
                if isinstance(data.get("body", ""), list):
                    for body in data.get("body", []):
                        cleaned_body = clean_string(body)
                        dataset.append({"prompt": cleaned_body, "completion": completion})
                else:
                    dataset.append({"prompt": prompt, "completion": completion})

        # Shuffle the dataset
        random.shuffle(dataset)

        # Write the shuffled dataset to the output file
        with open(output_file, 'a') as outfile:
            for item in dataset:
                json.dump(item, outfile)
                outfile.write('\n')

        logging.info(f"Processed file: {input_file}")

    except Exception as e:
        logging.error(f"Error processing file {input_file}: {e}")

def process_files(file_list, output_dir):
    # Ensure the output directory exists
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Create a single output file path
    output_file = os.path.join(output_dir, 'synthesized_dataset.jsonl')

    for input_file in file_list:
        process_file(input_file, output_file)

# Update with your list of .gz file paths
file_list = [r'C:\Users\MeMyself\FILES', r'C:\Users\MeMyself\FILES']
output_dir = r'C:\Users\MeMyself\reddit_question_best_answers\processed'
process_files(file_list, output_dir)
```

#### **sharding script** : 
- [here](https://huggingface.co/datasets/Tonic/WellReddit/blob/main/shard.py)
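
The linked script is the canonical version; a minimal sketch of the same idea, splitting one large JSONL file into numbered shards, could look like this (shard size and paths are placeholders):

```python
import os

def shard_jsonl(input_path, output_dir, lines_per_shard=1_000_000):
    """Split one large JSONL file into numbered shards (sketch; size and paths are placeholders)."""
    os.makedirs(output_dir, exist_ok=True)
    shard_idx, line_count, out = 1, 0, None
    with open(input_path, "r", encoding="utf-8") as infile:
        for line in infile:
            # Open a new shard file whenever the current one is full (or none is open yet)
            if out is None or line_count >= lines_per_shard:
                if out:
                    out.close()
                out = open(os.path.join(output_dir, f"shard_{shard_idx}.jsonl"), "w", encoding="utf-8")
                shard_idx += 1
                line_count = 0
            out.write(line)
            line_count += 1
    if out:
        out.close()
```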



🌟Heads up: some answers are very long, so there's probably a **big problem with the token count** if you train with a short context window 😉
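
A minimal sketch for checking whether a row fits a given token budget (the tokenizer name is a placeholder for whichever model you plan to fine tune):

```python
from transformers import AutoTokenizer

# Placeholder tokenizer; swap in the tokenizer of your target model.
tokenizer = AutoTokenizer.from_pretrained("gpt2")

def within_budget(row, max_tokens=2048):
    """Return True if prompt + completion fit inside the token budget."""
    n_tokens = len(tokenizer.encode(row["prompt"] + " " + row["completion"]))
    return n_tokens <= max_tokens
```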

🌟**good luck** !🧑🏻‍🚀🚀