---
license: mit
language:
- en
tags:
- not-for-all-audiences
- chemistry
- biology
- finance
- legal
- music
- art
- code
- climate
- medical
pretty_name: Easy Reddit
size_categories:
- 100M<n<1B
task_categories:
- question-answering
configs:
- config_name: shards
  data_files:
  - split: train
    path: "data/*.jsonl"
---


# 🙋🏻‍♂️Welcome to 🧑🏻‍🚀Tonic's🚀🚰Easy🔴Reddit🔥!


This dataset contains every entry from "reddit_question_best_answers", concatenated and reformatted according to the following template:

```json
{"prompt": "This is the first prompt", "completion": "This is the first completion"}
{"prompt": "This is the second prompt", "completion": "This is the second completion"}
```

![image/png](https://cdn-uploads.huggingface.co/production/uploads/62a3bb1cd0d8c2c2169f0b88/N_RqZSJ32MDIrRGbLcPqm.png)


- 🌟 You can use it shard by shard or all together!

- 🌟 This dataset is **internally consistent**: every shard follows the same prompt/completion format!



🤔The point is to make it easy to train models with a single correctly formatted dataset of

- 54,367,153 rows
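
To load it directly from the Hub, something like the following should work (a minimal sketch using the 🤗 `datasets` library; the repo id below is a placeholder, substitute this repository's actual id):

```python
from datasets import load_dataset

# Stream the "shards" config so the ~54M rows are not all loaded into memory at once.
# NOTE: "Tonic/Easy_Reddit" is a placeholder repo id - replace it with the real one.
dataset = load_dataset("Tonic/Easy_Reddit", "shards", split="train", streaming=True)

for example in dataset.take(3):
    print(example["prompt"][:80], "->", example["completion"][:80])
```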

# Original Dataset :

[nreimers/reddit_question_best_answers](https://huggingface.co/datasets/nreimers/reddit_question_best_answers)

# How To Use : 

Combine random shards in random quantities to produce a high-quality conversational training dataset for fine-tuning, or sample a random subset of rows from each shard with the following script (a lower-memory, streaming variant follows it):

```python

# see selectbyline.py

import os
import random

# Directory containing the shard JSONL files
shard_directory = "/path/to/shard/directory"

# Get a list of all JSONL files in the directory
shard_files = [f for f in os.listdir(shard_directory) if f.endswith('.jsonl')]

# Function to read a random number of lines (between min_lines and max_lines) from a file
def read_random_lines(filename, min_lines, max_lines):
    selected_lines = []
    num_lines = random.randint(min_lines, max_lines)

    with open(filename, 'r') as file:
        lines = list(file)
        if len(lines) <= num_lines:
            return lines
        selected_lines = random.sample(lines, num_lines)

    return selected_lines

# Function to combine shards
def combine_shards(output_filename, num_combinations):
    with open(output_filename, 'w') as output_file:
        for _ in range(num_combinations):
            selected_shard_file = random.choice(shard_files)
            lines = read_random_lines(os.path.join(shard_directory, selected_shard_file), 5000, 10000)
            output_file.writelines(lines)

# Example usage
combine_shards("/path/to/output/combined_shards.jsonl", 10)

```
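
If the shards are too large to read fully into memory, a reservoir-sampling variant selects lines while streaming through the file instead (a sketch under the same directory-layout assumptions as above; the paths are placeholders):

```python
import random

def sample_lines_streaming(filename, num_lines):
    """Reservoir-sample up to `num_lines` lines without loading the whole shard into memory."""
    reservoir = []
    with open(filename, 'r') as file:
        for i, line in enumerate(file):
            if i < num_lines:
                reservoir.append(line)
            else:
                # Replace an existing pick with decreasing probability (Algorithm R).
                j = random.randint(0, i)
                if j < num_lines:
                    reservoir[j] = line
    return reservoir

# Example: append ~5,000 randomly sampled lines from one shard to a combined file.
shard_path = "/path/to/shard/directory/shard_1.jsonl"  # placeholder path
with open("/path/to/output/combined_shards.jsonl", 'a') as out:
    out.writelines(sample_lines_streaming(shard_path, 5000))
```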

# Pre-Processing

```python
import json
import os
import gzip
import logging
import re
import random

# Setup basic logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

def clean_string(s):
    """Remove special characters, keeping only alphanumeric characters and spaces."""
    if isinstance(s, list):
        # Extract text from each dictionary in the list and join into a single string
        s = " ".join([d.get("body", "") if isinstance(d, dict) else str(d) for d in s])
    return re.sub(r'[^A-Za-z0-9 ]+', '', s)

def process_file(input_file, output_file):
    try:
        dataset = []
        with gzip.open(input_file, 'rt') as infile:
            for line in infile:
                # Parse the JSON line
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    logging.error(f"Invalid JSON format in {input_file}: {line}")
                    continue

                # Extract and clean the 'body' and 'answers' fields
                prompt = clean_string(data.get("body", ""))
                completion = clean_string(data.get("answers", ""))

                # For each body found, make a new row and duplicate the prompt for it
                if isinstance(data.get("body", ""), list):
                    for body in data.get("body", []):
                        cleaned_body = clean_string(body)
                        dataset.append({"prompt": cleaned_body, "completion": completion})
                else:
                    dataset.append({"prompt": prompt, "completion": completion})

        # Shuffle the dataset
        random.shuffle(dataset)

        # Write the shuffled dataset to the output file
        with open(output_file, 'a') as outfile:
            for item in dataset:
                json.dump(item, outfile)
                outfile.write('\n')

        logging.info(f"Processed file: {input_file}")

    except Exception as e:
        logging.error(f"Error processing file {input_file}: {e}")

def process_files(file_list, output_dir):
    # Ensure the output directory exists
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Create a single output file path
    output_file = os.path.join(output_dir, 'synthesized_dataset.jsonl')

    for input_file in file_list:
        process_file(input_file, output_file)

# Update with your list of .gz file paths
file_list = [r'C:\Users\MeMyself\FILES', r'C:\Users\MeMyself\FILES']
output_dir = r'C:\Users\MeMyself\reddit_question_best_answers\processed'
process_files(file_list, output_dir)
```

#### **sharding script** : 

```python

import json
import os

def read_dataset(file_path):
    try:
        with open(file_path, 'r') as file:
            data = [json.loads(line) for line in file]
        print(f"Dataset loaded successfully from {file_path}.")
        return data
    except Exception as e:
        print(f"Error reading dataset from {file_path}: {e}")
        return []

def shard_dataset(dataset, num_shards):
    shard_size = len(dataset) // num_shards
    shards = [dataset[i:i + shard_size] for i in range(0, len(dataset), shard_size)]
    if len(shards) > num_shards:
        shards[num_shards - 1].extend(shards.pop())
    print(f"Dataset sharded into {num_shards} parts.")
    return shards

def write_shards(shards, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created output directory at {output_dir}.")

    for i, shard in enumerate(shards):
        shard_file = os.path.join(output_dir, f'shard_{i+1}.jsonl')
        with open(shard_file, 'w') as file:
            for item in shard:
                json.dump(item, file)
                file.write('\n')
        print(f"Shard {i+1} written to {shard_file}.")

def main():
    input_file = 'path_to_processed_dataset.jsonl'  # Update with your processed dataset file path
    output_dir = 'sharded_dataset'  # Update with your output directory for shards
    num_shards = 33

    dataset = read_dataset(input_file)
    if dataset:
        shards = shard_dataset(dataset, num_shards)
        write_shards(shards, output_dir)
        print("All shards have been successfully written.")
    else:
        print("No dataset to process.")

if __name__ == "__main__":
    main()

```

# Disclaimer :

🌟Re-format this dataset before use.

🌟Some of these long answers probably **blow way past typical token limits** 😉, so consider filtering rows by token count first (a minimal sketch follows below).

🌟**Good Luck !** 🧑🏻‍🚀🚀
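
A hedged example of such a token-count filter (a sketch assuming a 🤗 `transformers` tokenizer; the `gpt2` tokenizer, file names, and the 1,024-token cutoff are all placeholders):

```python
import json
from transformers import AutoTokenizer

# Placeholder tokenizer and cutoff - swap in whatever your target model actually uses.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
MAX_TOKENS = 1024

# Keep only rows whose combined prompt + completion fits within the cutoff.
with open("shard_1.jsonl", "r") as infile, open("shard_1_filtered.jsonl", "w") as outfile:
    for line in infile:
        row = json.loads(line)
        text = row["prompt"] + " " + row["completion"]
        if len(tokenizer.encode(text)) <= MAX_TOKENS:
            outfile.write(json.dumps(row) + "\n")
```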