chitravyanjan
committed on
Commit · 630930b
1 Parent(s): 3a8f40d
reddit code
- README.md +2 -3
- nirmaan/minireddit/minireddit-creation.ipynb +172 -0
- nirmaan/minireddit/minireddit.ipynb +174 -0
- nirmaan/preddit/preddit.ipynb +242 -0
- nirmaan/subreddit.ipynb +291 -0
- reddit/LICENSE +21 -0
- reddit/README.md +58 -0
- reddit/__init__.py +2 -0
- reddit/core.py +162 -0
- reddit/reddit_info.py +211 -0
- reddit/reddit_processor.py +163 -0
- reddit/redditd.py +142 -0
- reddit/subreddits.py +194 -0
- reddit/utils.py +92 -0
- reddit_urls_10k.txt +0 -0
- reddit_urls_50k.txt +0 -0
- requirements.txt +1 -0
- run_reddit.old.py +146 -0
README.md
CHANGED
@@ -1,3 +1,2 @@
-
-
----
+# reddew
+Reddit Download and Datasets
nirmaan/minireddit/minireddit-creation.ipynb
ADDED
@@ -0,0 +1,172 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "markdown",
|
5 |
+
"metadata": {},
|
6 |
+
"source": [
|
7 |
+
"### Code to create Mini-Reddit"
|
8 |
+
]
|
9 |
+
},
|
10 |
+
{
|
11 |
+
"cell_type": "code",
|
12 |
+
"execution_count": 1,
|
13 |
+
"metadata": {},
|
14 |
+
"outputs": [],
|
15 |
+
"source": [
|
16 |
+
"from pathlib import Path\n",
|
17 |
+
"from garage.data import image_listing\n",
|
18 |
+
"from PIL import Image\n",
|
19 |
+
"from random import shuffle\n",
|
20 |
+
"import shutil\n",
|
21 |
+
"from tqdm import tqdm\n",
|
22 |
+
"import re\n",
|
23 |
+
"\n",
|
24 |
+
"import numpy as np\n",
|
25 |
+
"import math"
|
26 |
+
]
|
27 |
+
},
|
28 |
+
{
|
29 |
+
"cell_type": "code",
|
30 |
+
"execution_count": 2,
|
31 |
+
"metadata": {},
|
32 |
+
"outputs": [
|
33 |
+
{
|
34 |
+
"name": "stdout",
|
35 |
+
"output_type": "stream",
|
36 |
+
"text": [
|
37 |
+
"179443\n"
|
38 |
+
]
|
39 |
+
}
|
40 |
+
],
|
41 |
+
"source": [
|
42 |
+
"reddit_dir = Path(\"~/me/data/reddit\").expanduser()\n",
|
43 |
+
"assert reddit_dir.exists()\n",
|
44 |
+
"subreddit_dir = [d for d in reddit_dir.iterdir() if d.is_dir()]\n",
|
45 |
+
"image_files = image_listing.find_images(reddit_dir)\n",
|
46 |
+
"print(len(image_files))"
|
47 |
+
]
|
48 |
+
},
|
49 |
+
{
|
50 |
+
"cell_type": "code",
|
51 |
+
"execution_count": 3,
|
52 |
+
"metadata": {},
|
53 |
+
"outputs": [],
|
54 |
+
"source": [
|
55 |
+
"output_dir = Path(\"~/me/data/reddit-tiny\").expanduser()\n",
|
56 |
+
"pcnt = 0.05\n",
|
57 |
+
"num_image_files = len(image_files)\n",
|
58 |
+
"num_out_files = int(math.ceil(pcnt*num_image_files))\n",
|
59 |
+
"shuffle(image_files)\n",
|
60 |
+
"num_out_files\n",
|
61 |
+
"output_dir.mkdir()"
|
62 |
+
]
|
63 |
+
},
|
64 |
+
{
|
65 |
+
"cell_type": "code",
|
66 |
+
"execution_count": 4,
|
67 |
+
"metadata": {},
|
68 |
+
"outputs": [
|
69 |
+
{
|
70 |
+
"data": {
|
71 |
+
"text/plain": [
|
72 |
+
"8973"
|
73 |
+
]
|
74 |
+
},
|
75 |
+
"execution_count": 4,
|
76 |
+
"metadata": {},
|
77 |
+
"output_type": "execute_result"
|
78 |
+
}
|
79 |
+
],
|
80 |
+
"source": [
|
81 |
+
"num_out_files"
|
82 |
+
]
|
83 |
+
},
|
84 |
+
{
|
85 |
+
"cell_type": "code",
|
86 |
+
"execution_count": 5,
|
87 |
+
"metadata": {},
|
88 |
+
"outputs": [
|
89 |
+
{
|
90 |
+
"name": "stderr",
|
91 |
+
"output_type": "stream",
|
92 |
+
"text": [
|
93 |
+
" 0%| | 0/8973 [00:00<?, ?it/s]"
|
94 |
+
]
|
95 |
+
},
|
96 |
+
{
|
97 |
+
"name": "stderr",
|
98 |
+
"output_type": "stream",
|
99 |
+
"text": [
|
100 |
+
"writing: /Users/reeteshmukul/me/data/reddit-tiny/apocalypse/scheduleddeliv_v9jc7m.jpeg: 100%|██████████| 8973/8973 [41:31<00:00, 3.60it/s] \n"
|
101 |
+
]
|
102 |
+
}
|
103 |
+
],
|
104 |
+
"source": [
|
105 |
+
"idx = 0\n",
|
106 |
+
"with tqdm(total=num_out_files) as pbar:\n",
|
107 |
+
" \n",
|
108 |
+
" while idx < num_image_files:\n",
|
109 |
+
" f = Path(image_files[idx])\n",
|
110 |
+
" idx = idx + 1\n",
|
111 |
+
"\n",
|
112 |
+
" try:\n",
|
113 |
+
" im = Image.open(f)\n",
|
114 |
+
" size = im.size\n",
|
115 |
+
" pixels = max(1, size[0]) * max(1, size[1])\n",
|
116 |
+
"\n",
|
117 |
+
" if pixels > Image.MAX_IMAGE_PIXELS: continue\n",
|
118 |
+
" nim = np.array(im)\n",
|
119 |
+
" if len(nim.shape)!= 3 or nim.shape[-1] != 3 : continue\n",
|
120 |
+
"\n",
|
121 |
+
" subreddit = f.relative_to(reddit_dir).parent\n",
|
122 |
+
" \n",
|
123 |
+
" dname = subreddit.name \n",
|
124 |
+
" dname = dname.lower()\n",
|
125 |
+
" if \"porn\" in dname:\n",
|
126 |
+
" dname = dname.replace(\"porn\", \"\")\n",
|
127 |
+
"\n",
|
128 |
+
" subreddit_dir = output_dir / subreddit.parent / dname\n",
|
129 |
+
"\n",
|
130 |
+
" if not subreddit_dir.exists():\n",
|
131 |
+
" subreddit_dir.mkdir()\n",
|
132 |
+
"\n",
|
133 |
+
" \n",
|
134 |
+
" fname = f.stem\n",
|
135 |
+
" fname = re.sub(r'\\W+', '', fname)\n",
|
136 |
+
" fname = fname + f.suffix\n",
|
137 |
+
" fname = fname.lower()\n",
|
138 |
+
" outpath = (output_dir / subreddit_dir/f\"{fname}\").with_suffix(\".jpeg\")\n",
|
139 |
+
"\n",
|
140 |
+
" im.save(outpath)\n",
|
141 |
+
" im.close()\n",
|
142 |
+
" pbar.set_description(f\"writing: {outpath}\")\n",
|
143 |
+
" pbar.update(1)\n",
|
144 |
+
" if(pbar.n == pbar.total) : break\n",
|
145 |
+
" except Exception as e:\n",
|
146 |
+
" pass"
|
147 |
+
]
|
148 |
+
}
|
149 |
+
],
|
150 |
+
"metadata": {
|
151 |
+
"kernelspec": {
|
152 |
+
"display_name": "Python 3",
|
153 |
+
"language": "python",
|
154 |
+
"name": "python3"
|
155 |
+
},
|
156 |
+
"language_info": {
|
157 |
+
"codemirror_mode": {
|
158 |
+
"name": "ipython",
|
159 |
+
"version": 3
|
160 |
+
},
|
161 |
+
"file_extension": ".py",
|
162 |
+
"mimetype": "text/x-python",
|
163 |
+
"name": "python",
|
164 |
+
"nbconvert_exporter": "python",
|
165 |
+
"pygments_lexer": "ipython3",
|
166 |
+
"version": "3.11.2"
|
167 |
+
},
|
168 |
+
"orig_nbformat": 4
|
169 |
+
},
|
170 |
+
"nbformat": 4,
|
171 |
+
"nbformat_minor": 2
|
172 |
+
}
|
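The loop above copies a random ~5% sample of the scraped Reddit images into `reddit-tiny`, keeping only 3-channel RGB files, stripping "porn" from the subreddit folder names, and re-saving everything as sanitized `.jpeg` files. A minimal sketch of that per-file step pulled out into a standalone helper (the name `copy_as_clean_jpeg` is ours for illustration, not part of the repo):

```python
import re
from pathlib import Path

import numpy as np
from PIL import Image


def copy_as_clean_jpeg(src: Path, reddit_dir: Path, out_dir: Path) -> Path | None:
    """Copy one scraped image into the sampled dataset.

    Keeps only 3-channel (RGB) images, lowercases the subreddit folder name
    and strips the word "porn" from it, removes non-word characters from the
    file name, and re-saves the image as JPEG. Returns the destination path,
    or None if the image was skipped.
    """
    im = Image.open(src)
    w, h = im.size
    if max(1, w) * max(1, h) > Image.MAX_IMAGE_PIXELS:
        return None
    arr = np.asarray(im)
    if arr.ndim != 3 or arr.shape[-1] != 3:
        return None  # skip grayscale / RGBA / CMYK images

    subreddit = src.relative_to(reddit_dir).parent.name.lower().replace("porn", "")
    dest_dir = out_dir / subreddit
    dest_dir.mkdir(parents=True, exist_ok=True)

    fname = re.sub(r"\W+", "", src.stem).lower()
    dest = (dest_dir / fname).with_suffix(".jpeg")
    im.save(dest)
    im.close()
    return dest
```

Looping `copy_as_clean_jpeg(Path(f), reddit_dir, output_dir)` over a shuffled slice of `image_files` reproduces the notebook's behaviour without the tqdm bookkeeping.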
nirmaan/minireddit/minireddit.ipynb
ADDED
@@ -0,0 +1,174 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "markdown",
|
5 |
+
"metadata": {},
|
6 |
+
"source": [
|
7 |
+
"### Code to create Mini-Reddit"
|
8 |
+
]
|
9 |
+
},
|
10 |
+
{
|
11 |
+
"cell_type": "code",
|
12 |
+
"execution_count": 2,
|
13 |
+
"metadata": {},
|
14 |
+
"outputs": [],
|
15 |
+
"source": [
|
16 |
+
"from pathlib import Path\n",
|
17 |
+
"from garage.data import image_listing\n",
|
18 |
+
"from PIL import Image\n",
|
19 |
+
"from random import shuffle\n",
|
20 |
+
"import shutil\n",
|
21 |
+
"from tqdm import tqdm\n",
|
22 |
+
"import re\n",
|
23 |
+
"\n",
|
24 |
+
"import numpy as np"
|
25 |
+
]
|
26 |
+
},
|
27 |
+
{
|
28 |
+
"cell_type": "code",
|
29 |
+
"execution_count": 3,
|
30 |
+
"metadata": {},
|
31 |
+
"outputs": [
|
32 |
+
{
|
33 |
+
"name": "stdout",
|
34 |
+
"output_type": "stream",
|
35 |
+
"text": [
|
36 |
+
"179443\n"
|
37 |
+
]
|
38 |
+
}
|
39 |
+
],
|
40 |
+
"source": [
|
41 |
+
"reddit_dir = Path(\"~/me/data/reddit\").expanduser()\n",
|
42 |
+
"assert reddit_dir.exists()\n",
|
43 |
+
"subreddit_dir = [d for d in reddit_dir.iterdir() if d.is_dir()]\n",
|
44 |
+
"image_files = image_listing.find_images(reddit_dir)\n",
|
45 |
+
"print(len(image_files))"
|
46 |
+
]
|
47 |
+
},
|
48 |
+
{
|
49 |
+
"cell_type": "code",
|
50 |
+
"execution_count": 4,
|
51 |
+
"metadata": {},
|
52 |
+
"outputs": [],
|
53 |
+
"source": [
|
54 |
+
"def create_minireddit(pcnt=0.1):\n",
|
55 |
+
" pcnt = 0.1\n",
|
56 |
+
" output_dir = Path(\"~/me/data/minireddit\").expanduser()\n",
|
57 |
+
"\n",
|
58 |
+
" if output_dir.exists() : return\n",
|
59 |
+
" \n",
|
60 |
+
" output_dir.mkdir()\n",
|
61 |
+
"\n",
|
62 |
+
" pbar = tqdm(subreddit_dir)\n",
|
63 |
+
"\n",
|
64 |
+
" total_files = 0\n",
|
65 |
+
"\n",
|
66 |
+
" for d in pbar:\n",
|
67 |
+
" sub_files = [f for f in d.iterdir() if f.is_file()]\n",
|
68 |
+
" shuffle(sub_files)\n",
|
69 |
+
" if not sub_files: continue\n",
|
70 |
+
" pnum = int(pcnt*len(sub_files))\n",
|
71 |
+
" if pnum == 0: continue\n",
|
72 |
+
"\n",
|
73 |
+
" sub_output_dir = output_dir / (d.stem)\n",
|
74 |
+
"\n",
|
75 |
+
" if sub_output_dir.exists():\n",
|
76 |
+
" total_files = total_files + len([f for f in sub_output_dir.iterdir() if f.is_file()])\n",
|
77 |
+
" else:\n",
|
78 |
+
" sub_output_dir.mkdir()\n",
|
79 |
+
" idx = 0\n",
|
80 |
+
"\n",
|
81 |
+
" for f in sub_files:\n",
|
82 |
+
" try:\n",
|
83 |
+
" im = Image.open(f)\n",
|
84 |
+
" size = im.size\n",
|
85 |
+
" pixels = max(1, size[0]) * max(1, size[1])\n",
|
86 |
+
" if pixels > Image.MAX_IMAGE_PIXELS: continue\n",
|
87 |
+
" im.save((sub_output_dir/f\"{f.stem}\").with_suffix(\".jpeg\"))\n",
|
88 |
+
" im.close()\n",
|
89 |
+
" idx = idx + 1\n",
|
90 |
+
" total_files = total_files + 1\n",
|
91 |
+
" if idx >= pnum: break\n",
|
92 |
+
" except Exception as e:\n",
|
93 |
+
" #print(e)\n",
|
94 |
+
" pass\n",
|
95 |
+
" pbar.set_description(f\"Num Files = {total_files}\")\n",
|
96 |
+
"\n",
|
97 |
+
" sub_output_dirs = [d for d in output_dir.iterdir()]\n",
|
98 |
+
" for d in sub_output_dirs:\n",
|
99 |
+
" dname = d.stem.lower()\n",
|
100 |
+
" if \"porn\" in dname:\n",
|
101 |
+
" dname = dname.replace(\"porn\", \"\")\n",
|
102 |
+
" \n",
|
103 |
+
" if dname != d.stem:\n",
|
104 |
+
" d.rename(dname)\n",
|
105 |
+
" \n",
|
106 |
+
" sub_output_dirs = [d for d in output_dir.iterdir() if d.is_dir()]\n",
|
107 |
+
" for d in sub_output_dirs:\n",
|
108 |
+
" for f in d.iterdir():\n",
|
109 |
+
" if not f.is_file():\n",
|
110 |
+
" continue\n",
|
111 |
+
" else:\n",
|
112 |
+
" fname = f.stem\n",
|
113 |
+
" fname = re.sub(r'\\W+', '', fname)\n",
|
114 |
+
" fname = fname + f.suffix\n",
|
115 |
+
" fname = fname.lower()\n",
|
116 |
+
" if fname != f.name:\n",
|
117 |
+
" f.rename(d/fname)\n",
|
118 |
+
"\n",
|
119 |
+
"create_minireddit()\n",
|
120 |
+
"\n",
|
121 |
+
"def clean_iamge_files():\n",
|
122 |
+
" pass"
|
123 |
+
]
|
124 |
+
},
|
125 |
+
{
|
126 |
+
"cell_type": "code",
|
127 |
+
"execution_count": 6,
|
128 |
+
"metadata": {},
|
129 |
+
"outputs": [],
|
130 |
+
"source": [
|
131 |
+
"minreddit_dir = Path(\"~/me/data/minireddit\").expanduser()\n",
|
132 |
+
"sub_mr_dirs = [d for d in minreddit_dir.iterdir() if d.is_dir()]\n",
|
133 |
+
"\n",
|
134 |
+
"for d in sub_mr_dirs:\n",
|
135 |
+
" for f in d.iterdir():\n",
|
136 |
+
" if not f.is_file():\n",
|
137 |
+
" continue\n",
|
138 |
+
" else:\n",
|
139 |
+
" if \".DS_Store\" in f.name:\n",
|
140 |
+
" f.unlink()\n",
|
141 |
+
" continue\n",
|
142 |
+
" try:\n",
|
143 |
+
" im = np.array(Image.open(f))\n",
|
144 |
+
" if (len(im.shape)!=3) or (im.shape[-1] !=3):\n",
|
145 |
+
" print(im.shape)\n",
|
146 |
+
" f.unlink()\n",
|
147 |
+
" except:\n",
|
148 |
+
" continue"
|
149 |
+
]
|
150 |
+
}
|
151 |
+
],
|
152 |
+
"metadata": {
|
153 |
+
"kernelspec": {
|
154 |
+
"display_name": "Python 3",
|
155 |
+
"language": "python",
|
156 |
+
"name": "python3"
|
157 |
+
},
|
158 |
+
"language_info": {
|
159 |
+
"codemirror_mode": {
|
160 |
+
"name": "ipython",
|
161 |
+
"version": 3
|
162 |
+
},
|
163 |
+
"file_extension": ".py",
|
164 |
+
"mimetype": "text/x-python",
|
165 |
+
"name": "python",
|
166 |
+
"nbconvert_exporter": "python",
|
167 |
+
"pygments_lexer": "ipython3",
|
168 |
+
"version": "3.11.2"
|
169 |
+
},
|
170 |
+
"orig_nbformat": 4
|
171 |
+
},
|
172 |
+
"nbformat": 4,
|
173 |
+
"nbformat_minor": 2
|
174 |
+
}
|
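The second cell of this notebook walks the sampled `minireddit` tree, deletes `.DS_Store` files, and unlinks any image that does not decode to a 3-channel RGB array. A sketch of that cleanup pass as a reusable function (the name `prune_non_rgb` is ours; it assumes the same PIL/NumPy stack the notebook imports):

```python
from pathlib import Path

import numpy as np
from PIL import Image


def prune_non_rgb(dataset_dir: Path) -> int:
    """Delete .DS_Store files and images that are not 3-channel RGB.

    Returns the number of files removed. Unreadable files are left in place,
    mirroring the notebook's broad try/except.
    """
    removed = 0
    for f in dataset_dir.rglob("*"):
        if not f.is_file():
            continue
        if f.name == ".DS_Store":
            f.unlink()
            removed += 1
            continue
        try:
            arr = np.asarray(Image.open(f))
        except Exception:
            continue
        if arr.ndim != 3 or arr.shape[-1] != 3:
            f.unlink()
            removed += 1
    return removed
```

For example, `prune_non_rgb(Path("~/me/data/minireddit").expanduser())` performs the same pass as the notebook cell.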
nirmaan/preddit/preddit.ipynb
ADDED
@@ -0,0 +1,242 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"import pandas as pd\n",
|
10 |
+
"import numpy as np\n",
|
11 |
+
"\n",
|
12 |
+
"from pathlib import Path\n",
|
13 |
+
"import os\n",
|
14 |
+
"import zipfile"
|
15 |
+
]
|
16 |
+
},
|
17 |
+
{
|
18 |
+
"cell_type": "code",
|
19 |
+
"execution_count": 6,
|
20 |
+
"metadata": {},
|
21 |
+
"outputs": [],
|
22 |
+
"source": [
|
23 |
+
"def archive_reddit(reddit_dir: os.PathLike, archive_dir: os.PathLike, archive_size = 3*(2**30))->pd.DataFrame:\n",
|
24 |
+
" reddit_csv_path = reddit_dir/\"reddit.csv\"\n",
|
25 |
+
" rdf = pd.read_csv(reddit_csv_path)\n",
|
26 |
+
"\n",
|
27 |
+
" rdf = rdf.drop(columns=[\"wallpaper_dir\"])\n",
|
28 |
+
" archive_csv_path = archive_dir/\"preddit.csv\"\n",
|
29 |
+
" archive_dir.mkdir(exist_ok=True)\n",
|
30 |
+
"\n",
|
31 |
+
" columns = ['index', 'os_type', 'title', 'url', 'subreddit']\n",
|
32 |
+
"\n",
|
33 |
+
"\n",
|
34 |
+
" if archive_csv_path.exists():\n",
|
35 |
+
" prdf = pd.read_csv(archive_csv_path)\n",
|
36 |
+
" else:\n",
|
37 |
+
" prdf = pd.DataFrame(columns= columns + ['archive'])\n",
|
38 |
+
" prdf.to_csv(archive_csv_path, index=False)\n",
|
39 |
+
" \n",
|
40 |
+
" prdf = pd.merge(prdf, rdf, how=\"right\", left_on=columns, right_on=columns)\n",
|
41 |
+
" prdf.to_csv(archive_csv_path, index=False)\n",
|
42 |
+
"\n",
|
43 |
+
" while True:\n",
|
44 |
+
" archive_csv_path = archive_dir/\"preddit.csv\"\n",
|
45 |
+
"\n",
|
46 |
+
" prdf = pd.read_csv(archive_csv_path)\n",
|
47 |
+
" tdf = prdf[prdf[\"archive\"].isnull()]\n",
|
48 |
+
"\n",
|
49 |
+
" max_ar = prdf[\"archive\"].max()\n",
|
50 |
+
"\n",
|
51 |
+
" if np.isnan(max_ar):\n",
|
52 |
+
" max_ar = 0\n",
|
53 |
+
" else:\n",
|
54 |
+
" max_ar = int(max_ar) + 1\n",
|
55 |
+
" \n",
|
56 |
+
" total_file_size = 0\n",
|
57 |
+
" curr_archive_list = [ ]\n",
|
58 |
+
"\n",
|
59 |
+
"\n",
|
60 |
+
" for idx, row in tdf.iterrows():\n",
|
61 |
+
" \n",
|
62 |
+
" subreddit_dir = reddit_dir / str(row[\"subreddit\"])\n",
|
63 |
+
" findex = row[\"index\"]\n",
|
64 |
+
" files = [f for f in subreddit_dir.glob(f\"*{findex}*\")]\n",
|
65 |
+
"\n",
|
66 |
+
" if not files: continue\n",
|
67 |
+
"\n",
|
68 |
+
" imfile : Path = files[0]\n",
|
69 |
+
" fsize = imfile.stat().st_size\n",
|
70 |
+
" \n",
|
71 |
+
" if total_file_size + fsize > archive_size:\n",
|
72 |
+
" break\n",
|
73 |
+
" else:\n",
|
74 |
+
" total_file_size = total_file_size + fsize\n",
|
75 |
+
" curr_archive_list.append((idx, imfile))\n",
|
76 |
+
" \n",
|
77 |
+
" if 2*total_file_size < archive_size: break \n",
|
78 |
+
" archive_name = archive_dir / f\"reddit-{max_ar}.zip\"\n",
|
79 |
+
"\n",
|
80 |
+
" \n",
|
81 |
+
" print(f\"Writing Zip File {archive_name}\")\n",
|
82 |
+
"\n",
|
83 |
+
" with zipfile.ZipFile(archive_name, 'w') as redditz: \n",
|
84 |
+
" for (idx, ifile) in curr_archive_list:\n",
|
85 |
+
" redditz.write(ifile, Path(\"reddit\")/ifile.parent.name/ifile.name, compress_type=zipfile.ZIP_DEFLATED)\n",
|
86 |
+
" \n",
|
87 |
+
" print(f\"Written Zip File {archive_name}\")\n",
|
88 |
+
"\n",
|
89 |
+
" for (idx, ifile) in curr_archive_list:\n",
|
90 |
+
" prdf.at[idx, \"archive\"] = max_ar\n",
|
91 |
+
" \n",
|
92 |
+
"\n",
|
93 |
+
" prdf.to_csv(archive_csv_path)\n",
|
94 |
+
"\n",
|
95 |
+
" return prdf"
|
96 |
+
]
|
97 |
+
},
|
98 |
+
{
|
99 |
+
"cell_type": "code",
|
100 |
+
"execution_count": 7,
|
101 |
+
"metadata": {},
|
102 |
+
"outputs": [
|
103 |
+
{
|
104 |
+
"name": "stdout",
|
105 |
+
"output_type": "stream",
|
106 |
+
"text": [
|
107 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-3.zip\n",
|
108 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-3.zip\n",
|
109 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-4.zip\n",
|
110 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-4.zip\n",
|
111 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-5.zip\n",
|
112 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-5.zip\n",
|
113 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-6.zip\n",
|
114 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-6.zip\n",
|
115 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-7.zip\n",
|
116 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-7.zip\n",
|
117 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-8.zip\n",
|
118 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-8.zip\n",
|
119 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-9.zip\n",
|
120 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-9.zip\n",
|
121 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-10.zip\n",
|
122 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-10.zip\n",
|
123 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-11.zip\n",
|
124 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-11.zip\n",
|
125 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-12.zip\n",
|
126 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-12.zip\n",
|
127 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-13.zip\n",
|
128 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-13.zip\n",
|
129 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-14.zip\n",
|
130 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-14.zip\n",
|
131 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-15.zip\n",
|
132 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-15.zip\n",
|
133 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-16.zip\n",
|
134 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-16.zip\n",
|
135 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-17.zip\n",
|
136 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-17.zip\n",
|
137 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-18.zip\n",
|
138 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-18.zip\n",
|
139 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-19.zip\n",
|
140 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-19.zip\n",
|
141 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-20.zip\n",
|
142 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-20.zip\n",
|
143 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-21.zip\n",
|
144 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-21.zip\n",
|
145 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-22.zip\n",
|
146 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-22.zip\n",
|
147 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-23.zip\n",
|
148 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-23.zip\n",
|
149 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-24.zip\n",
|
150 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-24.zip\n",
|
151 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-25.zip\n",
|
152 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-25.zip\n",
|
153 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-26.zip\n",
|
154 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-26.zip\n",
|
155 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-27.zip\n",
|
156 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-27.zip\n",
|
157 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-28.zip\n",
|
158 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-28.zip\n",
|
159 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-29.zip\n",
|
160 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-29.zip\n",
|
161 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-30.zip\n",
|
162 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-30.zip\n",
|
163 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-31.zip\n",
|
164 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-31.zip\n",
|
165 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-32.zip\n",
|
166 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-32.zip\n",
|
167 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-33.zip\n",
|
168 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-33.zip\n",
|
169 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-34.zip\n",
|
170 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-34.zip\n",
|
171 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-35.zip\n",
|
172 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-35.zip\n",
|
173 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-36.zip\n",
|
174 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-36.zip\n",
|
175 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-37.zip\n",
|
176 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-37.zip\n",
|
177 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-38.zip\n",
|
178 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-38.zip\n",
|
179 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-39.zip\n",
|
180 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-39.zip\n",
|
181 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-40.zip\n",
|
182 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-40.zip\n",
|
183 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-41.zip\n",
|
184 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-41.zip\n",
|
185 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-42.zip\n",
|
186 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-42.zip\n",
|
187 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-43.zip\n",
|
188 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-43.zip\n",
|
189 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-44.zip\n",
|
190 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-44.zip\n",
|
191 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-45.zip\n",
|
192 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-45.zip\n",
|
193 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-46.zip\n",
|
194 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-46.zip\n",
|
195 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-47.zip\n",
|
196 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-47.zip\n",
|
197 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-48.zip\n",
|
198 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-48.zip\n",
|
199 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-49.zip\n",
|
200 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-49.zip\n",
|
201 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-50.zip\n",
|
202 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-50.zip\n",
|
203 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-51.zip\n",
|
204 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-51.zip\n",
|
205 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-52.zip\n",
|
206 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-52.zip\n",
|
207 |
+
"Writing Zip File /Users/reeteshmukul/me/data/preddit/reddit-53.zip\n",
|
208 |
+
"Written Zip File /Users/reeteshmukul/me/data/preddit/reddit-53.zip\n"
|
209 |
+
]
|
210 |
+
}
|
211 |
+
],
|
212 |
+
"source": [
|
213 |
+
"reddit_dir = Path(\"~/me/data/reddit\").expanduser()\n",
|
214 |
+
"preddit_dir = Path(\"~/me/data/preddit\").expanduser()\n",
|
215 |
+
"\n",
|
216 |
+
"prdf = archive_reddit(reddit_dir, preddit_dir)"
|
217 |
+
]
|
218 |
+
}
|
219 |
+
],
|
220 |
+
"metadata": {
|
221 |
+
"kernelspec": {
|
222 |
+
"display_name": "Python 3",
|
223 |
+
"language": "python",
|
224 |
+
"name": "python3"
|
225 |
+
},
|
226 |
+
"language_info": {
|
227 |
+
"codemirror_mode": {
|
228 |
+
"name": "ipython",
|
229 |
+
"version": 3
|
230 |
+
},
|
231 |
+
"file_extension": ".py",
|
232 |
+
"mimetype": "text/x-python",
|
233 |
+
"name": "python",
|
234 |
+
"nbconvert_exporter": "python",
|
235 |
+
"pygments_lexer": "ipython3",
|
236 |
+
"version": "3.11.2"
|
237 |
+
},
|
238 |
+
"orig_nbformat": 4
|
239 |
+
},
|
240 |
+
"nbformat": 4,
|
241 |
+
"nbformat_minor": 2
|
242 |
+
}
|
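`archive_reddit` above repeatedly packs not-yet-archived images into roughly 3 GiB zip files, records the archive number for each row in `preddit.csv`, and stops once a batch would be less than half full. The size-capped batching itself can be isolated into a small helper; this is a sketch under the same layout assumptions (`reddit/<subreddit>/<file>` arcnames), with `write_capped_archive` being our name, not the repo's:

```python
import zipfile
from pathlib import Path
from typing import Iterable


def write_capped_archive(files: Iterable[tuple[int, Path]],
                         archive_path: Path,
                         cap_bytes: int = 3 * 2**30) -> list[int]:
    """Write files into one zip until the (uncompressed) size cap is reached.

    Returns the row indices that made it into the archive so the caller can
    mark them as archived in its tracking CSV, as the notebook does.
    """
    written: list[int] = []
    total = 0
    with zipfile.ZipFile(archive_path, "w") as zf:
        for idx, f in files:
            size = f.stat().st_size
            if total + size > cap_bytes:
                break
            zf.write(f, Path("reddit") / f.parent.name / f.name,
                     compress_type=zipfile.ZIP_DEFLATED)
            total += size
            written.append(idx)
    return written
```

The notebook's loop would then set `prdf.at[idx, "archive"] = max_ar` for every returned index before writing the CSV back.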
nirmaan/subreddit.ipynb
ADDED
@@ -0,0 +1,291 @@
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": null,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"import pandas as pd\n",
|
10 |
+
"import urllib3\n",
|
11 |
+
"import json\n",
|
12 |
+
"from bs4 import BeautifulSoup\n",
|
13 |
+
"import numpy as np\n",
|
14 |
+
"from concurrent.futures import ThreadPoolExecutor\n",
|
15 |
+
"from concurrent.futures import Future\n",
|
16 |
+
"from traitlets import List\n",
|
17 |
+
"\n",
|
18 |
+
"from reddit.reddit_info import subreddit_name_l, subreddit_sort_l, subreddit_t_l\n",
|
19 |
+
"import itertools\n",
|
20 |
+
"import random\n",
|
21 |
+
"from pathlib import Path\n",
|
22 |
+
"\n",
|
23 |
+
"from tqdm import tqdm\n",
|
24 |
+
"\n",
|
25 |
+
"from datetime import datetime, timezone\n",
|
26 |
+
"from typing import Optional\n",
|
27 |
+
"from requests.utils import requote_uri\n",
|
28 |
+
"from random_word import RandomWords\n",
|
29 |
+
"from wonderwords import RandomSentence"
|
30 |
+
]
|
31 |
+
},
|
32 |
+
{
|
33 |
+
"cell_type": "code",
|
34 |
+
"execution_count": null,
|
35 |
+
"metadata": {},
|
36 |
+
"outputs": [],
|
37 |
+
"source": [
|
38 |
+
"def get_subreddit_url(subreddit, sort_by:str = \"hot\", sort_time:str=\"all\", limit:int=100, query:Optional[str]=None):\n",
|
39 |
+
" if not query:\n",
|
40 |
+
" return f'https://www.reddit.com/r/{subreddit}/{sort_by}/.json?raw_json=1&t={sort_time}&limit={limit}'\n",
|
41 |
+
" else:\n",
|
42 |
+
" return f'https://www.reddit.com/r/{subreddit}/search/.json?raw_json=1&q={query}&limit={100}'"
|
43 |
+
]
|
44 |
+
},
|
45 |
+
{
|
46 |
+
"cell_type": "code",
|
47 |
+
"execution_count": null,
|
48 |
+
"metadata": {},
|
49 |
+
"outputs": [],
|
50 |
+
"source": [
|
51 |
+
"subreddit_url = get_subreddit_url(\"CityPorn\", \"hot\", \"all\", 100)#, query=\"6 years ago\")\n",
|
52 |
+
"subreddit_url = requote_uri(subreddit_url)\n",
|
53 |
+
"subreddit_url"
|
54 |
+
]
|
55 |
+
},
|
56 |
+
{
|
57 |
+
"cell_type": "code",
|
58 |
+
"execution_count": null,
|
59 |
+
"metadata": {},
|
60 |
+
"outputs": [],
|
61 |
+
"source": [
|
62 |
+
"user_agent = {'user-agent': 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) ..'}\n",
|
63 |
+
"pool_manager = urllib3.PoolManager(headers=user_agent)\n",
|
64 |
+
"res = []"
|
65 |
+
]
|
66 |
+
},
|
67 |
+
{
|
68 |
+
"cell_type": "code",
|
69 |
+
"execution_count": null,
|
70 |
+
"metadata": {},
|
71 |
+
"outputs": [],
|
72 |
+
"source": [
|
73 |
+
"def fetch_subreddit_image_entries(subreddit_url: str, pool_manager):\n",
|
74 |
+
" result = [ ]\n",
|
75 |
+
" try:\n",
|
76 |
+
" response = pool_manager.request('GET', subreddit_url)\n",
|
77 |
+
" #print(response.data)\n",
|
78 |
+
" subreddit_data = json.loads(response.data) \n",
|
79 |
+
" \n",
|
80 |
+
" if not \"data\" in subreddit_data: return []\n",
|
81 |
+
" if not \"children\" in subreddit_data[\"data\"]: return []\n",
|
82 |
+
" \n",
|
83 |
+
" for content in subreddit_data['data']['children']:\n",
|
84 |
+
" try:\n",
|
85 |
+
" if content['data'].get('post_hint', 'none') == 'image' and 'preview' in content['data']: \n",
|
86 |
+
" created_utc = datetime.fromtimestamp(content['data'][\"created_utc\"], timezone.utc)\n",
|
87 |
+
" \n",
|
88 |
+
" #print(created_utc)\n",
|
89 |
+
" \n",
|
90 |
+
" source_d = content['data']['preview']['images'][0]['source']\n",
|
91 |
+
" image_url, width, height = source_d['url'], source_d[\"width\"], source_d[\"height\"]\n",
|
92 |
+
" image_title = content['data']['title']\n",
|
93 |
+
" image_id = content['data']['id']\n",
|
94 |
+
" data_url = content['data']['url']\n",
|
95 |
+
" subreddit = content['data']['subreddit']\n",
|
96 |
+
" if content['data']['is_video'] : continue \n",
|
97 |
+
" result.append({\n",
|
98 |
+
" \"image_url\" : image_url,\n",
|
99 |
+
" \"title\" : image_title,\n",
|
100 |
+
" \"image_id\" : image_id,\n",
|
101 |
+
" \"url\" : data_url,\n",
|
102 |
+
" \"subreddit\" : subreddit,\n",
|
103 |
+
" \"width\" : width,\n",
|
104 |
+
" \"height\" : height,\n",
|
105 |
+
" \"created_utc\" : created_utc,\n",
|
106 |
+
" })\n",
|
107 |
+
" except Exception as e:\n",
|
108 |
+
" pass\n",
|
109 |
+
" return result\n",
|
110 |
+
" except Exception as e:\n",
|
111 |
+
" #print(e)\n",
|
112 |
+
" return []\n",
|
113 |
+
" \n",
|
114 |
+
"#fetch_subreddit_image_entries(subreddit_url, pool_manager)"
|
115 |
+
]
|
116 |
+
},
|
117 |
+
{
|
118 |
+
"cell_type": "code",
|
119 |
+
"execution_count": null,
|
120 |
+
"metadata": {},
|
121 |
+
"outputs": [],
|
122 |
+
"source": [
|
123 |
+
"def fetch_multiple_subreddit_image_entries(subreddit_urls: str, thread_pool_size: int=5, urllib_pool_size:int=5):\n",
|
124 |
+
" \n",
|
125 |
+
" pool_manager = urllib3.PoolManager(maxsize=urllib_pool_size)\n",
|
126 |
+
" thread_pool = ThreadPoolExecutor(thread_pool_size)\n",
|
127 |
+
" res_futs = [ ]\n",
|
128 |
+
" \n",
|
129 |
+
" for subreddit_url in subreddit_urls:\n",
|
130 |
+
" res_futs.append(thread_pool.submit(fetch_subreddit_image_entries, subreddit_url, pool_manager))\n",
|
131 |
+
" \n",
|
132 |
+
" res :[List[Future]] = []\n",
|
133 |
+
" \n",
|
134 |
+
" for r in res_futs:\n",
|
135 |
+
" res.extend(r.result())\n",
|
136 |
+
" \n",
|
137 |
+
" return list({x[\"image_id\"] : x for x in res}.values())"
|
138 |
+
]
|
139 |
+
},
|
140 |
+
{
|
141 |
+
"cell_type": "code",
|
142 |
+
"execution_count": null,
|
143 |
+
"metadata": {},
|
144 |
+
"outputs": [],
|
145 |
+
"source": [
|
146 |
+
"def get_random_subreddit_urls(num_urls:int = 20):\n",
|
147 |
+
" subr_l = list(itertools.product(subreddit_name_l, subreddit_sort_l, subreddit_t_l))\n",
|
148 |
+
" return [get_subreddit_url(*xs, 100) for xs in random.sample(subr_l, k=num_urls)]\n",
|
149 |
+
"\n",
|
150 |
+
"def get_random_subreddit_query_urls(num_urls:int = 20, query_type: str = \"chronology\"):\n",
|
151 |
+
" '''\n",
|
152 |
+
" query_type:\n",
|
153 |
+
" chronology\n",
|
154 |
+
" random_word\n",
|
155 |
+
" random_phrase\n",
|
156 |
+
" '''\n",
|
157 |
+
" timeline = random.choices([\"days\", \"months\", \"years\"], k = num_urls)\n",
|
158 |
+
" timevalue = random.choices(range(1, 12), k = num_urls)\n",
|
159 |
+
" subr = random.sample(subreddit_name_l, k = num_urls)\n",
|
160 |
+
" \n",
|
161 |
+
" if query_type == \"chronology\":\n",
|
162 |
+
" return [get_subreddit_url(subreddit=sr, query=f\"{tv} {tl} ago\") for (sr, tl, tv) in list(itertools.product(subr, timeline, timevalue))]\n",
|
163 |
+
" elif query_type == \"random_word\":\n",
|
164 |
+
" r = RandomWords()\n",
|
165 |
+
" return [get_subreddit_url(subreddit=sr, query=f\"{r.get_random_word()}\") for sr in subr]\n",
|
166 |
+
" elif query_type == \"random_phrase\":\n",
|
167 |
+
" s = RandomSentence()\n",
|
168 |
+
" return [get_subreddit_url(subreddit=sr, query=f\"{s.sentence()}\") for sr in subr]\n",
|
169 |
+
" else:\n",
|
170 |
+
" return [ ] \n",
|
171 |
+
"\n",
|
172 |
+
"\n",
|
173 |
+
"\n",
|
174 |
+
"#get_random_subreddit_urls() \n",
|
175 |
+
"#get_random_subreddit_query_urls(query_type=\"random_phrase\")"
|
176 |
+
]
|
177 |
+
},
|
178 |
+
{
|
179 |
+
"cell_type": "code",
|
180 |
+
"execution_count": null,
|
181 |
+
"metadata": {},
|
182 |
+
"outputs": [],
|
183 |
+
"source": [
|
184 |
+
"#subreddit_url = get_random_subreddit_query_urls(num_urls=5)[2]\n",
|
185 |
+
"#subreddit_url = get_subreddit_url(\"CityPorn\", \"hot\", \"all\", 100, query=\"11 years ago\")\n",
|
186 |
+
"#fetch_subreddit_image_entries(subreddit_url, pool_manager)"
|
187 |
+
]
|
188 |
+
},
|
189 |
+
{
|
190 |
+
"cell_type": "code",
|
191 |
+
"execution_count": null,
|
192 |
+
"metadata": {},
|
193 |
+
"outputs": [],
|
194 |
+
"source": [
|
195 |
+
"#res = fetch_multiple_subreddit_image_entries(get_random_subreddit_urls(num_urls=100))\n",
|
196 |
+
"#res = fetch_multiple_subreddit_image_entries(get_random_subreddit_query_urls(num_urls=5))\n",
|
197 |
+
"#len(res)"
|
198 |
+
]
|
199 |
+
},
|
200 |
+
{
|
201 |
+
"cell_type": "code",
|
202 |
+
"execution_count": null,
|
203 |
+
"metadata": {},
|
204 |
+
"outputs": [],
|
205 |
+
"source": [
|
206 |
+
"policies = [\n",
|
207 |
+
" (\"subredit_sort\", lambda: fetch_multiple_subreddit_image_entries(get_random_subreddit_urls(num_urls=100))),\n",
|
208 |
+
" (\"subreddit_chrono\", lambda: get_random_subreddit_query_urls(num_urls=5, query_type=\"chronology\")),\n",
|
209 |
+
" (\"subreddit_random_word\", lambda: get_random_subreddit_query_urls(num_urls=5, query_type=\"random_word\")),\n",
|
210 |
+
" (\"subreddit_random_phrase\", lambda: get_random_subreddit_query_urls(num_urls=5, query_type=\"random_phrase\")),\n",
|
211 |
+
"]\n",
|
212 |
+
"\n",
|
213 |
+
"policy_count = [[]]*len(policies)\n",
|
214 |
+
"policy_hit = [[]]*len(policies)"
|
215 |
+
]
|
216 |
+
},
|
217 |
+
{
|
218 |
+
"cell_type": "code",
|
219 |
+
"execution_count": null,
|
220 |
+
"metadata": {},
|
221 |
+
"outputs": [],
|
222 |
+
"source": [
|
223 |
+
"dfname = \"reddit.csv\"\n",
|
224 |
+
"otime = 0\n",
|
225 |
+
"\n",
|
226 |
+
"tarr = []\n",
|
227 |
+
"karr = []\n",
|
228 |
+
"\n",
|
229 |
+
"total_updates = 0\n",
|
230 |
+
"\n",
|
231 |
+
"with tqdm(total=10000) as pbar:\n",
|
232 |
+
" for _ in range(10000):\n",
|
233 |
+
" if random.random() > 0.6:\n",
|
234 |
+
" res = fetch_multiple_subreddit_image_entries(get_random_subreddit_urls(num_urls=100))\n",
|
235 |
+
" else:\n",
|
236 |
+
" res = fetch_multiple_subreddit_image_entries(get_random_subreddit_query_urls(num_urls=5, query_type=\"random_phrase\"))\n",
|
237 |
+
" \n",
|
238 |
+
" num_fetched = len(res)\n",
|
239 |
+
" \n",
|
240 |
+
" if res:\n",
|
241 |
+
" if not Path(dfname).exists():\n",
|
242 |
+
" pd.DataFrame(res).to_csv(dfname, index=False)\n",
|
243 |
+
" karr.append(len(res))\n",
|
244 |
+
" else:\n",
|
245 |
+
" df = pd.read_csv(dfname)\n",
|
246 |
+
" keys = set(df[\"image_id\"])\n",
|
247 |
+
" cres = [x for x in res if not (x[\"image_id\"] in keys)]\n",
|
248 |
+
" \n",
|
249 |
+
" if cres:\n",
|
250 |
+
" ndf = pd.DataFrame(cres)\n",
|
251 |
+
" ndf.to_csv(dfname, mode=\"a\", header=None, index=False)\n",
|
252 |
+
" karr.append(len(cres))\n",
|
253 |
+
" else:\n",
|
254 |
+
" karr.append(0)\n",
|
255 |
+
" \n",
|
256 |
+
" ntime = pbar.format_dict['elapsed']\n",
|
257 |
+
" N = len(pd.read_csv(dfname))\n",
|
258 |
+
" tarr.append(ntime-otime)\n",
|
259 |
+
" otime = ntime\n",
|
260 |
+
" tarr = tarr[-25:]\n",
|
261 |
+
" karr = karr[-25:]\n",
|
262 |
+
" rate = sum(karr)/sum(tarr)\n",
|
263 |
+
" pbar.update(1)\n",
|
264 |
+
" total_updates = total_updates + karr[-1]\n",
|
265 |
+
" pbar.set_description_str(f\"count:{N}, fetch rate:{rate:.3f}, last_update:{karr[-1]}, total_updates:{total_updates}\")"
|
266 |
+
]
|
267 |
+
}
|
268 |
+
],
|
269 |
+
"metadata": {
|
270 |
+
"kernelspec": {
|
271 |
+
"display_name": "Python 3",
|
272 |
+
"language": "python",
|
273 |
+
"name": "python3"
|
274 |
+
},
|
275 |
+
"language_info": {
|
276 |
+
"codemirror_mode": {
|
277 |
+
"name": "ipython",
|
278 |
+
"version": 3
|
279 |
+
},
|
280 |
+
"file_extension": ".py",
|
281 |
+
"mimetype": "text/x-python",
|
282 |
+
"name": "python",
|
283 |
+
"nbconvert_exporter": "python",
|
284 |
+
"pygments_lexer": "ipython3",
|
285 |
+
"version": "3.11.6"
|
286 |
+
},
|
287 |
+
"orig_nbformat": 4
|
288 |
+
},
|
289 |
+
"nbformat": 4,
|
290 |
+
"nbformat_minor": 2
|
291 |
+
}
|
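The notebook builds listing and search URLs for Reddit's public `.json` endpoints, fetches them through a thread pool, and de-duplicates the results by `image_id` before appending them to `reddit.csv`. A minimal single-request sketch of the fetch-and-parse step, using the same endpoint format and browser-style user agent as above (the helper name `fetch_image_posts` is ours for illustration):

```python
import json

import urllib3


def fetch_image_posts(subreddit: str, sort_by: str = "hot",
                      sort_time: str = "all", limit: int = 100) -> list[dict]:
    """Hit the public subreddit .json endpoint and keep only image posts."""
    url = (f"https://www.reddit.com/r/{subreddit}/{sort_by}/.json"
           f"?raw_json=1&t={sort_time}&limit={limit}")
    headers = {"user-agent": "Mozilla/5.0 (Windows NT 6.3; rv:36.0) .."}
    http = urllib3.PoolManager(headers=headers)

    data = json.loads(http.request("GET", url).data)
    posts = []
    for child in data.get("data", {}).get("children", []):
        d = child["data"]
        # Same filter as the notebook: still images with a preview, no videos.
        if d.get("post_hint") != "image" or "preview" not in d or d.get("is_video"):
            continue
        source = d["preview"]["images"][0]["source"]
        posts.append({"image_id": d["id"], "image_url": source["url"],
                      "subreddit": d["subreddit"], "title": d["title"]})
    return posts
```

Running this once per URL and merging results on `image_id` gives the same de-duplicated rows the notebook appends to `reddit.csv`.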
reddit/LICENSE
ADDED
@@ -0,0 +1,21 @@
1 |
+
MIT License
|
2 |
+
|
3 |
+
Copyright (c) 2017 Sarjak Thakkar
|
4 |
+
|
5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
+
of this software and associated documentation files (the "Software"), to deal
|
7 |
+
in the Software without restriction, including without limitation the rights
|
8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
+
copies of the Software, and to permit persons to whom the Software is
|
10 |
+
furnished to do so, subject to the following conditions:
|
11 |
+
|
12 |
+
The above copyright notice and this permission notice shall be included in all
|
13 |
+
copies or substantial portions of the Software.
|
14 |
+
|
15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21 |
+
SOFTWARE.
|
reddit/README.md
ADDED
@@ -0,0 +1,58 @@
1 |
+
# Download Images and Set Wallpapers from your choice of Subreddit!
|
2 |
+
|
3 |
+
Using this script:
|
4 |
+
* **No Setup Needed!**
|
5 |
+
* **Download any number of images** from **Any Subreddit!**
|
6 |
+
* Select your sort type **(Top, Hot, New, Controversial)**
|
7 |
+
* Select your sort time **(All, Year, Month, Week, Day)**
|
8 |
+
* Extremely **Reliable Downloading.**
|
9 |
+
* **Automatically set and switch Wallpaper** from the downloaded images
|
10 |
+
* **Store the links** to all the downloaded images (in case you want to access them later)
|
11 |
+
* Works on both **Python 2.x and Python 3.x**!
|
12 |
+
* Specify your **custom save location**!
|
13 |
+
|
14 |
+
### Grab the latest release here! Or download it using the next step!
|
15 |
+
DownloadRedditImages v2.0.0 - https://github.com/tsarjak/WallpapersFromReddit/releases/tag/v2.0.0
|
16 |
+
|
17 |
+
### No Dependencies! Just Download and Run!
|
18 |
+
Just download the files and run the script - no need to set up anything at all!
|
19 |
+
|
20 |
+
To download via Terminal:
|
21 |
+
```shell
|
22 |
+
git clone https://github.com/tsarjak/WallpapersFromReddit.git
|
23 |
+
```
|
24 |
+
|
25 |
+
|
26 |
+
### To run the code
|
27 |
+
|
28 |
+
In terminal:
|
29 |
+
```shell
|
30 |
+
# In the Home Directory or the Directory in which you cloned/downloaded the script
|
31 |
+
cd ~/DownloadRedditImages
|
32 |
+
python main.py
|
33 |
+
--subreddit <your choice of subreddit>
|
34 |
+
--sort_type <hot|new|rising|top>
|
35 |
+
--sort_time <all|year|month|week|day>
|
36 |
+
--save_dir <Directory where you want to store the images. By default it saves in wallpapers folder>
|
37 |
+
--update_every <Number of minutes after which you want to change the wallpapers>
|
38 |
+
--run_for <Total number of hours you want to run the script for>
|
39 |
+
--download_only <Toggle this to either only download the images, or to also keep cycling through the wallpapers>
|
40 |
+
```
|
41 |
+
|
42 |
+
Example:
|
43 |
+
```shell
|
44 |
+
python wallpaper.py --subreddit earthporn --sort_type top --sort_time all --download_only
|
45 |
+
```
|
46 |
+
|
47 |
+
|
48 |
+
### After the first run, you might want to run the app in the background (this is possible even on the first run!)
|
49 |
+
|
50 |
+
```shell
|
51 |
+
nohup python main.py &
|
52 |
+
```
|
53 |
+
The nohup command keeps the script running in the background even after the terminal is closed.
|
54 |
+
|
55 |
+
### Add the script as a startup application
|
56 |
+
|
57 |
+
Goto "Startup Applications"
|
58 |
+
Click "Add" -> Click "Custom Command" -> Enter command as "nohup python ~/main.py &", enter name and other details and save
|
reddit/__init__.py
ADDED
@@ -0,0 +1,2 @@
1 |
+
from .redditd import rdownloader
|
2 |
+
from .subreddits import SUBREDDITS
|
reddit/core.py
ADDED
@@ -0,0 +1,162 @@
1 |
+
import os
|
2 |
+
from typing import Dict
|
3 |
+
|
4 |
+
import requests
|
5 |
+
|
6 |
+
from .utils import Utils
|
7 |
+
from os import path as osp
|
8 |
+
from pathlib import Path
|
9 |
+
import pandas as pd
|
10 |
+
import concurrent
|
11 |
+
|
12 |
+
class WallpaperDownloader:
|
13 |
+
"""
|
14 |
+
Core class to download images from Reddit.
|
15 |
+
"""
|
16 |
+
|
17 |
+
def __init__(self,
|
18 |
+
subreddit: str = 'wallpaper',
|
19 |
+
sort_by: str = 'top',
|
20 |
+
sort_time: str = 'all',
|
21 |
+
save_dir: str = 'default'):
|
22 |
+
"""
|
23 |
+
Initialize the preference and link to subreddit.
|
24 |
+
:param subreddit: Name of the subreddit.
|
25 |
+
:param sort_by: Sort by? Hot/Top/New/Controversial.
|
26 |
+
:param sort_time: Sort time. day/week/month/year/all
|
27 |
+
"""
|
28 |
+
self._url = 'https://www.reddit.com/r/{}/{}/.json?raw_json=1&t={}&limit=100'.format(subreddit,
|
29 |
+
sort_by,
|
30 |
+
sort_time)
|
31 |
+
self._preferences_file = Path(save_dir) / 'wp_preferences.json'
|
32 |
+
self._preferences = self._setup_preferences(save_dir=save_dir)
|
33 |
+
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=16)
|
34 |
+
|
35 |
+
@property
|
36 |
+
def preferences(self) -> Dict:
|
37 |
+
"""
|
38 |
+
Get the preferences.
|
39 |
+
:return: Preferences Dict.
|
40 |
+
"""
|
41 |
+
return Utils.load_preferences(self._preferences_file)
|
42 |
+
|
43 |
+
@staticmethod
|
44 |
+
def download_image(image_url, img_save_name):
|
45 |
+
|
46 |
+
if osp.exists(img_save_name):
|
47 |
+
return
|
48 |
+
|
49 |
+
dummy_img_save_name = img_save_name + ".dummy"
|
50 |
+
|
51 |
+
with requests.get(image_url, stream=True) as r:
|
52 |
+
r.raise_for_status()
|
53 |
+
with open('{}'.format(img_save_name), 'wb') as f:
|
54 |
+
for chunk in r.iter_content(chunk_size=26214400):
|
55 |
+
if chunk:
|
56 |
+
f.write(chunk)
|
57 |
+
|
58 |
+
if osp.exists(dummy_img_save_name):
|
59 |
+
os.remove(dummy_img_save_name)
|
60 |
+
|
61 |
+
def download(self, max_count: int = 200):
|
62 |
+
"""
|
63 |
+
This is where all the downloading takes place.
|
64 |
+
:param max_count: Maximum number of images to download.
|
65 |
+
:return: num_downloaded: Number of downloaded images.
|
66 |
+
"""
|
67 |
+
|
68 |
+
# Update URL to fetch entries based on max_count.
|
69 |
+
self._url += '&limit={}'.format(
|
70 |
+
max_count * 1000) # There are a lot of unwanted images, so setting it to high value.
|
71 |
+
|
72 |
+
# Fetch the JSON file for subreddit here.
|
73 |
+
subreddit_data = Utils.fetch_subreddit_data(subreddit_url=self._url)
|
74 |
+
|
75 |
+
# If we can't get subreddit data even after 20 trials, we close the program. Try later.
|
76 |
+
if subreddit_data is None:
|
77 |
+
#print('Unable to connect to reddit. Check internet connection. Or try later.')
|
78 |
+
return 0
|
79 |
+
|
80 |
+
count = 0
|
81 |
+
saved_images = []
|
82 |
+
|
83 |
+
for content in subreddit_data['data']['children']:
|
84 |
+
|
85 |
+
if content['data'].get('post_hint', 'none') == 'image' and 'preview' in content['data']:
|
86 |
+
# Get the information about the image.
|
87 |
+
image_url = content['data']['preview']['images'][0]['source']['url']
|
88 |
+
image_title = content['data']['title'][:15]
|
89 |
+
image_title = ''.join(filter(str.isalnum, image_title))
|
90 |
+
image_id = content['data']['id']
|
91 |
+
|
92 |
+
# Set image save name
|
93 |
+
img_save_name = '{}_{}.jpg'.format(image_title, image_id)
|
94 |
+
img_save_name = osp.join(self._preferences['wallpaper_dir'], img_save_name)
|
95 |
+
|
96 |
+
|
97 |
+
dummy_img_save_name = img_save_name + ".dummy"
|
98 |
+
# If we have already downloaded the image, we can skip.
|
99 |
+
if osp.exists(img_save_name):
|
100 |
+
continue
|
101 |
+
|
102 |
+
open(img_save_name, 'a').close()
|
103 |
+
os.utime(img_save_name, None)
|
104 |
+
|
105 |
+
# Actually downloading the image.
|
106 |
+
try:
|
107 |
+
#self.executor.submit(WallpaperDownloader.download_image, image_url, img_save_name)
|
108 |
+
'''
|
109 |
+
with requests.get(image_url, stream=True) as r:
|
110 |
+
r.raise_for_status()
|
111 |
+
with open('{}'.format(img_save_name), 'wb') as f:
|
112 |
+
for chunk in r.iter_content(chunk_size=26214400):
|
113 |
+
if chunk:
|
114 |
+
f.write(chunk)
|
115 |
+
'''
|
116 |
+
except:
|
117 |
+
continue
|
118 |
+
|
119 |
+
saved_images.append(img_save_name)
|
120 |
+
|
121 |
+
# Update the preferences.
|
122 |
+
self._preferences['urls'][image_id] = {'title': image_title,
|
123 |
+
'url': image_url}
|
124 |
+
Utils.save_to_preferences(self._preferences, self._preferences_file)
|
125 |
+
|
126 |
+
count += 1
|
127 |
+
|
128 |
+
# Done downloading, so remove unwanted images and return the total number of saved images.
|
129 |
+
if count >= max_count:
|
130 |
+
count_removed = Utils.remove_unwanted_images(saved_images)
|
131 |
+
return len(saved_images) - count_removed
|
132 |
+
|
133 |
+
return count
|
134 |
+
|
135 |
+
def _setup_preferences(self, save_dir='default') -> Dict:
|
136 |
+
"""
|
137 |
+
Setup the preferences for downloading. Find the machine type etc if its running for the first time.
|
138 |
+
:return: preferences - Loaded preferences.
|
139 |
+
"""
|
140 |
+
# Load the preferences file.
|
141 |
+
preferences = Utils.load_preferences(self._preferences_file) if osp.exists(self._preferences_file) else {}
|
142 |
+
|
143 |
+
# If it's empty (running for the first time), we set it up manually.
|
144 |
+
if preferences == {}:
|
145 |
+
os_type, wallpaper_dir = Utils.get_os()
|
146 |
+
|
147 |
+
# If wallpapers directory is not there, we create it.
|
148 |
+
if not osp.exists(wallpaper_dir):
|
149 |
+
os.makedirs(wallpaper_dir)
|
150 |
+
|
151 |
+
preferences['os_type'] = os_type
|
152 |
+
preferences['wallpaper_dir'] = wallpaper_dir
|
153 |
+
preferences['urls'] = dict()
|
154 |
+
|
155 |
+
# Update the default dir here.
|
156 |
+
if save_dir != 'default':
|
157 |
+
preferences['wallpaper_dir'] = save_dir
|
158 |
+
|
159 |
+
# Just save preferences back to file in case of update.
|
160 |
+
Utils.save_to_preferences(preferences, self._preferences_file)
|
161 |
+
|
162 |
+
return preferences
|
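Putting `core.py` together: `WallpaperDownloader` builds the subreddit JSON URL from the constructor arguments, keeps a `wp_preferences.json` file next to the images, and walks the listing in `download()`. A hedged usage sketch (note that in this revision the actual byte transfer inside `download()` is commented out, so the call mainly creates placeholder files and records URLs in the preferences):

```python
from pathlib import Path

from reddit.core import WallpaperDownloader

# Make sure the save directory exists before the preferences file is written.
save_dir = Path("wallpapers")
save_dir.mkdir(exist_ok=True)

# Fetch "top of all time" image posts from r/wallpaper and record them.
downloader = WallpaperDownloader(subreddit="wallpaper",
                                 sort_by="top",
                                 sort_time="all",
                                 save_dir=str(save_dir))
count = downloader.download(max_count=50)
print(f"{count} image entries recorded")
```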
reddit/reddit_info.py
ADDED
@@ -0,0 +1,211 @@
1 |
+
subreddit_name_l = list(dict.fromkeys([
|
2 |
+
'BollywoodUHQonly',
|
3 |
+
|
4 |
+
'Wallpapers',
|
5 |
+
'wallpaper',
|
6 |
+
|
7 |
+
'CityPorn',
|
8 |
+
'VillagePorn',
|
9 |
+
'RuralPorn',
|
10 |
+
'ArchitecturePorn',
|
11 |
+
'HousePorn',
|
12 |
+
'CabinPorn',
|
13 |
+
'ChurchPorn',
|
14 |
+
'AbandonedPorn',
|
15 |
+
'CemeteryPorn',
|
16 |
+
'InfrastructurePorn',
|
17 |
+
'MachinePorn',
|
18 |
+
'carporn',
|
19 |
+
'F1Porn',
|
20 |
+
'MotorcyclePorn',
|
21 |
+
'MilitaryPorn',
|
22 |
+
'GunPorn',
|
23 |
+
'KnifePorn',
|
24 |
+
'BoatPorn',
|
25 |
+
'RidesPorn',
|
26 |
+
'DestructionPorn',
|
27 |
+
'ThingsCutInHalfPorn',
|
28 |
+
'StarshipPorn',
|
29 |
+
'ToolPorn',
|
30 |
+
'TechnologyPorn',
|
31 |
+
'BridgePorn',
|
32 |
+
'PolicePorn',
|
33 |
+
'SteamPorn',
|
34 |
+
'RetailPorn',
|
35 |
+
'SpaceFlightPorn',
|
36 |
+
'roadporn',
|
37 |
+
'drydockporn',
|
38 |
+
'AnimalPorn',
|
39 |
+
'HumanPorn',
|
40 |
+
'EarthlingPorn',
|
41 |
+
'AdrenalinePorn',
|
42 |
+
'ClimbingPorn',
|
43 |
+
'SportsPorn',
|
44 |
+
'AgriculturePorn',
|
45 |
+
'TeaPorn',
|
46 |
+
'BonsaiPorn',
|
47 |
+
'FoodPorn',
|
48 |
+
'CulinaryPorn',
|
49 |
+
'DessertPorn',
|
50 |
+
'DesignPorn',
|
51 |
+
'RoomPorn',
|
52 |
+
'AlbumArtPorn',
|
53 |
+
'MetalPorn',
|
54 |
+
'MoviePosterPorn',
|
55 |
+
'TelevisionPosterPorn',
|
56 |
+
'ComicBookPorn',
|
57 |
+
'StreetArtPorn',
|
58 |
+
'AdPorn',
|
59 |
+
'ArtPorn',
|
60 |
+
'FractalPorn',
|
61 |
+
'InstrumentPorn',
|
62 |
+
'ExposurePorn',
|
63 |
+
'MacroPorn',
|
64 |
+
'MicroPorn',
|
65 |
+
'GeekPorn',
|
66 |
+
'MTGPorn',
|
67 |
+
'GamerPorn',
|
68 |
+
'PowerWashingPorn',
|
69 |
+
'AerialPorn',
|
70 |
+
'OrganizationPorn',
|
71 |
+
'FashionPorn',
|
72 |
+
'AVPorn',
|
73 |
+
'ApocalypsePorn',
|
74 |
+
'InfraredPorn',
|
75 |
+
'ViewPorn',
|
76 |
+
'HellscapePorn',
|
77 |
+
'sculptureporn',
|
78 |
+
|
79 |
+
'HighResPrints',
|
80 |
+
'SpaceWallpapers',
|
81 |
+
|
82 |
+
'ArtefactPorn',
|
83 |
+
'MegalithPorn',
|
84 |
+
'FossilPorn',
|
85 |
+
'FuturePorn',
|
86 |
+
'QuotesPorn',
|
87 |
+
'NewsPorn',
|
88 |
+
'BookPorn',
|
89 |
+
'UniformPorn',
|
90 |
+
'HistoryPorn',
|
91 |
+
'AutumnPorn',
|
92 |
+
'EarthPorn',
|
93 |
+
'SkyPorn',
|
94 |
+
'SeaPorn',
|
95 |
+
'WeatherPorn',
|
96 |
+
'BotanicalPorn',
|
97 |
+
'LakePorn',
|
98 |
+
'BeachPorn',
|
99 |
+
'WaterPorn',
|
100 |
+
'SpacePorn',
|
101 |
+
'FirePorn',
|
102 |
+
'DesertPorn',
|
103 |
+
'WinterPorn',
|
104 |
+
'GeologyPorn',
|
105 |
+
'MushroomPorn',
|
106 |
+
'SpringPorn',
|
107 |
+
'SummerPorn',
|
108 |
+
'LavaPorn',
|
109 |
+
'multiwall',
|
110 |
+
'offensive_wallpapers',
|
111 |
+
'HI_Res',
|
112 |
+
'Forest',
|
113 |
+
'ArtFundamentals',
|
114 |
+
'Art',
|
115 |
+
'HighResCelebs',
|
116 |
+
'CelebEvents',
|
117 |
+
'UltraHighResCelebs',
|
118 |
+
'UHQcelebs',
|
119 |
+
'celebrityboners',
|
120 |
+
'HRCelebs',
|
121 |
+
'UHQDesi',
|
122 |
+
'4kcelebs',
|
123 |
+
'UltraHighResBollywood',
|
124 |
+
'Astronomy',
|
125 |
+
'MakeupAddiction',
|
126 |
+
'ultrahdwallpapers',
|
127 |
+
'FlowerPorn',
|
128 |
+
'mountainporn',
|
129 |
+
'RiverPorn',
|
130 |
+
'F1Porn',
|
131 |
+
'jameswebb',
|
132 |
+
'randomsexiness',
|
133 |
+
'gentlemanboners',
|
134 |
+
'BeautifulFemales',
|
135 |
+
'RoyalWarReport',
|
136 |
+
'DesiPetiteCelebs',
|
137 |
+
'travelphotos',
|
138 |
+
'IncredibleIndia',
|
139 |
+
'interestingasfuck',
|
140 |
+
'MuseumPorn',
|
141 |
+
'Fruitgore',
|
142 |
+
'FestivalSluts',
|
143 |
+
'festivals',
|
144 |
+
'AlternateAngles',
|
145 |
+
'ColorizedHistory',
|
146 |
+
'MoviemaniaHQ',
|
147 |
+
'IndianFoodPhotos',
|
148 |
+
|
149 |
+
'surrealism',
|
150 |
+
|
151 |
+
'Comics',
|
152 |
+
'SpecArt',
|
153 |
+
|
154 |
+
'WTF',
|
155 |
+
|
156 |
+
'OldSchoolCool',
|
157 |
+
|
158 |
+
'pics',
|
159 |
+
'wwiipics',
|
160 |
+
'travelpics',
|
161 |
+
|
162 |
+
'photography',
|
163 |
+
'fashionphotography',
|
164 |
+
'productphotography',
|
165 |
+
'travelphotography',
|
166 |
+
|
167 |
+
'photocritique',
|
168 |
+
'itookapicture',
|
169 |
+
'PictureChallenge',
|
170 |
+
'photoit',
|
171 |
+
'postprocessing',
|
172 |
+
|
173 |
+
|
174 |
+
'outside',
|
175 |
+
|
176 |
+
'TheWayWeWere',
|
177 |
+
|
178 |
+
'Cinemagraphs',
|
179 |
+
|
180 |
+
'BlackPeopleTwitter',
|
181 |
+
|
182 |
+
'WtSSTaDaMiT',
|
183 |
+
|
184 |
+
'Eyebleach',
|
185 |
+
|
186 |
+
'me_irl',
|
187 |
+
|
188 |
+
'Posters',
|
189 |
+
'PropagandaPosters',
|
190 |
+
'PlexPosters',
|
191 |
+
|
192 |
+
'painting',
|
193 |
+
]))
|
194 |
+
|
195 |
+
|
196 |
+
subreddit_sort_l = [
|
197 |
+
"hot",
|
198 |
+
"new",
|
199 |
+
"top",
|
200 |
+
"controversial",
|
201 |
+
"rising",
|
202 |
+
]
|
203 |
+
|
204 |
+
subreddit_t_l = [
|
205 |
+
"hour",
|
206 |
+
"day",
|
207 |
+
"week",
|
208 |
+
"month",
|
209 |
+
"year",
|
210 |
+
"all",
|
211 |
+
]
|
reddit/reddit_processor.py
ADDED
@@ -0,0 +1,163 @@
import os
import pandas as pd
import urllib3
import json
from bs4 import BeautifulSoup
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import Future

from reddit.reddit_info import subreddit_name_l, subreddit_sort_l, subreddit_t_l
import itertools
import random
from pathlib import Path

from tqdm import tqdm

from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from requests.utils import requote_uri
from random_word import RandomWords
from wonderwords import RandomSentence


class RedditProcessor:
    def get_subreddit_url(self, subreddit, sort_by: str = "hot", sort_time: str = "all", limit: int = 100, query: Optional[str] = None):
        # Listing endpoint when there is no query; search endpoint otherwise.
        if not query:
            return f'https://www.reddit.com/r/{subreddit}/{sort_by}/.json?raw_json=1&t={sort_time}&limit={limit}'
        else:
            return f'https://www.reddit.com/r/{subreddit}/search/.json?raw_json=1&q={query}&limit={limit}'

    def fetch_subreddit_image_entries(self, subreddit_url: str, pool_manager) -> List[Dict]:
        """Fetch one listing page and return the metadata of its image posts."""
        result = []
        try:
            response = pool_manager.request('GET', subreddit_url)
            subreddit_data = json.loads(response.data)

            if "data" not in subreddit_data:
                return []
            if "children" not in subreddit_data["data"]:
                return []

            for content in subreddit_data['data']['children']:
                try:
                    if content['data'].get('post_hint', 'none') == 'image' and 'preview' in content['data']:
                        created_utc = datetime.fromtimestamp(content['data']["created_utc"], timezone.utc)

                        source_d = content['data']['preview']['images'][0]['source']
                        image_url, width, height = source_d['url'], source_d["width"], source_d["height"]
                        image_title = content['data']['title']
                        image_id = content['data']['id']
                        data_url = content['data']['url']
                        subreddit = content['data']['subreddit']
                        if content['data']['is_video']:
                            continue
                        result.append({
                            "image_url": image_url,
                            "title": image_title,
                            "image_id": image_id,
                            "url": data_url,
                            "subreddit": subreddit,
                            "width": width,
                            "height": height,
                            "created_utc": created_utc,
                        })
                except Exception:
                    # Skip malformed posts and keep the rest of the page.
                    pass
            return result
        except Exception:
            # Network or JSON errors: return an empty batch.
            return []

    def fetch_multiple_subreddit_image_entries(self, subreddit_urls: List[str], thread_pool_size: int = 5, urllib_pool_size: int = 5) -> List[Dict]:
        """Fetch several listing pages in parallel and de-duplicate the results by image_id."""
        pool_manager = urllib3.PoolManager(maxsize=urllib_pool_size)
        res: List[Dict] = []

        with ThreadPoolExecutor(thread_pool_size) as thread_pool:
            res_futs: List[Future] = [
                thread_pool.submit(self.fetch_subreddit_image_entries, subreddit_url, pool_manager)
                for subreddit_url in subreddit_urls
            ]
            for r in res_futs:
                res.extend(r.result())

        return list({x["image_id"]: x for x in res}.values())

    def get_random_subreddit_urls(self, num_urls: int = 20) -> List[str]:
        subr_l = list(itertools.product(subreddit_name_l, subreddit_sort_l, subreddit_t_l))
        return [self.get_subreddit_url(*xs, 100) for xs in random.sample(subr_l, k=num_urls)]

    def get_random_subreddit_query_urls(self, num_urls: int = 20, query_type: str = "chronology") -> List[str]:
        '''
        query_type:
            chronology
            random_word
            random_phrase
        '''
        timeline = random.choices(["days", "months", "years"], k=num_urls)
        timevalue = random.choices(range(1, 12), k=num_urls)
        subr = random.sample(subreddit_name_l, k=num_urls)

        if query_type == "chronology":
            return [self.get_subreddit_url(subreddit=sr, query=f"{tv} {tl} ago") for (sr, tl, tv) in itertools.product(subr, timeline, timevalue)]
        elif query_type == "random_word":
            r = RandomWords()
            return [self.get_subreddit_url(subreddit=sr, query=f"{r.get_random_word()}") for sr in subr]
        elif query_type == "random_phrase":
            s = RandomSentence()
            return [self.get_subreddit_url(subreddit=sr, query=f"{s.sentence()}") for sr in subr]
        else:
            return []

    def __call__(self, reddit_out_file: os.PathLike) -> Any:
        dfname = reddit_out_file
        otime = 0

        tarr = []  # time spent per round (last 25 rounds)
        karr = []  # new rows written per round (last 25 rounds)

        total_updates = 0

        with tqdm(total=10000) as pbar:
            for _ in range(10000):
                # Mix plain listing URLs with random-phrase search URLs.
                if random.random() > 0.6:
                    res = self.fetch_multiple_subreddit_image_entries(self.get_random_subreddit_urls(num_urls=100))
                else:
                    res = self.fetch_multiple_subreddit_image_entries(
                        self.get_random_subreddit_query_urls(num_urls=5, query_type="random_phrase"))

                if res:
                    if not Path(dfname).exists():
                        pd.DataFrame(res).to_csv(dfname, index=False)
                        karr.append(len(res))
                    else:
                        df = pd.read_csv(dfname)
                        keys = set(df["image_id"])
                        cres = [x for x in res if x["image_id"] not in keys]

                        if cres:
                            ndf = pd.DataFrame(cres)
                            ndf.to_csv(dfname, mode="a", header=None, index=False)
                            karr.append(len(cres))
                        else:
                            karr.append(0)
                else:
                    karr.append(0)

                ntime = pbar.format_dict['elapsed']
                N = len(pd.read_csv(dfname)) if Path(dfname).exists() else 0
                tarr.append(ntime - otime)
                otime = ntime
                tarr = tarr[-25:]
                karr = karr[-25:]
                rate = sum(karr) / sum(tarr)
                pbar.update(1)
                total_updates = total_updates + karr[-1]
                pbar.set_description_str(f"count:{N}, fetch rate:{rate:.3f}, last_update:{karr[-1]}, total_updates:{total_updates}")
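A minimal usage sketch for the processor above (illustrative, not part of the commit; the output filename is a hypothetical example):

# Illustrative sketch (not part of the commit): collect image metadata into a CSV.
from reddit.reddit_processor import RedditProcessor

processor = RedditProcessor()
urls = processor.get_random_subreddit_urls(num_urls=10)
entries = processor.fetch_multiple_subreddit_image_entries(urls)  # list of dicts, de-duplicated by image_id
print(len(entries), "entries fetched")

processor("reddit_images.csv")  # long-running loop (10,000 fetch rounds); appends new rows and shows progress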
reddit/redditd.py
ADDED
@@ -0,0 +1,142 @@
import argparse
import os
import random
import glob
from os import path as osp
from typing import Optional
from .core import WallpaperDownloader

import pandas as pd

from pathlib import Path
from types import SimpleNamespace


def save_as_dataframe(preferences_file, subreddit_name, parent_dir) -> None:
    """
    Saves the downloader preferences (image URLs and metadata) as a dataframe at <parent_dir>/reddit.csv.
    """
    parent_dir = Path(parent_dir)
    reddit_df_path = parent_dir / "reddit.csv"

    original_len = 0

    try:
        # pandas.read_json has no parse_dates argument; convert_dates=False keeps timestamps as-is.
        df = pd.read_json(preferences_file, convert_dates=False)
        rdf = df.urls.apply(pd.Series)
        ndf = df.join(rdf)
        ndf = ndf.drop(columns=["urls"]).reset_index()
        ndf["subreddit"] = subreddit_name
        ndf["downloaded"] = False
    except Exception:
        return

    try:
        if reddit_df_path.exists():
            tdf = pd.read_csv(reddit_df_path)
            original_len = len(tdf)
            tdf = pd.concat([tdf, ndf], axis=0).drop_duplicates(subset=['index'])
        else:
            tdf = ndf
    except (FileNotFoundError, pd.errors.EmptyDataError):
        tdf = ndf

    new_len = len(tdf)
    if new_len != original_len:
        tdf.to_csv(reddit_df_path, index=False)


def rdownloader(d: Optional[SimpleNamespace] = None):
    """
    Runs the main code. Downloads images, then sets and switches wallpapers.
    """
    if d is None:
        # Process user inputs.
        args = parse_args()
    else:
        args = d

    parent_dir = Path(args.save_dir).expanduser()
    save_dir = parent_dir.expanduser() / args.subreddit
    save_dir.mkdir(parents=True, exist_ok=True)
    save_dir = str(save_dir)

    # Set up the downloader.
    downloader = WallpaperDownloader(subreddit=args.subreddit,
                                     sort_time=args.sort_time,
                                     sort_by=args.sort_by,
                                     save_dir=save_dir)

    # Download and record the total number of downloads.
    total_downloaded = downloader.download(max_count=args.max_download_count)
    save_as_dataframe(downloader._preferences_file, args.subreddit, parent_dir)

    if args.download_only:
        return args.subreddit, total_downloaded

    # Now set wallpapers randomly from the wallpaper folder.
    preferences = downloader.preferences
    wallpapers = glob.glob(osp.join(preferences['wallpaper_dir'], '*.jpg'))
    random.shuffle(wallpapers)

    total_iters = int((args.run_for * 60) / args.update_every)

    assert total_iters >= 1, "See help for run_for and update_every"

    if total_iters >= len(wallpapers):
        # Repeat the wallpaper list so it covers every iteration.
        to_extend = int(total_iters / len(wallpapers)) + 1
        wallpapers_copy = []
        for _ in range(to_extend):
            wallpapers_copy.extend(wallpapers.copy())
        wallpapers = wallpapers_copy

    for i in range(total_iters):
        if preferences['os_type'] == 'Linux':
            command = "gsettings set org.gnome.desktop.background picture-uri file:" + wallpapers[i]
        elif preferences['os_type'] == 'Darwin':
            command = "osascript -e 'tell application \"Finder\" to set desktop picture to POSIX file \"" + \
                      wallpapers[i] + "\"'"
        else:
            raise NotImplementedError('Implemented only for Linux and Mac.')

        os.system(command)


def parse_args():
    """
    Fetch user inputs from the command line.
    """
    parser = argparse.ArgumentParser(
        description='Download images and set wallpaper from your choice of subreddit!')
    parser.add_argument('--subreddit', type=str, default='wallpaper',
                        help='Your choice of subreddit to download images from. Default is "wallpaper".')
    parser.add_argument('--sort_by', type=str, default='hot',
                        help='Select sort-by option. Default is "hot". Options: hot|new|rising|top')
    parser.add_argument('--sort_time', type=str, default='day',
                        help='Sort time for subreddit. Default is "day". Options: all|year|month|week|day')
    parser.add_argument('--max_download_count', type=int, default=20,
                        help='Maximum number of images to download. Default is 20.')
    parser.add_argument('--save_dir', type=str, default='~/me/data/reddit',
                        help='Where to save downloaded images. Default is "~/me/data/reddit".')
    parser.add_argument('--download_only', action='store_true', default=False,
                        help='Only download the photos. Skip setting up the wallpaper.')
    parser.add_argument('--update_every', type=int, default=30,
                        help='Update the wallpaper after how many minutes? Default is 30 minutes.')
    parser.add_argument('--run_for', type=int, default=24,
                        help='How long to keep updating the wallpaper, in hours. Default is 24 hours.')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    rdownloader()
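A minimal sketch of calling `rdownloader` programmatically (illustrative, not part of the commit; it mirrors the `SimpleNamespace` pattern used by `run_reddit.old.py` below and assumes `reddit.core.WallpaperDownloader` is importable):

# Illustrative sketch (not part of the commit): drive rdownloader without argparse
# by passing a SimpleNamespace with the same fields as parse_args().
from types import SimpleNamespace
from reddit.redditd import rdownloader

args = SimpleNamespace(subreddit="EarthPorn", sort_by="top", sort_time="week",
                       max_download_count=20, save_dir="~/me/data/reddit",
                       download_only=True, update_every=30, run_for=24)
subreddit, total_downloaded = rdownloader(args)
print(subreddit, total_downloaded)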
reddit/subreddits.py
ADDED
@@ -0,0 +1,194 @@
SUBREDDITS = list(dict.fromkeys([
    'BollywoodUHQonly',

    'Wallpapers',
    'wallpaper',

    'CityPorn',
    'VillagePorn',
    'RuralPorn',
    'ArchitecturePorn',
    'HousePorn',
    'CabinPorn',
    'ChurchPorn',
    'AbandonedPorn',
    'CemeteryPorn',
    'InfrastructurePorn',
    'MachinePorn',
    'carporn',
    'F1Porn',
    'MotorcyclePorn',
    'MilitaryPorn',
    'GunPorn',
    'KnifePorn',
    'BoatPorn',
    'RidesPorn',
    'DestructionPorn',
    'ThingsCutInHalfPorn',
    'StarshipPorn',
    'ToolPorn',
    'TechnologyPorn',
    'BridgePorn',
    'PolicePorn',
    'SteamPorn',
    'RetailPorn',
    'SpaceFlightPorn',
    'roadporn',
    'drydockporn',
    'AnimalPorn',
    'HumanPorn',
    'EarthlingPorn',
    'AdrenalinePorn',
    'ClimbingPorn',
    'SportsPorn',
    'AgriculturePorn',
    'TeaPorn',
    'BonsaiPorn',
    'FoodPorn',
    'CulinaryPorn',
    'DessertPorn',
    'DesignPorn',
    'RoomPorn',
    'AlbumArtPorn',
    'MetalPorn',
    'MoviePosterPorn',
    'TelevisionPosterPorn',
    'ComicBookPorn',
    'StreetArtPorn',
    'AdPorn',
    'ArtPorn',
    'FractalPorn',
    'InstrumentPorn',
    'ExposurePorn',
    'MacroPorn',
    'MicroPorn',
    'GeekPorn',
    'MTGPorn',
    'GamerPorn',
    'PowerWashingPorn',
    'AerialPorn',
    'OrganizationPorn',
    'FashionPorn',
    'AVPorn',
    'ApocalypsePorn',
    'InfraredPorn',
    'ViewPorn',
    'HellscapePorn',
    'sculptureporn',

    'HighResPrints',
    'SpaceWallpapers',

    'ArtefactPorn',
    'MegalithPorn',
    'FossilPorn',
    'FuturePorn',
    'QuotesPorn',
    'NewsPorn',
    'BookPorn',
    'UniformPorn',
    'HistoryPorn',
    'AutumnPorn',
    'EarthPorn',
    'SkyPorn',
    'SeaPorn',
    'WeatherPorn',
    'BotanicalPorn',
    'LakePorn',
    'BeachPorn',
    'WaterPorn',
    'SpacePorn',
    'FirePorn',
    'DesertPorn',
    'WinterPorn',
    'GeologyPorn',
    'MushroomPorn',
    'SpringPorn',
    'SummerPorn',
    'LavaPorn',
    'multiwall',
    'offensive_wallpapers',
    'HI_Res',
    'Forest',
    'ArtFundamentals',
    'Art',
    'HighResCelebs',
    'CelebEvents',
    'UltraHighResCelebs',
    'UHQcelebs',
    'celebrityboners',
    'HRCelebs',
    'UHQDesi',
    '4kcelebs',
    'UltraHighResBollywood',
    'Astronomy',
    'MakeupAddiction',
    'ultrahdwallpapers',
    'FlowerPorn',
    'mountainporn',
    'RiverPorn',
    'F1Porn',
    'jameswebb',
    'randomsexiness',
    'gentlemanboners',
    'BeautifulFemales',
    'RoyalWarReport',
    'DesiPetiteCelebs',
    'travelphotos',
    'IncredibleIndia',
    'interestingasfuck',
    'MuseumPorn',
    'Fruitgore',
    'FestivalSluts',
    'festivals',
    'AlternateAngles',
    'ColorizedHistory',
    'MoviemaniaHQ',
    'IndianFoodPhotos',

    'surrealism',

    'Comics',
    'SpecArt',

    'WTF',

    'OldSchoolCool',

    'pics',
    'wwiipics',
    'travelpics',

    'photography',
    'fashionphotography',
    'productphotography',
    'travelphotography',

    'photocritique',
    'itookapicture',
    'PictureChallenge',
    'photoit',
    'postprocessing',

    'outside',

    'TheWayWeWere',

    'Cinemagraphs',

    'BlackPeopleTwitter',

    'WtSSTaDaMiT',

    'Eyebleach',

    'me_irl',

    'Posters',
    'PropagandaPosters',
    'PlexPosters',

    'painting',
]))
reddit/utils.py
ADDED
@@ -0,0 +1,92 @@
import json
import os
import platform
import pwd
import time
from typing import List, Dict
from os import path as osp

import urllib.request as urllib2  # For Python 3.x


class Utils:
    """
    Bunch of utils used for the Reddit Downloader.
    """

    @staticmethod
    def save_to_preferences(preferences: Dict[str, Dict], preferences_file: str):
        """
        Save the preferences to a JSON file.
        :param preferences: Dict containing preferences to save to file.
        :param preferences_file: Location of the file where you want to save.
        """
        with open(preferences_file, 'w') as f:
            json.dump(preferences, f)

    @staticmethod
    def load_preferences(preferences_file: str) -> Dict:
        """
        Load the preferences from a JSON file and return them as a Dict.
        :param preferences_file: Location of the file containing the preferences.
        :return: preferences - Dict containing the saved preferences.
        """
        with open(preferences_file, 'r') as f:
            preferences = json.load(f)

        return preferences

    @staticmethod
    def get_os():
        """
        Get the OS type (Linux or Mac), and set the wallpaper folder accordingly.
        :return:
            os_type: Type of OS ('Linux' for Linux, 'Darwin' for Mac).
            wallpapers_directory: Directory where the wallpapers will be saved.
        """
        os_type = platform.system()
        assert os_type in {'Darwin', 'Linux'}

        # Get the username.
        username = pwd.getpwuid(os.getuid()).pw_name

        # Set the directory to download images to.
        wallpapers_directory = '/Users/{}/Pictures/Wallpapers/'.format(username) if os_type == 'Darwin' \
            else '/home/{}/Wallpapers/'.format(username)

        return os_type, wallpapers_directory

    @staticmethod
    def remove_unwanted_images(images: List[str]):
        """
        Remove unwanted images. Since this is a naive approach, we might end up downloading some
        unwanted files, so we delete them.
        :param images: List of image file locations to filter and remove unwanted images from.
        """
        count_removed = 0
        for image in images:
            # Files under 100 KB are usually stray HTML pages rather than images.
            # This is a fairly quick and naive approach to downloading images from reddit.
            if osp.getsize(image) < 102400:
                os.remove(image)
                count_removed += 1
        return count_removed

    @staticmethod
    def fetch_subreddit_data(subreddit_url: str, max_trials: int = 20) -> Dict:
        """
        Fetch the subreddit JSON page based on the URL.
        :param subreddit_url: URL created based on user inputs (subreddit, sort_type, sort_time, max_download_count).
        :param max_trials: Maximum number of trials to use for fetching the subreddit JSON data.
        :return: subreddit_data - Nested Dict containing subreddit data for the query.
        """
        subreddit_data = None
        for _ in range(max_trials):
            try:
                subreddit_page = urllib2.urlopen(subreddit_url)
                subreddit_data = json.load(subreddit_page)
                break
            except Exception:
                time.sleep(0.2)  # If we cannot access the reddit page, wait 0.2 seconds and retry.

        return subreddit_data
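A minimal sketch of the fetch helper above (illustrative, not part of the commit; the URL follows the listing format used elsewhere in this commit):

# Illustrative sketch (not part of the commit): fetch one subreddit listing as JSON with retries.
from reddit.utils import Utils

url = "https://www.reddit.com/r/wallpaper/hot/.json?raw_json=1&t=day&limit=25"
data = Utils.fetch_subreddit_data(url, max_trials=5)
if data is not None:
    print(len(data["data"]["children"]), "posts fetched")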
reddit_urls_10k.txt
ADDED
The diff for this file is too large to render.
reddit_urls_50k.txt
ADDED
The diff for this file is too large to render.
requirements.txt
ADDED
@@ -0,0 +1 @@
rich
run_reddit.old.py
ADDED
@@ -0,0 +1,146 @@
#%%
from pathlib import Path
from typing import Any
from types import SimpleNamespace

import time
import random
import filetype
import numpy as np

from rich.live import Live
from rich.table import Table

from reddit import rdownloader, SUBREDDITS


class UCB:
    def __init__(self, K) -> None:
        self.K = K
        self.xs = np.zeros(self.K)   # cumulative reward per arm
        self.Ts = np.zeros(self.K)   # pull count per arm
        self.delta = 1. / self.K
        self.ucb = np.empty(self.K)
        self.ucb.fill(10000.)        # force every arm to be tried at least once

    def update(self, idx, reward):
        self.xs[idx] += reward
        self.Ts[idx] += 1
        self.ucb[idx] = (self.xs[idx] / self.Ts[idx]) + 2 * np.sqrt(1. / self.delta * (self.Ts[idx]))

    def get_index(self):
        return np.argmax(self.ucb)


class RedditDownloader:
    def initialize_subreddit_counter(self):
        for subreddit in SUBREDDITS:
            subreddit_folder = self.reddit_root_dir / subreddit

            imfiles = [imfile for imfile in subreddit_folder.glob('*') if filetype.is_image(imfile)]

            self.subreddit_counter[subreddit] = len(imfiles)
            self.changed_status[subreddit] = False

    def __init__(self) -> None:
        self.reddit_root_dir = Path("~/me/data/reddit").expanduser()
        self.subreddit_counter = {}
        self.changed_status = {}

        self.initialize_subreddit_counter()

    def generate_table(self, overall_run_status=True, subreddit=None, running=False) -> Table:
        """Make a new table."""
        table = Table()

        overall_run_status_col = "green" if overall_run_status else "yellow"

        table.add_column(f"[{overall_run_status_col}]Subreddit")
        table.add_column("Total Images")

        total = 0

        for subr, kount in self.subreddit_counter.items():
            changed_num_col = "orchid" if self.changed_status[subr] else "blue"

            if subr != subreddit:
                table.add_row(f"{subr}", f"[{changed_num_col}]{kount}")
            else:
                if not running:
                    table.add_row(f"[magenta3]{subr}", f"[{changed_num_col}]{kount}")
                else:
                    table.add_row(f"[green]{subr}", f"[{changed_num_col}]{kount}")
            total += kount

        table.add_section()
        table.add_row("[silver]Total", f"[blue]{total}")

        return table

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        run_list = set(SUBREDDITS)

        delays = 0

        with Live(self.generate_table(overall_run_status=True), auto_refresh=False, refresh_per_second=0.01) as live:
            while True:
                chosen_sort_by = random.choice(['new', 'hot', 'top', 'rising'])
                chosen_sort_time = random.choice(['day', 'week', 'month', 'year', 'all'])

                for subreddit in SUBREDDITS:
                    self.changed_status[subreddit] = False

                for subreddit in SUBREDDITS:
                    if subreddit not in run_list:
                        continue

                    overall_run_status = len(run_list) > 0

                    live.update(self.generate_table(overall_run_status, subreddit, running=True), refresh=True)
                    d = {}
                    d["subreddit"] = subreddit
                    d["sort_by"] = chosen_sort_by
                    d["sort_time"] = chosen_sort_time
                    d["max_download_count"] = 200
                    d["save_dir"] = self.reddit_root_dir
                    d["download_only"] = True
                    d["update_every"] = 20
                    d["run_for"] = 24
                    d = SimpleNamespace(**d)
                    rsubreddit, total_downloaded = rdownloader(d)

                    if subreddit in self.subreddit_counter:
                        self.subreddit_counter[subreddit] += total_downloaded
                    else:
                        self.subreddit_counter[subreddit] = total_downloaded

                    if total_downloaded == 0:
                        run_list.remove(subreddit)

                    self.changed_status[subreddit] = (total_downloaded > 0)

                    overall_run_status = len(run_list) > 0

                    live.update(self.generate_table(overall_run_status, subreddit, running=False), refresh=True)

                if len(run_list) == 0:
                    time.sleep(120)
                    delays = 0
                    run_list = set(SUBREDDITS)
                else:
                    delays += 30
                    time.sleep(30)
                    if delays > 120:
                        run_list = set(SUBREDDITS)
                    if run_list == set(SUBREDDITS):
                        delays = 0


if __name__ == "__main__":
    RedditDownloader()()

# %%
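The `UCB` helper above is defined but never used by `RedditDownloader`. A hedged sketch of how it could choose the next subreddit, with simulated rewards standing in for download counts (illustrative only; run inside this module so `UCB` and `SUBREDDITS` are in scope):

# Illustrative sketch (not part of the commit): let the UCB helper pick which subreddit
# to poll next, rewarding each arm by how many images it returned. The Poisson draw below
# is a stand-in for a real download count, e.g. the value returned by rdownloader(...).
import numpy as np

ucb = UCB(K=len(SUBREDDITS))
rng = np.random.default_rng(0)
for _ in range(100):
    idx = int(ucb.get_index())      # arm with the highest upper confidence bound
    reward = rng.poisson(5)         # hypothetical: images downloaded from SUBREDDITS[idx]
    ucb.update(idx, reward)
print("next pick:", SUBREDDITS[int(ucb.get_index())])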