parquet-converter committed on
Commit
f5dd3a1
1 Parent(s): 3f776fe

Update parquet files

Notebooks.txt DELETED
@@ -1,3 +0,0 @@
1
- https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks/Fast-Dreambooth-v1.5.ipynb
2
- https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks/Fast-Dreambooth-v2.ipynb
3
- https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks/Fast-SD-A1111.ipynb
 
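These three URLs are what the Deps() helpers in the Scripts below pull into /notebooks/Latest_Notebooks. A minimal sketch of the call they use:

    # Download every notebook listed in Notebooks.txt into the current directory.
    from subprocess import call
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)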
 
Notebooks/Fast-Dreambooth-v1.5.ipynb DELETED
@@ -1,400 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "id": "494d5ce4-5843-4d70-ae96-c1983e21b6e8",
6
- "metadata": {},
7
- "source": [
8
- "## Dreambooth v1.5 Paperspace Notebook from https://github.com/TheLastBen/fast-stable-diffusion. If you encounter any issues, feel free to discuss them. [Support](https://ko-fi.com/thelastben)"
9
- ]
10
- },
11
- {
12
- "cell_type": "markdown",
13
- "id": "8afdca63-eff3-4a9d-b4d9-127c0f028033",
14
- "metadata": {
15
- "tags": []
16
- },
17
- "source": [
18
- "# Dependencies"
19
- ]
20
- },
21
- {
22
- "cell_type": "code",
23
- "execution_count": null,
24
- "id": "be74b2d5-da96-4bf4-ae82-4fe4b8abc04c",
25
- "metadata": {
26
- "tags": []
27
- },
28
- "outputs": [],
29
- "source": [
30
- "# Install the dependencies\n",
31
- "\n",
32
- "force_reinstall= False\n",
33
- "\n",
34
- "# Set to True only if you want to install the dependencies again.\n",
35
- "\n",
36
- "\n",
37
- "#--------------------\n",
38
- "with open('/dev/null', 'w') as devnull:import requests, os, time, importlib;open('/notebooks/mainpaperspacev1.py', 'wb').write(requests.get('https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Scripts/mainpaperspacev1.py').content); os.chdir('/notebooks');time.sleep(3);import mainpaperspacev1;importlib.reload(mainpaperspacev1);from mainpaperspacev1 import *;Deps(force_reinstall)"
39
- ]
40
- },
41
- {
42
- "cell_type": "markdown",
43
- "id": "7a4ef4a2-6863-4603-9254-a1e2a547ee38",
44
- "metadata": {
45
- "tags": []
46
- },
47
- "source": [
48
- "# Download the model"
49
- ]
50
- },
51
- {
52
- "cell_type": "code",
53
- "execution_count": null,
54
- "id": "a1ba734e-515b-4761-8c88-ef7f165d7971",
55
- "metadata": {
56
- "tags": []
57
- },
58
- "outputs": [],
59
- "source": [
60
- "# Leave everything EMPTY to use the original model\n",
61
- "\n",
62
- "Path_to_HuggingFace= \"\"\n",
63
- "\n",
64
- "# Load and finetune a model from Hugging Face; use the format \"profile/model\", e.g. runwayml/stable-diffusion-v1-5\n",
65
- "\n",
66
- "\n",
67
- "CKPT_Path = \"\"\n",
68
- "\n",
69
- "# Load a CKPT model from the storage.\n",
70
- "\n",
71
- "\n",
72
- "CKPT_Link = \"\"\n",
73
- "\n",
74
- "# A CKPT direct link, huggingface CKPT link or a shared CKPT from gdrive.\n",
75
- "\n",
76
- "\n",
77
- "#----------------\n",
78
- "MODEL_NAME=dl(Path_to_HuggingFace, CKPT_Path, CKPT_Link)"
79
- ]
80
- },
81
- {
82
- "cell_type": "markdown",
83
- "id": "4c6c4932-e614-4f5e-8d4a-4feca5ce54f5",
84
- "metadata": {},
85
- "source": [
86
- "# Create/Load a Session"
87
- ]
88
- },
89
- {
90
- "cell_type": "code",
91
- "execution_count": null,
92
- "id": "b6595c37-8ad2-45ff-a055-fe58c6663d2f",
93
- "metadata": {
94
- "tags": []
95
- },
96
- "outputs": [],
97
- "source": [
98
- "Session_Name = \"\"\n",
99
- "\n",
100
- "# Enter the session name; if it exists, it will be loaded, otherwise a new session will be created.\n",
101
- "\n",
102
- "\n",
103
- "Session_Link_optional = \"\"\n",
104
- "\n",
105
- "# Import a session from another gdrive; the shared gdrive link must point to the specific session's folder that contains the trained CKPT. Remove any intermediary CKPTs first.\n",
106
- "\n",
107
- "\n",
108
- "#-----------------\n",
109
- "[PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAME, resume]=sess(Session_Name, Session_Link_optional, MODEL_NAME if 'MODEL_NAME' in locals() else \"\")"
110
- ]
111
- },
112
- {
113
- "cell_type": "markdown",
114
- "id": "5698de61-08d3-4d90-83ef-f882ed956d01",
115
- "metadata": {},
116
- "source": [
117
- "# Instance Images"
118
- ]
119
- },
120
- {
121
- "cell_type": "code",
122
- "execution_count": null,
123
- "id": "bc2f8f28-226e-45b8-8257-804bbb711f56",
124
- "metadata": {
125
- "tags": []
126
- },
127
- "outputs": [],
128
- "source": [
129
- "Remove_existing_instance_images= True\n",
130
- "\n",
131
- "# Set to False to keep the existing instance images if any.\n",
132
- "\n",
133
- "\n",
134
- "IMAGES_FOLDER_OPTIONAL=\"\"\n",
135
- "\n",
136
- "# If you prefer to specify the folder of the pictures directly instead of uploading, the pictures will be added to the existing instance images (if any). Leave EMPTY to upload.\n",
137
- "\n",
138
- "\n",
139
- "Smart_crop_images= True\n",
140
- "\n",
141
- "# Automatically crop your input images.\n",
142
- "\n",
143
- "\n",
144
- "Crop_size = 512\n",
145
- "\n",
146
- "# Choices: \"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"\n",
147
- "\n",
148
- "# Check out this example for naming: https://i.imgur.com/d2lD3rz.jpeg\n",
149
- "\n",
150
- "\n",
151
- "#-----------------\n",
152
- "uplder(Remove_existing_instance_images, Smart_crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, False)"
153
- ]
154
- },
155
- {
156
- "cell_type": "markdown",
157
- "id": "0e93924f-a6bf-45d5-aa77-915ad7385dcd",
158
- "metadata": {},
159
- "source": [
160
- "# Manual Captioning"
161
- ]
162
- },
163
- {
164
- "cell_type": "code",
165
- "execution_count": null,
166
- "id": "c5dbcb29-b42f-4cfc-9be8-83355838d5a2",
167
- "metadata": {
168
- "tags": []
169
- },
170
- "outputs": [],
171
- "source": [
172
- "# Open a tool to manually caption the instance images.\n",
173
- "\n",
174
- "#-----------------\n",
175
- "caption(CAPTIONS_DIR, INSTANCE_DIR)"
176
- ]
177
- },
178
- {
179
- "cell_type": "markdown",
180
- "id": "c90140c1-6c91-4cae-a222-e1a746957f95",
181
- "metadata": {},
182
- "source": [
183
- "# Concept Images"
184
- ]
185
- },
186
- {
187
- "cell_type": "code",
188
- "execution_count": null,
189
- "id": "55c27688-8601-4943-b61d-fc48b9ded067",
190
- "metadata": {},
191
- "outputs": [],
192
- "source": [
193
- "Remove_existing_concept_images= True\n",
194
- "\n",
195
- "# Set to False to keep the existing concept images if any.\n",
196
- "\n",
197
- "\n",
198
- "IMAGES_FOLDER_OPTIONAL=\"\"\n",
199
- "\n",
200
- "# If you prefer to specify the folder of the pictures directly instead of uploading, the pictures will be added to the existing concept images (if any). Leave EMPTY to upload.\n",
201
- "\n",
202
- "\n",
203
- "#-----------------\n",
204
- "uplder(Remove_existing_concept_images, True, 512, IMAGES_FOLDER_OPTIONAL, CONCEPT_DIR, CAPTIONS_DIR, True)"
205
- ]
206
- },
207
- {
208
- "cell_type": "markdown",
209
- "id": "2a4aa42a-fd68-41ad-9ba7-da99f834e2c1",
210
- "metadata": {},
211
- "source": [
212
- "# Dreambooth"
213
- ]
214
- },
215
- {
216
- "cell_type": "code",
217
- "execution_count": null,
218
- "id": "612d8335-b984-4f34-911d-5457ff98e507",
219
- "metadata": {},
220
- "outputs": [],
221
- "source": [
222
- "Resume_Training = False\n",
223
- "\n",
224
- "# If you're not satisfied with the result, set to True and run the cell again; it will continue training the current model.\n",
225
- "\n",
226
- "\n",
227
- "UNet_Training_Steps=1500\n",
228
- "\n",
229
- "UNet_Learning_Rate = \"4e-6\"\n",
230
- "\n",
231
- "# If you use 10 images, use 1500 steps; if you're not satisfied with the result, resume training for another 200 steps, and so on...\n",
232
- "\n",
233
- "\n",
234
- "Text_Encoder_Training_Steps=300\n",
235
- "\n",
236
- "Text_Encoder_Learning_Rate= \"1e-6\"\n",
237
- "\n",
238
- "# 350-600 steps is enough for a small dataset; keep this number small to avoid overfitting. Set to 0 to disable, and set it to 0 before resuming training if the text encoder is already trained.\n",
239
- "\n",
240
- "\n",
241
- "Text_Encoder_Concept_Training_Steps=0\n",
242
- "\n",
243
- "# Suitable for training a style/concept as it acts as regularization; with a minimum of 300 steps, 1 step/image is enough to train the concept(s). Set to 0 to disable; set both settings above to 0 to finetune only the text_encoder on the concept; set it to 0 before resuming training if it is already trained.\n",
244
- "\n",
245
- "\n",
246
- "External_Captions= False\n",
247
- "\n",
248
- "# Get the captions from a text file for each instance image.\n",
249
- "\n",
250
- "\n",
251
- "Style_Training=False\n",
252
- "\n",
253
- "# Further reduces overfitting; suitable when training a style or a general theme. Don't enable it at the beginning, enable it after training for at least 800 steps. (Has no effect when using External Captions)\n",
254
- "\n",
255
- "\n",
256
- "Resolution = 512\n",
257
- "\n",
258
- "# Choices : \"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"\n",
259
- "# Higher resolution = Higher quality, make sure the instance images are cropped to this selected size (or larger).\n",
260
- "\n",
261
- "#---------------------------------------------------------------\n",
262
- "\n",
263
- "Save_Checkpoint_Every_n_Steps = False\n",
264
- "\n",
265
- "Save_Checkpoint_Every=500\n",
266
- "\n",
267
- "# Minimum 200 steps between each save.\n",
268
- "\n",
269
- "\n",
270
- "Start_saving_from_the_step=500\n",
271
- "\n",
272
- "# Start saving intermediary checkpoints from this step.\n",
273
- "\n",
274
- "\n",
275
- "#-----------------\n",
276
- "resume=dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Style_Training, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resume, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every)"
277
- ]
278
- },
279
- {
280
- "cell_type": "markdown",
281
- "id": "bf6f2232-60b3-41c5-bea6-b0dcc4aef937",
282
- "metadata": {},
283
- "source": [
284
- "# Test the Trained Model"
285
- ]
286
- },
287
- {
288
- "cell_type": "code",
289
- "execution_count": null,
290
- "id": "1263a084-b142-4e63-a0aa-2706673a4355",
291
- "metadata": {},
292
- "outputs": [],
293
- "source": [
294
- "Previous_Session_Name=\"\"\n",
295
- "\n",
296
- "# Leave empty if you want to use the current trained model.\n",
297
- "\n",
298
- "\n",
299
- "Custom_Path = \"\"\n",
300
- "\n",
301
- "# Input the full path to a desired model.\n",
302
- "\n",
303
- "\n",
304
- "User = \"\"\n",
305
- "\n",
306
- "Password= \"\"\n",
307
- "\n",
308
- "# Add credentials to your Gradio interface (optional).\n",
309
- "\n",
310
- "\n",
311
- "Use_localtunnel = False\n",
312
- "\n",
313
- "# If you have trouble with the Gradio server, use localtunnel instead.\n",
314
- "\n",
315
- "\n",
316
- "#-----------------\n",
317
- "configf=test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_localtunnel) if 'Session_Name' in locals() else test(Custom_Path, Previous_Session_Name, \"\", User, Password, Use_localtunnel)\n",
318
- "!python /notebooks/sd/stable-diffusion-webui/webui.py $configf"
319
- ]
320
- },
321
- {
322
- "cell_type": "markdown",
323
- "id": "53ccbcaf-3319-44f5-967b-ecbdfa9d0e78",
324
- "metadata": {},
325
- "source": [
326
- "# Upload The Trained Model to Hugging Face"
327
- ]
328
- },
329
- {
330
- "cell_type": "code",
331
- "execution_count": null,
332
- "id": "2c9cb205-d828-4e51-9943-f337bd410ea8",
333
- "metadata": {},
334
- "outputs": [],
335
- "source": [
336
- "# Save it to your personal profile or contribute to the public [library of concepts](https://huggingface.co/sd-dreambooth-library)\n",
337
- "\n",
338
- "Name_of_your_concept = \"\"\n",
339
- "\n",
340
- "# Leave empty if you want to name your concept the same as the current session.\n",
341
- "\n",
342
- "\n",
343
- "Save_concept_to = \"My_Profile\"\n",
344
- "\n",
345
- "# Choices : \"Public_Library\", \"My_Profile\".\n",
346
- "\n",
347
- "\n",
348
- "hf_token_write = \"\"\n",
349
- "\n",
350
- "# Create a write access token here: https://huggingface.co/settings/tokens, go to \"New token\" -> Role: Write. A regular read token won't work here.\n",
351
- "\n",
352
- "\n",
353
- "#---------------------------------\n",
354
- "hf(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH)"
355
- ]
356
- },
357
- {
358
- "cell_type": "markdown",
359
- "id": "881d80a3-4ebf-41bc-b68f-ac1cacb080f3",
360
- "metadata": {},
361
- "source": [
362
- "# Free up space"
363
- ]
364
- },
365
- {
366
- "cell_type": "code",
367
- "execution_count": null,
368
- "id": "7403744d-cc45-419f-88ac-5475fa0f7f45",
369
- "metadata": {},
370
- "outputs": [],
371
- "source": [
372
- "# Display a list of sessions from which you can remove any session you don't need anymore\n",
373
- "\n",
374
- "#-------------------------\n",
375
- "clean()"
376
- ]
377
- }
378
- ],
379
- "metadata": {
380
- "kernelspec": {
381
- "display_name": "Python 3 (ipykernel)",
382
- "language": "python",
383
- "name": "python3"
384
- },
385
- "language_info": {
386
- "codemirror_mode": {
387
- "name": "ipython",
388
- "version": 3
389
- },
390
- "file_extension": ".py",
391
- "mimetype": "text/x-python",
392
- "name": "python",
393
- "nbconvert_exporter": "python",
394
- "pygments_lexer": "ipython3",
395
- "version": "3.9.13"
396
- }
397
- },
398
- "nbformat": 4,
399
- "nbformat_minor": 5
400
- }
 
 
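For readability, the one-liner in the "Dependencies" cell above is equivalent to roughly the following sketch (the original opens /dev/null as devnull but never uses the handle):

    # Readable expansion of the bootstrap one-liner in the Dependencies cell.
    import importlib
    import os
    import time

    import requests

    # Fetch the helper script from the PPS dataset and save it locally.
    content = requests.get('https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Scripts/mainpaperspacev1.py').content
    with open('/notebooks/mainpaperspacev1.py', 'wb') as f:
        f.write(content)

    os.chdir('/notebooks')
    time.sleep(3)  # give the filesystem a moment before importing

    import mainpaperspacev1
    importlib.reload(mainpaperspacev1)  # pick up a fresh copy if already imported
    from mainpaperspacev1 import *

    Deps(force_reinstall)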
 
Notebooks/Fast-Dreambooth-v2.ipynb DELETED
@@ -1,409 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "id": "494d5ce4-5843-4d70-ae96-c1983e21b6e8",
6
- "metadata": {},
7
- "source": [
8
- "## Dreambooth v2 Paperspace Notebook from https://github.com/TheLastBen/fast-stable-diffusion. If you encounter any issues, feel free to discuss them. [Support](https://ko-fi.com/thelastben)"
9
- ]
10
- },
11
- {
12
- "cell_type": "markdown",
13
- "id": "8afdca63-eff3-4a9d-b4d9-127c0f028033",
14
- "metadata": {
15
- "tags": []
16
- },
17
- "source": [
18
- "# Dependencies"
19
- ]
20
- },
21
- {
22
- "cell_type": "code",
23
- "execution_count": null,
24
- "id": "be74b2d5-da96-4bf4-ae82-4fe4b8abc04c",
25
- "metadata": {
26
- "tags": []
27
- },
28
- "outputs": [],
29
- "source": [
30
- "# Install the dependencies\n",
31
- "\n",
32
- "force_reinstall= False\n",
33
- "\n",
34
- "# Set to True only if you want to install the dependencies again.\n",
35
- "\n",
36
- "\n",
37
- "#--------------------\n",
38
- "with open('/dev/null', 'w') as devnull:import requests, os, time, importlib;open('/notebooks/mainpaperspacev2.py', 'wb').write(requests.get('https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Scripts/mainpaperspacev2.py').content);os.chdir('/notebooks');time.sleep(3);import mainpaperspacev2;importlib.reload(mainpaperspacev2);from mainpaperspacev2 import *;Deps(force_reinstall)"
39
- ]
40
- },
41
- {
42
- "cell_type": "markdown",
43
- "id": "7a4ef4a2-6863-4603-9254-a1e2a547ee38",
44
- "metadata": {
45
- "tags": []
46
- },
47
- "source": [
48
- "# Download the model"
49
- ]
50
- },
51
- {
52
- "cell_type": "code",
53
- "execution_count": null,
54
- "id": "a1ba734e-515b-4761-8c88-ef7f165d7971",
55
- "metadata": {
56
- "tags": []
57
- },
58
- "outputs": [],
59
- "source": [
60
- "Model_Version = \"768\"\n",
61
- "\n",
62
- "# Choices are : \"512\", \"768\"\n",
63
- "\n",
64
- "#-----------------------------------------------------------------------------------------------------------------------------------\n",
65
- "\n",
66
- "Custom_Model_Version = \"768\"\n",
67
- "\n",
68
- "# Choices are : \"512\", \"768\"\n",
69
- "\n",
70
- "Path_to_HuggingFace= \"\"\n",
71
- "\n",
72
- "# Load and finetune a model from Hugging Face; use the format \"profile/model\", e.g. runwayml/stable-diffusion-v1-5.\n",
73
- "\n",
74
- "CKPT_Path = \"\"\n",
75
- "\n",
76
- "# Load a CKPT model from the storage.\n",
77
- "\n",
78
- "CKPT_Link = \"\"\n",
79
- "\n",
80
- "# A CKPT direct link, huggingface CKPT link or a shared CKPT from gdrive.\n",
81
- "\n",
82
- "\n",
83
- "#-------------\n",
84
- "MODEL_NAMEv2=dlv2(Path_to_HuggingFace, CKPT_Path, CKPT_Link, Model_Version, Custom_Model_Version)"
85
- ]
86
- },
87
- {
88
- "cell_type": "markdown",
89
- "id": "4c6c4932-e614-4f5e-8d4a-4feca5ce54f5",
90
- "metadata": {},
91
- "source": [
92
- "# Create/Load a Session"
93
- ]
94
- },
95
- {
96
- "cell_type": "code",
97
- "execution_count": null,
98
- "id": "b6595c37-8ad2-45ff-a055-fe58c6663d2f",
99
- "metadata": {
100
- "tags": []
101
- },
102
- "outputs": [],
103
- "source": [
104
- "Session_Name = \"\"\n",
105
- "\n",
106
- "# Enter the session name; if it exists, it will be loaded, otherwise a new session will be created.\n",
107
- "\n",
108
- "Session_Link_optional = \"\"\n",
109
- "\n",
110
- "# Import a session from another gdrive; the shared gdrive link must point to the specific session's folder that contains the trained CKPT. Remove any intermediary CKPTs first.\n",
111
- "\n",
112
- "Model_Version = \"768\"\n",
113
- "\n",
114
- "# Ignore this if you're not loading a previous session that contains a trained model. Choices are: \"512\", \"768\"\n",
115
- "\n",
116
- "\n",
117
- "#-----------------\n",
118
- "[PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2]=sessv2(Session_Name, Session_Link_optional, Model_Version, MODEL_NAMEv2 if 'MODEL_NAMEv2' in locals() else \"\")"
119
- ]
120
- },
121
- {
122
- "cell_type": "markdown",
123
- "id": "5698de61-08d3-4d90-83ef-f882ed956d01",
124
- "metadata": {},
125
- "source": [
126
- "# Instance Images"
127
- ]
128
- },
129
- {
130
- "cell_type": "code",
131
- "execution_count": null,
132
- "id": "bc2f8f28-226e-45b8-8257-804bbb711f56",
133
- "metadata": {
134
- "tags": []
135
- },
136
- "outputs": [],
137
- "source": [
138
- "Remove_existing_instance_images= True\n",
139
- "\n",
140
- "# Set to False to keep the existing instance images if any.\n",
141
- "\n",
142
- "\n",
143
- "IMAGES_FOLDER_OPTIONAL=\"\"\n",
144
- "\n",
145
- "# If you prefer to specify the folder of the pictures directly instead of uploading, the pictures will be added to the existing instance images (if any). Leave EMPTY to upload.\n",
146
- "\n",
147
- "\n",
148
- "Smart_crop_images= True\n",
149
- "\n",
150
- "# Automatically crop your input images.\n",
151
- "\n",
152
- "\n",
153
- "Crop_size = 768\n",
154
- "\n",
155
- "# Choices: \"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"\n",
156
- "\n",
157
- "# Check out this example for naming: https://i.imgur.com/d2lD3rz.jpeg\n",
158
- "\n",
159
- "\n",
160
- "#-----------------\n",
161
- "uplder(Remove_existing_instance_images, Smart_crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, False)"
162
- ]
163
- },
164
- {
165
- "cell_type": "markdown",
166
- "id": "0e93924f-a6bf-45d5-aa77-915ad7385dcd",
167
- "metadata": {},
168
- "source": [
169
- "# Manual Captioning"
170
- ]
171
- },
172
- {
173
- "cell_type": "code",
174
- "execution_count": null,
175
- "id": "c5dbcb29-b42f-4cfc-9be8-83355838d5a2",
176
- "metadata": {
177
- "tags": []
178
- },
179
- "outputs": [],
180
- "source": [
181
- "# Open a tool to manually caption the instance images.\n",
182
- "\n",
183
- "#-----------------\n",
184
- "caption(CAPTIONS_DIR, INSTANCE_DIR)"
185
- ]
186
- },
187
- {
188
- "cell_type": "markdown",
189
- "id": "c90140c1-6c91-4cae-a222-e1a746957f95",
190
- "metadata": {},
191
- "source": [
192
- "# Concept Images"
193
- ]
194
- },
195
- {
196
- "cell_type": "code",
197
- "execution_count": null,
198
- "id": "55c27688-8601-4943-b61d-fc48b9ded067",
199
- "metadata": {},
200
- "outputs": [],
201
- "source": [
202
- "Remove_existing_concept_images= True\n",
203
- "\n",
204
- "# Set to False to keep the existing concept images if any.\n",
205
- "\n",
206
- "\n",
207
- "IMAGES_FOLDER_OPTIONAL=\"\"\n",
208
- "\n",
209
- "# If you prefer to specify the folder of the pictures directly instead of uploading, the pictures will be added to the existing concept images (if any). Leave EMPTY to upload.\n",
210
- "\n",
211
- "\n",
212
- "#-----------------\n",
213
- "uplder(Remove_existing_concept_images, True, 512, IMAGES_FOLDER_OPTIONAL, CONCEPT_DIR, CAPTIONS_DIR, True)"
214
- ]
215
- },
216
- {
217
- "cell_type": "markdown",
218
- "id": "2a4aa42a-fd68-41ad-9ba7-da99f834e2c1",
219
- "metadata": {},
220
- "source": [
221
- "# Dreambooth"
222
- ]
223
- },
224
- {
225
- "cell_type": "code",
226
- "execution_count": null,
227
- "id": "612d8335-b984-4f34-911d-5457ff98e507",
228
- "metadata": {},
229
- "outputs": [],
230
- "source": [
231
- "Resume_Training = False\n",
232
- "\n",
233
- "# If you're not satisfied with the result, set to True and run the cell again; it will continue training the current model.\n",
234
- "\n",
235
- "\n",
236
- "UNet_Training_Steps=850\n",
237
- "\n",
238
- "UNet_Learning_Rate = \"6e-6\"\n",
239
- "\n",
240
- "# If you use 10 images, use 650 steps; if you're not satisfied with the result, resume training for another 200 steps with a lower learning rate (8e-6), and so on...\n",
241
- "\n",
242
- "\n",
243
- "Text_Encoder_Training_Steps=300\n",
244
- "\n",
245
- "Text_Encoder_Learning_Rate= \"1e-6\"\n",
246
- "\n",
247
- "# 350-600 steps is enough for a small dataset; keep this number small to avoid overfitting. Set to 0 to disable, and set it to 0 before resuming training if the text encoder is already trained.\n",
248
- "\n",
249
- "\n",
250
- "Text_Encoder_Concept_Training_Steps=0\n",
251
- "\n",
252
- "# Suitable for training a style/concept as it acts as regularization; with a minimum of 300 steps, 1 step/image is enough to train the concept(s). Set to 0 to disable; set both settings above to 0 to finetune only the text_encoder on the concept; set it to 0 before resuming training if it is already trained.\n",
253
- "\n",
254
- "\n",
255
- "External_Captions= False\n",
256
- "\n",
257
- "# Get the captions from a text file for each instance image.\n",
258
- "\n",
259
- "\n",
260
- "Style_Training=False\n",
261
- "\n",
262
- "# Further reduces overfitting; suitable when training a style or a general theme. Don't enable it at the beginning, enable it after training for at least 800 steps. (Has no effect when using External Captions)\n",
263
- "\n",
264
- "\n",
265
- "Resolution = 768\n",
266
- "\n",
267
- "# Choices : \"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"\n",
268
- "# Higher resolution = Higher quality, make sure the instance images are cropped to this selected size (or larger).\n",
269
- "\n",
270
- "#---------------------------------------------------------------\n",
271
- "\n",
272
- "Save_Checkpoint_Every_n_Steps = False\n",
273
- "\n",
274
- "Save_Checkpoint_Every=500\n",
275
- "\n",
276
- "# Minimum 200 steps between each save.\n",
277
- "\n",
278
- "\n",
279
- "Start_saving_from_the_step=500\n",
280
- "\n",
281
- "# Start saving intermediary checkpoints from this step.\n",
282
- "\n",
283
- "\n",
284
- "#-----------------\n",
285
- "resumev2=dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Style_Training, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every)"
286
- ]
287
- },
288
- {
289
- "cell_type": "markdown",
290
- "id": "bf6f2232-60b3-41c5-bea6-b0dcc4aef937",
291
- "metadata": {},
292
- "source": [
293
- "# Test the Trained Model"
294
- ]
295
- },
296
- {
297
- "cell_type": "code",
298
- "execution_count": null,
299
- "id": "1263a084-b142-4e63-a0aa-2706673a4355",
300
- "metadata": {},
301
- "outputs": [],
302
- "source": [
303
- "Previous_Session_Name=\"\"\n",
304
- "\n",
305
- "# Leave empty if you want to use the current trained model.\n",
306
- "\n",
307
- "\n",
308
- "Custom_Path = \"\"\n",
309
- "\n",
310
- "# Input the full path to a desired model.\n",
311
- "\n",
312
- "\n",
313
- "User = \"\" \n",
314
- "\n",
315
- "Password= \"\"\n",
316
- "\n",
317
- "# Add credentials to your Gradio interface (optional).\n",
318
- "\n",
319
- "\n",
320
- "Use_localtunnel = False\n",
321
- "\n",
322
- "# If you have trouble with the Gradio server, use localtunnel instead.\n",
323
- "\n",
324
- "\n",
325
- "#-----------------\n",
326
- "configf=test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_localtunnel) if 'Session_Name' in locals() else test(Custom_Path, Previous_Session_Name, \"\", User, Password, Use_localtunnel)\n",
327
- "!python /notebooks/sd/stable-diffusion-webui/webui.py $configf"
328
- ]
329
- },
330
- {
331
- "cell_type": "markdown",
332
- "id": "53ccbcaf-3319-44f5-967b-ecbdfa9d0e78",
333
- "metadata": {},
334
- "source": [
335
- "# Upload The Trained Model to Hugging Face"
336
- ]
337
- },
338
- {
339
- "cell_type": "code",
340
- "execution_count": null,
341
- "id": "2c9cb205-d828-4e51-9943-f337bd410ea8",
342
- "metadata": {},
343
- "outputs": [],
344
- "source": [
345
- "# Save it to your personal profile or contribute to the public [library of concepts](https://huggingface.co/sd-dreambooth-library)\n",
346
- "\n",
347
- "Name_of_your_concept = \"\"\n",
348
- "\n",
349
- "# Leave empty if you want to name your concept the same as the current session.\n",
350
- "\n",
351
- "\n",
352
- "Save_concept_to = \"My_Profile\"\n",
353
- "\n",
354
- "# Choices : \"Public_Library\", \"My_Profile\".\n",
355
- "\n",
356
- "\n",
357
- "hf_token_write = \"\"\n",
358
- "\n",
359
- "# Create a write access token here: https://huggingface.co/settings/tokens, go to \"New token\" -> Role: Write. A regular read token won't work here.\n",
360
- "\n",
361
- "\n",
362
- "#---------------------------------\n",
363
- "hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH)"
364
- ]
365
- },
366
- {
367
- "cell_type": "markdown",
368
- "id": "881d80a3-4ebf-41bc-b68f-ac1cacb080f3",
369
- "metadata": {},
370
- "source": [
371
- "# Free up space"
372
- ]
373
- },
374
- {
375
- "cell_type": "code",
376
- "execution_count": null,
377
- "id": "7403744d-cc45-419f-88ac-5475fa0f7f45",
378
- "metadata": {},
379
- "outputs": [],
380
- "source": [
381
- "# Display a list of sessions from which you can remove any session you don't need anymore\n",
382
- "\n",
383
- "#-------------------------\n",
384
- "clean()"
385
- ]
386
- }
387
- ],
388
- "metadata": {
389
- "kernelspec": {
390
- "display_name": "Python 3 (ipykernel)",
391
- "language": "python",
392
- "name": "python3"
393
- },
394
- "language_info": {
395
- "codemirror_mode": {
396
- "name": "ipython",
397
- "version": 3
398
- },
399
- "file_extension": ".py",
400
- "mimetype": "text/x-python",
401
- "name": "python",
402
- "nbconvert_exporter": "python",
403
- "pygments_lexer": "ipython3",
404
- "version": "3.9.13"
405
- }
406
- },
407
- "nbformat": 4,
408
- "nbformat_minor": 5
409
- }
 
Notebooks/Fast-SD-A1111.ipynb DELETED
@@ -1,155 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "id": "6441563f-5e6b-4987-abe3-2a1b8c25a789",
6
- "metadata": {},
7
- "source": [
8
- "## A1111 Paperspace Notebook from https://github.com/TheLastBen/fast-stable-diffusion. If you encounter any issues, feel free to discuss them. [Support](https://ko-fi.com/thelastben)"
9
- ]
10
- },
11
- {
12
- "cell_type": "markdown",
13
- "id": "840f2f6a-41d1-4938-a846-d51f76682b76",
14
- "metadata": {},
15
- "source": [
16
- "# Dependencies"
17
- ]
18
- },
19
- {
20
- "cell_type": "code",
21
- "execution_count": null,
22
- "id": "62e22de7-f054-45a7-b7e3-b63b9a0188a1",
23
- "metadata": {},
24
- "outputs": [],
25
- "source": [
26
- "# Install the dependencies\n",
27
- "\n",
28
- "force_reinstall= False\n",
29
- "\n",
30
- "# Set to True only if you want to install the dependencies again.\n",
31
- "\n",
32
- "\n",
33
- "#--------------------\n",
34
- "with open('/dev/null', 'w') as devnull:import requests, os, time, importlib;open('/notebooks/mainpaperspaceA1111.py', 'wb').write(requests.get('https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Scripts/mainpaperspaceA1111.py').content);os.chdir('/notebooks');time.sleep(3);import mainpaperspaceA1111;importlib.reload(mainpaperspaceA1111);from mainpaperspaceA1111 import *;Deps(force_reinstall)"
35
- ]
36
- },
37
- {
38
- "cell_type": "markdown",
39
- "id": "e21f3583-2d0f-4218-9da2-303f9e202820",
40
- "metadata": {
41
- "tags": []
42
- },
43
- "source": [
44
- "# Install/Update AUTOMATIC1111 repo"
45
- ]
46
- },
47
- {
48
- "cell_type": "code",
49
- "execution_count": null,
50
- "id": "ed6cebed-8a4f-4a35-b5c3-36126be008b9",
51
- "metadata": {},
52
- "outputs": [],
53
- "source": [
54
- "# Don't skip this cell; it makes sure the repo is up to date and functioning correctly\n",
55
- "\n",
56
- "#--------------------\n",
57
- "repo()"
58
- ]
59
- },
60
- {
61
- "cell_type": "markdown",
62
- "id": "ab0521c7-8c68-4ea2-915a-bc3f4b67f6e7",
63
- "metadata": {},
64
- "source": [
65
- "## Model Download/Load"
66
- ]
67
- },
68
- {
69
- "cell_type": "code",
70
- "execution_count": null,
71
- "id": "a6f367e0-df08-41fd-91b5-e2afbcbd42e4",
72
- "metadata": {},
73
- "outputs": [],
74
- "source": [
75
- "Original_Model_Version = \"v1.5\"\n",
76
- "\n",
77
- "# Choices are \"v1.5\", \"v2-512\", \"v2-768\"\n",
78
- "\n",
79
- "\n",
80
- "Path_to_MODEL = \"\"\n",
81
- "\n",
82
- "# Insert the full path to your trained model or to a folder containing multiple models.\n",
83
- "\n",
84
- "\n",
85
- "MODEL_LINK = \"\"\n",
86
- "\n",
87
- "# A direct link to a Model or a shared gdrive link.\n",
88
- "\n",
89
- "safetensors= False\n",
90
- "\n",
91
- "# Set to True if the model from the link is in safetensors format.\n",
92
- "\n",
93
- "Temporary_Storage = True\n",
94
- "\n",
95
- "# Download the model to temporary storage: bigger capacity, but it will be removed at session shutdown.\n",
96
- "\n",
97
- "\n",
98
- "#--------------------\n",
99
- "model=mdl(Original_Model_Version, Path_to_MODEL, MODEL_LINK, safetensors, Temporary_Storage)"
100
- ]
101
- },
102
- {
103
- "cell_type": "markdown",
104
- "id": "e0baf0c4-a410-432f-891b-975c7250c77d",
105
- "metadata": {},
106
- "source": [
107
- "## Start Stable-Diffusion"
108
- ]
109
- },
110
- {
111
- "cell_type": "code",
112
- "execution_count": null,
113
- "id": "0121ea1d-1aa0-4961-b916-c5dbb900e05f",
114
- "metadata": {},
115
- "outputs": [],
116
- "source": [
117
- "User = \"\"\n",
118
- "\n",
119
- "Password= \"\"\n",
120
- "\n",
121
- "# Add credentials to your Gradio interface (optional).\n",
122
- "\n",
123
- "Use_localtunnel = False\n",
124
- "\n",
125
- "# If you have trouble with the Gradio server, use localtunnel instead.\n",
126
- "\n",
127
- "\n",
128
- "#-----------------\n",
129
- "configf=sd(User, Password, Use_localtunnel)\n",
130
- "!python /notebooks/sd/stable-diffusion-webui/webui.py --ckpt $model $configf"
131
- ]
132
- }
133
- ],
134
- "metadata": {
135
- "kernelspec": {
136
- "display_name": "Python 3 (ipykernel)",
137
- "language": "python",
138
- "name": "python3"
139
- },
140
- "language_info": {
141
- "codemirror_mode": {
142
- "name": "ipython",
143
- "version": 3
144
- },
145
- "file_extension": ".py",
146
- "mimetype": "text/x-python",
147
- "name": "python",
148
- "nbconvert_exporter": "python",
149
- "pygments_lexer": "ipython3",
150
- "version": "3.9.13"
151
- }
152
- },
153
- "nbformat": 4,
154
- "nbformat_minor": 5
155
- }
 
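For reference, the "Start Stable-Diffusion" cell above ends up running a command of this shape (a sketch; the exact flags are composed by sd() in Scripts/mainpaperspaceA1111.py below, with --gradio-auth added only when User and Password are set, and --share only when localtunnel is not used):

    # python /notebooks/sd/stable-diffusion-webui/webui.py --ckpt <model> \
    #     --disable-console-progressbars --no-half-vae --disable-safe-unpickle \
    #     --api --xformers --medvram --skip-version-check \
    #     [--gradio-auth User:Password] [--share]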
 
README.md DELETED
@@ -1,3 +0,0 @@
1
- ---
2
- license: cc-by-nc-4.0
3
- ---
 
Scripts/mainpaperspaceA1111.py DELETED
@@ -1,197 +0,0 @@
1
- import os
2
- from IPython.display import clear_output
3
- from subprocess import call, getoutput
4
- import time
5
- import sys
6
- import fileinput
7
- import ipywidgets as widgets
8
-
9
-
10
-
11
- def Deps(force_reinstall):
12
-
13
- if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
14
- os.chdir('/notebooks')
15
- if not os.path.exists('Latest_Notebooks'):
16
- call('mkdir Latest_Notebooks', shell=True)
17
- else:
18
- call('rm -r Latest_Notebooks', shell=True)
19
- call('mkdir Latest_Notebooks', shell=True)
20
- os.chdir('/notebooks/Latest_Notebooks')
21
- call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
22
- call('rm Notebooks.txt', shell=True)
23
- os.chdir('/notebooks')
24
- print('Modules and notebooks updated, dependencies already installed')
25
-
26
- else:
27
- print('Installing the dependencies...')
28
- call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
29
- if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
30
- os.chdir('/usr/local/lib/python3.9/dist-packages')
31
- call("rm -r torch torch-1.12.0+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
32
-
33
- os.chdir('/notebooks')
34
- if not os.path.exists('Latest_Notebooks'):
35
- call('mkdir Latest_Notebooks', shell=True)
36
- else:
37
- call('rm -r Latest_Notebooks', shell=True)
38
- call('mkdir Latest_Notebooks', shell=True)
39
- os.chdir('/notebooks/Latest_Notebooks')
40
- call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
41
- call('rm Notebooks.txt', shell=True)
42
- os.chdir('/notebooks')
43
- call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
44
- os.chdir('/notebooks')
45
- if not os.path.exists('/models'):
46
- call('mkdir /models', shell=True)
47
- if not os.path.exists('/notebooks/models'):
48
- call('ln -s /models /notebooks', shell=True)
49
- if os.path.exists('/deps'):
50
- call("rm -r /deps", shell=True)
51
- call('mkdir /deps', shell=True)
52
- if not os.path.exists('cache'):
53
- call('mkdir cache', shell=True)
54
- os.chdir('/deps')
55
- call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
56
- call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
57
- call('wget -q https://huggingface.co/TheLastBen/dependencies/resolve/main/pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
58
- call('tar -C / --zstd -xf pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
59
- call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
60
- os.chdir('/notebooks')
61
- call("git clone --depth 1 -q --branch updt https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'))
62
- if not os.path.exists('/notebooks/diffusers'):
63
- call('ln -s /diffusers /notebooks', shell=True)
64
- call("rm -r /deps", shell=True)
65
- os.chdir('/notebooks')
66
- clear_output()
67
-
68
- done()
69
-
70
-
71
-
72
- def repo():
73
-
74
- print('Installing/Updating the repo...')
75
- os.chdir('/notebooks')
76
- if not os.path.exists('/notebooks/sd/stablediffusion'):
77
- call('wget -q -O sd_rep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_rep.tar.zst', shell=True)
78
- call('tar --zstd -xf sd_rep.tar.zst', shell=True)
79
- call('rm sd_rep.tar.zst', shell=True)
80
-
81
- os.chdir('/notebooks/sd')
82
- if not os.path.exists('stable-diffusion-webui'):
83
- call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)
84
-
85
- os.chdir('/notebooks/sd/stable-diffusion-webui/')
86
- call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
87
- print('')
88
- call('git pull', shell=True, stdout=open('/dev/null', 'w'))
89
- os.chdir('/notebooks')
90
- clear_output()
91
- done()
92
-
93
-
94
- def mdl(Original_Model_Version, Path_to_MODEL, MODEL_LINK, safetensors, Temporary_Storage):
95
- import gdown
96
- if Path_to_MODEL !='':
97
- if os.path.exists(str(Path_to_MODEL)):
98
- print('Using the trained model.')
99
- model=Path_to_MODEL
100
- else:
101
- print('Wrong path, check that the path to the model is correct')
102
-
103
- elif MODEL_LINK != "":
104
- modelname="model.safetensors" if safetensors else "model.ckpt"
105
- if Temporary_Storage:
106
- model=f'/models/{modelname}'
107
- else:
108
- model=f'/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}'
109
- if os.path.exists(model):
110
- call('rm '+model, shell=True)
111
- gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
112
-
113
- if os.path.exists(model) and os.path.getsize(model) > 1810671599:
114
- clear_output()
115
- print('Model downloaded, using the trained model.')
116
- else:
117
- print('Wrong link, check that the link is valid')
118
-
119
- else:
120
- if Original_Model_Version == "v1.5":
121
- model="/datasets/stable-diffusion-classic/v1-5-pruned-emaonly.ckpt"
122
- print('Using the original V1.5 model')
123
- elif Original_Model_Version == "v2-512":
124
- model="dataset"
125
- print('Using the original V2-512 model')
126
- elif Original_Model_Version == "v2-768":
127
- model="/datasets/stable-diffusion-v2-1/stable-diffusion-2-1/v2-1_768-nonema-pruned.safetensors"
128
- print('Using the original V2-768 model')
129
- else:
130
- model=""
131
- print('Wrong model version')
132
-
133
- return model
134
-
135
-
136
- def sd(User, Password, Use_localtunnel):
137
-
138
- auth=f"--gradio-auth {User}:{Password}"
139
- if User =="" or Password=="":
140
- auth=""
141
-
142
- if not os.path.exists('/usr/lib/node_modules/localtunnel'):
143
- call('npm install -g localtunnel --silent', shell=True, stdout=open('/dev/null', 'w'))
144
- clear_output()
145
-
146
-
147
- share=''
148
- call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)
149
-
150
- if not Use_localtunnel:
151
- share='--share'
152
-
153
- else:
154
- share=''
155
- os.chdir('/notebooks')
156
- call('nohup lt --port 7860 > srv.txt 2>&1 &', shell=True)
157
- time.sleep(2)
158
- call("grep -o 'https[^ ]*' /notebooks/srv.txt >srvr.txt", shell=True)
159
- time.sleep(2)
160
- srv= getoutput('cat /notebooks/srvr.txt')
161
-
162
- for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
163
- if line.strip().startswith('self.server_name ='):
164
- line = f' self.server_name = "{srv[8:]}"\n'
165
- if line.strip().startswith('self.server_port ='):
166
- line = ' self.server_port = 443\n'
167
- if line.strip().startswith('self.protocol = "https"'):
168
- line = ' self.protocol = "https"\n'
169
- if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
170
- line = ''
171
- if line.strip().startswith('else "http"'):
172
- line = ''
173
- sys.stdout.write(line)
174
-
175
- call('rm /notebooks/srv.txt', shell=True)
176
- call('rm /notebooks/srvr.txt', shell=True)
177
-
178
- os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
179
- call('wget -q -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py', shell=True)
180
- call("sed -i 's@/content/gdrive/MyDrive/sd/stablediffusion@/notebooks/sd/stablediffusion@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
181
- os.chdir('/notebooks/sd/stable-diffusion-webui')
182
- clear_output()
183
-
184
- configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --medvram --skip-version-check "+auth+" "+share
185
-
186
- return configf
187
-
188
-
189
- def done():
190
- done = widgets.Button(
191
- description='Done!',
192
- disabled=True,
193
- button_style='success',
194
- tooltip='',
195
- icon='check'
196
- )
197
- display(done)
 
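Example of how the notebook's "Model Download/Load" cell calls mdl() (a sketch; the values mirror the defaults shown above):

    # With no custom path or link, the original v1.5 checkpoint is selected.
    model = mdl(Original_Model_Version="v1.5", Path_to_MODEL="", MODEL_LINK="",
                safetensors=False, Temporary_Storage=True)
    # -> "/datasets/stable-diffusion-classic/v1-5-pruned-emaonly.ckpt"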
 
Scripts/mainpaperspacev1.py DELETED
@@ -1,1271 +0,0 @@
1
- from IPython.display import clear_output
2
- from subprocess import call, getoutput
3
- from IPython.display import display
4
- import ipywidgets as widgets
5
- import io
6
- from PIL import Image, ImageDraw
7
- import fileinput
8
- import time
9
- import os
10
- from os import listdir
11
- from os.path import isfile
12
- from tqdm import tqdm
13
- import gdown
14
- import random
15
- import sys
16
- import cv2
17
- from io import BytesIO
18
- import requests
19
- from collections import defaultdict
20
- from math import log, sqrt
21
- import numpy as np
22
-
23
-
24
-
25
- def Deps(force_reinstall):
26
-
27
- if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
28
- os.chdir('/notebooks')
29
- if not os.path.exists('Latest_Notebooks'):
30
- call('mkdir Latest_Notebooks', shell=True)
31
- else:
32
- call('rm -r Latest_Notebooks', shell=True)
33
- call('mkdir Latest_Notebooks', shell=True)
34
- os.chdir('/notebooks/Latest_Notebooks')
35
- call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
36
- call('rm Notebooks.txt', shell=True)
37
- os.chdir('/notebooks')
38
- print('Modules and notebooks updated, dependencies already installed')
39
-
40
- else:
41
- print('Installing the dependencies...')
42
- call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
43
- if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
44
- os.chdir('/usr/local/lib/python3.9/dist-packages')
45
- call("rm -r torch torch-1.12.0+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
46
-
47
- os.chdir('/notebooks')
48
- if not os.path.exists('Latest_Notebooks'):
49
- call('mkdir Latest_Notebooks', shell=True)
50
- else:
51
- call('rm -r Latest_Notebooks', shell=True)
52
- call('mkdir Latest_Notebooks', shell=True)
53
- os.chdir('/notebooks/Latest_Notebooks')
54
- call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
55
- call('rm Notebooks.txt', shell=True)
56
- os.chdir('/notebooks')
57
- call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
58
- os.chdir('/notebooks')
59
- if not os.path.exists('/models'):
60
- call('mkdir /models', shell=True)
61
- if not os.path.exists('/notebooks/models'):
62
- call('ln -s /models /notebooks', shell=True)
63
- if os.path.exists('/deps'):
64
- call("rm -r /deps", shell=True)
65
- call('mkdir /deps', shell=True)
66
- if not os.path.exists('cache'):
67
- call('mkdir cache', shell=True)
68
- os.chdir('/deps')
69
- call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
70
- call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
71
- call('wget -q https://huggingface.co/TheLastBen/dependencies/resolve/main/pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
72
- call('tar -C / --zstd -xf pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
73
- call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
74
- os.chdir('/notebooks')
75
- call("git clone --depth 1 -q --branch updt https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'))
76
- if not os.path.exists('/notebooks/diffusers'):
77
- call('ln -s /diffusers /notebooks', shell=True)
78
- call("rm -r /deps", shell=True)
79
- os.chdir('/notebooks')
80
- clear_output()
81
-
82
- done()
83
-
84
-
85
- def downloadmodel_hf(Path_to_HuggingFace):
86
- import wget
87
-
88
- if os.path.exists('/models/stable-diffusion-custom'):
89
- call("rm -r /models/stable-diffusion-custom", shell=True)
90
- clear_output()
91
-
92
- if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
93
- with open("/notebooks/Fast-Dreambooth/token.txt") as f:
94
- token = f.read()
95
- authe=f'https://USER:{token}@'
96
- else:
97
- authe="https://"
98
-
99
- clear_output()
100
- call("mkdir /models/stable-diffusion-custom", shell=True)
101
- os.chdir("/models/stable-diffusion-custom")
102
- call("git init", shell=True)
103
- call("git lfs install --system --skip-repo", shell=True)
104
- call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
105
- call("git config core.sparsecheckout true", shell=True)
106
- call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
107
- call("git pull origin main", shell=True)
108
- if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
109
- call("rm -r /models/stable-diffusion-custom/.git", shell=True)
110
- call("rm -r /models/stable-diffusion-custom/model_index.json", shell=True)
111
- wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')
112
- os.chdir('/notebooks')
113
- clear_output()
114
- done()
115
-
116
- while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
117
- print('Check the link you provided')
118
- os.chdir('/notebooks')
119
- time.sleep(5)
120
-
121
-
122
-
123
- def downloadmodel_pth(CKPT_Path):
124
- import wget
125
- os.chdir('/notebooks')
126
- clear_output()
127
- if os.path.exists(str(CKPT_Path)):
128
- wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
129
- call('unzip -o -q refmdlz', shell=True)
130
- call('rm -f refmdlz', shell=True)
131
- wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
132
- clear_output()
133
- call('python /notebooks/convertodiffv1.py '+CKPT_Path+' /models/stable-diffusion-custom --v1', shell=True)
134
- call('rm /notebooks/convertodiffv1.py', shell=True)
135
- call('rm -r /notebooks/refmdl', shell=True)
136
- if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
137
- clear_output()
138
- done()
139
- while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
140
- print('Conversion error')
141
- time.sleep(5)
142
-
143
- else:
144
- while not os.path.exists(str(CKPT_Path)):
145
- print('Wrong path, use the file explorer to copy the path')
146
- time.sleep(5)
147
-
148
-
149
- def downloadmodel_lnk(CKPT_Link):
150
- import wget
151
- os.chdir('/notebooks')
152
- call("gdown --fuzzy " +CKPT_Link+ " -O /models/model.ckpt", shell=True)
153
-
154
- if os.path.exists('/models/model.ckpt'):
155
- if os.path.getsize("/models/model.ckpt") > 1810671599:
156
- wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
157
- call('unzip -o -q refmdlz', shell=True)
158
- call('rm -f refmdlz', shell=True)
159
- wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
160
- clear_output()
161
- call('python /notebooks/convertodiffv1.py /models/model.ckpt /models/stable-diffusion-custom --v1', shell=True)
162
- call('rm /notebooks/convertodiffv1.py', shell=True)
163
- call('rm -r /notebooks/refmdl', shell=True)
164
- if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
165
- call('rm -r /models/model.ckpt', shell=True)
166
- clear_output()
167
- done()
168
- else:
169
- while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
170
- print('Conversion error')
171
- time.sleep(5)
172
- else:
173
- while os.path.getsize('/models/model.ckpt') < 1810671599:
174
- print('Wrong link, check that the link is valid')
175
- time.sleep(5)
176
-
177
-
178
- def dl(Path_to_HuggingFace, CKPT_Path, CKPT_Link):
179
-
180
- if Path_to_HuggingFace != "":
181
- downloadmodel_hf(Path_to_HuggingFace)
182
- MODEL_NAME="/models/stable-diffusion-custom"
183
- elif CKPT_Path !="":
184
- downloadmodel_pth(CKPT_Path)
185
- MODEL_NAME="/models/stable-diffusion-custom"
186
- elif CKPT_Link !="":
187
- downloadmodel_lnk(CKPT_Link)
188
- MODEL_NAME="/models/stable-diffusion-custom"
189
- else:
190
- MODEL_NAME="/datasets/stable-diffusion-diffusers/stable-diffusion-v1-5"
191
- print('Using the original V1.5 model')
192
-
193
- return MODEL_NAME
194
-
195
-
196
- def sess(Session_Name, Session_Link_optional, MODEL_NAME):
197
- import wget, gdown
198
- os.chdir('/notebooks')
199
- PT=""
200
-
201
- while Session_Name=="":
202
- print('Input the Session Name:')
203
- Session_Name=input("")
204
- Session_Name=Session_Name.replace(" ","_")
205
-
206
- WORKSPACE='/notebooks/Fast-Dreambooth'
207
-
208
- if Session_Link_optional !="":
209
- print('Downloading session...')
210
-
211
- if Session_Link_optional != "":
212
- if not os.path.exists(str(WORKSPACE+'/Sessions')):
213
- call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
214
- time.sleep(1)
215
- os.chdir(WORKSPACE+'/Sessions')
216
- gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
217
- os.chdir(Session_Name)
218
- call("rm -r instance_images", shell=True)
219
- call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
220
- call("rm -r concept_images", shell=True)
221
- call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
222
- call("rm -r captions", shell=True)
223
- call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
224
- os.chdir('/notebooks')
225
- clear_output()
226
-
227
- INSTANCE_NAME=Session_Name
228
- OUTPUT_DIR="/models/"+Session_Name
229
- SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
230
- CONCEPT_DIR=SESSION_DIR+"/concept_images"
231
- INSTANCE_DIR=SESSION_DIR+"/instance_images"
232
- CAPTIONS_DIR=SESSION_DIR+'/captions'
233
- MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
234
- resume=False
235
-
236
- if os.path.exists(str(SESSION_DIR)):
237
- mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
238
- if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):
239
-
240
- def f(n):
241
- k=0
242
- for i in mdls:
243
- if k==n:
244
- call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
245
- k=k+1
246
-
247
- k=0
248
- print('No final checkpoint model found; select which intermediary checkpoint to use, enter only the number (000 to skip):\n')
249
-
250
- for i in mdls:
251
- print(str(k)+'- '+i)
252
- k=k+1
253
- n=input()
254
- while int(n)>k-1:
255
- n=input()
256
- if n!="000":
257
- f(int(n))
258
- print('Using the model '+ mdls[int(n)]+" ...")
259
- time.sleep(8)
260
- clear_output()
261
- else:
262
- print('Skipping the intermediary checkpoints.')
263
-
264
-
265
- if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
266
- print('Loading session with no previous model, using the original model or the custom downloaded model')
267
- if MODEL_NAME=="":
268
- print('No model found, use the "Model Download" cell to download a model.')
269
- else:
270
- print('Session Loaded, proceed to uploading instance images')
271
-
272
- elif os.path.exists(MDLPTH):
273
- print('Session found, loading the trained model ...')
274
- wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
275
- call('unzip -o -q refmdlz', shell=True, stdout=open('/dev/null', 'w'))
276
- call('rm -f refmdlz', shell=True, stdout=open('/dev/null', 'w'))
277
- wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
278
- call('python /notebooks/convertodiffv1.py '+MDLPTH+' '+OUTPUT_DIR+' --v1', shell=True)
279
- call('rm /notebooks/convertodiffv1.py', shell=True)
280
- call('rm -r /notebooks/refmdl', shell=True)
281
-
282
-
283
- if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
284
- resume=True
285
- clear_output()
286
- print('Session loaded.')
287
- else:
288
- if not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
289
- print('Conversion error, if the error persists, remove the CKPT file from the current session folder')
290
-
291
- elif not os.path.exists(str(SESSION_DIR)):
292
- call('mkdir -p '+INSTANCE_DIR, shell=True)
293
- print('Creating session...')
294
- if MODEL_NAME=="":
295
- print('No model found, use the "Model Download" cell to download a model.')
296
- else:
297
- print('Session created, proceed to uploading instance images')
298
-
299
- return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAME, resume
300
-
301
-
302
-
303
- def done():
304
- done = widgets.Button(
305
- description='Done!',
306
- disabled=True,
307
- button_style='success',
308
- tooltip='',
309
- icon='check'
310
- )
311
- display(done)
312
-
313
-
314
-
315
- def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
316
-
317
- uploader = widgets.FileUpload(description="Choose images",accept='image/*', multiple=True)
318
- Upload = widgets.Button(
319
- description='Upload',
320
- disabled=False,
321
- button_style='info',
322
- tooltip='Click to upload the chosen instance images',
323
- icon=''
324
- )
325
-
326
-
327
- def up(Upload):
328
- with out:
329
- uploader.close()
330
- Upload.close()
331
- upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
332
- done()
333
- out=widgets.Output()
334
-
335
- if IMAGES_FOLDER_OPTIONAL=="":
336
- Upload.on_click(up)
337
- display(uploader, Upload, out)
338
- else:
339
- upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
340
- done()
341
-
342
-
343
-
344
-
345
- def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
346
-
347
-
348
- if os.path.exists(CAPTIONS_DIR+"off"):
349
- call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
350
- time.sleep(2)
351
-
352
- if Remove_existing_instance_images:
353
- if os.path.exists(str(INSTANCE_DIR)):
354
- call("rm -r " +INSTANCE_DIR, shell=True)
355
- if os.path.exists(str(CAPTIONS_DIR)):
356
- call("rm -r " +CAPTIONS_DIR, shell=True)
357
-
358
-
359
- if not os.path.exists(str(INSTANCE_DIR)):
360
- call("mkdir -p " +INSTANCE_DIR, shell=True)
361
- if not os.path.exists(str(CAPTIONS_DIR)):
362
- call("mkdir -p " +CAPTIONS_DIR, shell=True)
363
-
364
-
365
- if IMAGES_FOLDER_OPTIONAL !="":
366
- if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
367
- call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
368
- if Crop_images:
369
- os.chdir(str(IMAGES_FOLDER_OPTIONAL))
370
- call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
371
- os.chdir('/notebooks')
372
- for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
373
- extension = filename.split(".")[-1]
374
- identifier=filename.split(".")[0]
375
- new_path_with_file = os.path.join(INSTANCE_DIR, filename)
376
- file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
377
- width, height = file.size
378
- image = file
379
- if file.size !=(Crop_size, Crop_size):
380
- image=crop_image(file, Crop_size)
381
- if extension.upper() in ("JPG", "JPEG"):
382
- image[0].save(new_path_with_file, format="JPEG", quality = 100)
383
- else:
384
- image[0].save(new_path_with_file, format=extension.upper())
385
-
386
- else:
387
- call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)
388
-
389
- else:
390
- for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
391
- call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)
392
-
393
-
394
-
395
- elif IMAGES_FOLDER_OPTIONAL =="":
396
- up=""
397
- for filename, file in uploader.value.items():
398
- if filename.split(".")[-1]=="txt":
399
- with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
400
- f.write(file['content'].decode())
401
- up=[(filename, file) for filename, file in uploader.value.items() if filename.split(".")[-1]!="txt"]
402
- if Crop_images:
403
- for filename, file_info in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
404
- img = Image.open(io.BytesIO(file_info['content']))
405
- extension = filename.split(".")[-1]
406
- identifier=filename.split(".")[0]
407
-
408
- if extension.upper() in ("JPG", "JPEG"):
409
- img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
410
- else:
411
- img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
412
-
413
- new_path_with_file = os.path.join(INSTANCE_DIR, filename)
414
- file = Image.open(new_path_with_file)
415
- width, height = file.size
416
- image = img
417
- if file.size !=(Crop_size, Crop_size):
418
- image=crop_image(file, Crop_size)
419
- if extension.upper() in ("JPG", "JPEG"):
420
- image[0].save(new_path_with_file, format="JPEG", quality = 100)
421
- else:
422
- image[0].save(new_path_with_file, format=extension.upper())
423
-
424
- else:
425
- for filename, file_info in tqdm(uploader.value.items(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
426
- img = Image.open(io.BytesIO(file_info['content']))
427
-
428
- extension = filename.split(".")[-1]
429
- identifier=filename.split(".")[0]
430
-
431
- if extension.upper() in ("JPG", "JPEG"):
432
- img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
433
- else:
434
- img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
435
-
436
-
437
- if ren:
438
- i=0
439
- for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
440
- extension = filename.split(".")[-1]
441
- identifier=filename.split(".")[0]
442
- new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
443
- call('mv "'+os.path.join(INSTANCE_DIR,filename)+'" "'+new_path_with_file+'"', shell=True)
444
- i=i+1
445
-
446
- os.chdir(INSTANCE_DIR)
447
- call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
448
- os.chdir(CAPTIONS_DIR)
449
- call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
450
- os.chdir('/notebooks')
451
-
452
-
453
-
454
- def caption(CAPTIONS_DIR, INSTANCE_DIR):
455
-
456
- if os.path.exists(CAPTIONS_DIR+"off"):
457
- call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
458
- time.sleep(2)
459
-
460
- paths=""
461
- out=""
462
- widgets_l=""
463
- clear_output()
464
- def Caption(path):
465
- if path!="Select an instance image to caption":
466
-
467
- name = os.path.splitext(os.path.basename(path))[0]
468
- ext=os.path.splitext(os.path.basename(path))[-1][1:]
469
- if ext.lower() in ("jpg", "jpeg"):
470
- ext="JPEG"
471
-
472
- if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
473
- with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
474
- text = f.read()
475
- else:
476
- with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
477
- f.write("")
478
- with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
479
- text = f.read()
480
-
481
- img=Image.open(os.path.join(INSTANCE_DIR,path))
482
- img=img.resize((420, 420))
483
- image_bytes = BytesIO()
484
- img.save(image_bytes, format=ext, quality=10)
485
- image_bytes.seek(0)
486
- image_data = image_bytes.read()
487
- img= image_data
488
- image = widgets.Image(
489
- value=img,
490
- width=420,
491
- height=420
492
- )
493
- text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})
494
-
495
-
496
- def update_text(text):
497
- with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
498
- f.write(text)
499
-
500
- button = widgets.Button(description='Save', button_style='success')
501
- button.on_click(lambda b: update_text(text_area.value))
502
-
503
- return widgets.VBox([widgets.HBox([image, text_area, button])])
504
-
505
-
506
- paths = os.listdir(INSTANCE_DIR)
507
- widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)
508
-
509
-
510
- out = widgets.Output()
511
-
512
- def click(change):
513
- with out:
514
- out.clear_output()
515
- display(Caption(change.new))
516
-
517
- widgets_l.observe(click, names='value')
518
- display(widgets.HBox([widgets_l, out]))
519
-
520
-
521
-
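The captioning widget above keeps one plain-text file per image, matched by basename, inside CAPTIONS_DIR. A minimal sketch of that convention (paths illustrative):

import os

captions_dir = "/notebooks/Fast-Dreambooth/Sessions/MySession/captions"  # illustrative
image_name = "conceptimagedb0.jpg"

# The caption file shares the image's basename, with a .txt extension.
caption_path = os.path.join(captions_dir, os.path.splitext(image_name)[0] + ".txt")
with open(caption_path, "w") as f:
    f.write("a photo of a red ceramic mug on a desk")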
522
- def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Style_Training, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resume, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
523
-
524
- if resume and not Resume_Training:
525
- print('Overwrite your previously trained model? Answering "yes" will train a new model, answering "no" will resume training of the previous model. yes or no?')
526
- while True:
527
- ansres=input('')
528
- if ansres=='no':
529
- Resume_Training = True
530
- break
531
- elif ansres=='yes':
532
- Resume_Training = False
533
- resume= False
534
- break
535
-
536
- while not Resume_Training and not os.path.exists(MODEL_NAME+'/unet/diffusion_pytorch_model.bin'):
537
- print('No model found, use the "Model Download" cell to download a model.')
538
- time.sleep(5)
539
-
540
- if os.path.exists(CAPTIONS_DIR+"off"):
541
- call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
542
- time.sleep(2)
543
-
544
- MODELT_NAME=MODEL_NAME
545
-
546
- Seed=random.randint(1, 999999)
547
-
548
- Style=""
549
- if Style_Training:
550
- Style="--Style"
551
-
552
- extrnlcptn=""
553
- if External_Captions:
554
- extrnlcptn="--external_captions"
555
-
556
- precision="fp16"
557
-
558
- GCUNET="--gradient_checkpointing"
559
- if Resolution<=640:
560
- GCUNET=""
561
-
562
- resuming=""
563
- if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
564
- MODELT_NAME=OUTPUT_DIR
565
- print('Resuming Training...')
566
- resuming="Yes"
567
- elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
568
- print('Previous model not found, training a new model...')
569
- MODELT_NAME=MODEL_NAME
570
- while MODEL_NAME=="":
571
- print('No model found, use the "Model Download" cell to download a model.')
572
- time.sleep(5)
573
-
574
-
575
- trnonltxt=""
576
- if UNet_Training_Steps==0:
577
- trnonltxt="--train_only_text_encoder"
578
-
579
- Enable_text_encoder_training= True
580
- Enable_Text_Encoder_Concept_Training= True
581
-
582
-
583
- if Text_Encoder_Training_Steps==0 or External_Captions:
584
- Enable_text_encoder_training= False
585
- else:
586
- stptxt=Text_Encoder_Training_Steps
587
-
588
- if Text_Encoder_Concept_Training_Steps==0:
589
- Enable_Text_Encoder_Concept_Training= False
590
- else:
591
- stptxtc=Text_Encoder_Concept_Training_Steps
592
-
593
-
594
- if Save_Checkpoint_Every==None:
595
- Save_Checkpoint_Every=1
596
- stp=0
597
- if Start_saving_from_the_step==None:
598
- Start_saving_from_the_step=0
599
- if (Start_saving_from_the_step < 200):
600
- Start_saving_from_the_step=Save_Checkpoint_Every
601
- stpsv=Start_saving_from_the_step
602
- if Save_Checkpoint_Every_n_Steps:
603
- stp=Save_Checkpoint_Every
604
-
605
-
606
- def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
607
- call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
608
- '+trnonltxt+' \
609
- --train_text_encoder \
610
- --image_captions_filename \
611
- --dump_only_text_encoder \
612
- --pretrained_model_name_or_path='+MODELT_NAME+' \
613
- --instance_data_dir='+INSTANCE_DIR+' \
614
- --output_dir='+OUTPUT_DIR+' \
615
- --instance_prompt='+PT+' \
616
- --seed='+str(Seed)+' \
617
- --resolution=512 \
618
- --mixed_precision='+str(precision)+' \
619
- --train_batch_size=1 \
620
- --gradient_accumulation_steps=1 --gradient_checkpointing \
621
- --use_8bit_adam \
622
- --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
623
- --lr_scheduler="polynomial" \
624
- --lr_warmup_steps=0 \
625
- --max_train_steps='+str(Training_Steps), shell=True)
626
-
627
- def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps):
628
- clear_output()
629
- if resuming=="Yes":
630
- print('Resuming Training...')
631
- print('Training the UNet...')
632
- call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
633
- '+Style+' \
634
- '+extrnlcptn+' \
635
- --stop_text_encoder_training='+str(Text_Encoder_Training_Steps)+' \
636
- --image_captions_filename \
637
- --train_only_unet \
638
- --Session_dir='+SESSION_DIR+' \
639
- --save_starting_step='+str(stpsv)+' \
640
- --save_n_steps='+str(stp)+' \
641
- --pretrained_model_name_or_path='+MODELT_NAME+' \
642
- --instance_data_dir='+INSTANCE_DIR+' \
643
- --output_dir='+OUTPUT_DIR+' \
644
- --instance_prompt='+PT+' \
645
- --seed='+str(Seed)+' \
646
- --resolution='+str(Resolution)+' \
647
- --mixed_precision='+str(precision)+' \
648
- --train_batch_size=1 \
649
- --gradient_accumulation_steps=1 '+GCUNET+' \
650
- --use_8bit_adam \
651
- --learning_rate='+str(UNet_Learning_Rate)+' \
652
- --lr_scheduler="polynomial" \
653
- --lr_warmup_steps=0 \
654
- --max_train_steps='+str(Training_Steps), shell=True)
655
-
656
- if Enable_text_encoder_training:
657
- print('Training the text encoder...')
658
- if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
659
- call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
660
- dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)
661
-
662
- if Enable_Text_Encoder_Concept_Training:
663
- if os.path.exists(CONCEPT_DIR):
664
- if os.listdir(CONCEPT_DIR)!=[]:
665
- clear_output()
666
- if resuming=="Yes":
667
- print('Resuming Training...')
668
- print('Training the text encoder on the concept...')
669
- dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
670
- else:
671
- clear_output()
672
- if resuming=="Yes":
673
- print('Resuming Training...')
674
- print('No concept images found, skipping concept training...')
675
- Text_Encoder_Concept_Training_Steps=0
676
- time.sleep(8)
677
- else:
678
- clear_output()
679
- if resuming=="Yes":
680
- print('Resuming Training...')
681
- print('No concept images found, skipping concept training...')
682
- Text_Encoder_Concept_Training_Steps=0
683
- time.sleep(8)
684
-
685
- if UNet_Training_Steps!=0:
686
- train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
687
-
688
- if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and External_Captions:
689
- print('Nothing to do')
690
- else:
691
- if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
692
-
693
- call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
694
- clear_output()
695
- if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
696
- clear_output()
697
- print("DONE, the CKPT model is in the session's folder")
698
- else:
699
- print("Something went wrong")
700
-
701
- else:
702
- print("Something went wrong")
703
-
704
- return resume
705
-
706
-
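A worked example of the checkpoint-saving normalisation at the top of dbtrain, with illustrative values:

# Mirrors the logic in dbtrain above; values are illustrative.
Save_Checkpoint_Every_n_Steps = True
Save_Checkpoint_Every = 500
Start_saving_from_the_step = None

if Save_Checkpoint_Every is None:
    Save_Checkpoint_Every = 1
stp = 0
if Start_saving_from_the_step is None:
    Start_saving_from_the_step = 0
if Start_saving_from_the_step < 200:
    Start_saving_from_the_step = Save_Checkpoint_Every
stpsv = Start_saving_from_the_step   # first save at step 500
if Save_Checkpoint_Every_n_Steps:
    stp = Save_Checkpoint_Every      # then every 500 steps (0 disables intermediate saves)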
707
- def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_localtunnel):
708
-
709
-
710
- if Previous_Session_Name!="":
711
- print("Loading a previous session model")
712
- mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
713
- path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'
714
-
715
-
716
- while not os.path.exists(path_to_trained_model):
717
- print("There is no trained model in the previous session")
718
- time.sleep(5)
719
-
720
- elif Custom_Path!="":
721
- print("Loading model from a custom path")
722
- path_to_trained_model=Custom_Path
723
-
724
-
725
- while not os.path.exists(path_to_trained_model):
726
- print("Wrong Path")
727
- time.sleep(5)
728
-
729
- else:
730
- print("Loading the trained model")
731
- mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
732
- path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'
733
-
734
-
735
- while not os.path.exists(path_to_trained_model):
736
- print("There is no trained model in this session")
737
- time.sleep(5)
738
-
739
- auth=f"--gradio-auth {User}:{Password}"
740
- if User =="" or Password=="":
741
- auth=""
742
-
743
- os.chdir('/notebooks')
744
- if not os.path.exists('/notebooks/sd/stablediffusion'):
745
- call('wget -q -O sd_rep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_rep.tar.zst', shell=True)
746
- call('tar --zstd -xf sd_rep.tar.zst', shell=True)
747
- call('rm sd_rep.tar.zst', shell=True)
748
-
749
- os.chdir('/notebooks/sd')
750
- if not os.path.exists('stable-diffusion-webui'):
751
- call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)
752
-
753
- os.chdir('/notebooks/sd/stable-diffusion-webui/')
754
- call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
755
- print('')
756
- call('git pull', shell=True, stdout=open('/dev/null', 'w'))
757
- os.chdir('/notebooks')
758
- clear_output()
759
-
760
- if not os.path.exists('/usr/lib/node_modules/localtunnel'):
761
- call('npm install -g localtunnel --silent', shell=True, stdout=open('/dev/null', 'w'))
762
-
763
- share=''
764
- call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)
765
-
766
- if not Use_localtunnel:
767
- share='--share'
768
-
769
- else:
770
- share=''
771
- os.chdir('/notebooks')
772
- call('nohup lt --port 7860 > srv.txt 2>&1 &', shell=True)
773
- time.sleep(2)
774
- call("grep -o 'https[^ ]*' /notebooks/srv.txt >srvr.txt", shell=True)
775
- time.sleep(2)
776
- srv= getoutput('cat /notebooks/srvr.txt')
777
-
778
- for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
779
- if line.strip().startswith('self.server_name ='):
780
- line = f' self.server_name = "{srv[8:]}"\n'
781
- if line.strip().startswith('self.server_port ='):
782
- line = ' self.server_port = 443\n'
783
- if line.strip().startswith('self.protocol = "https"'):
784
- line = ' self.protocol = "https"\n'
785
- if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
786
- line = ''
787
- if line.strip().startswith('else "http"'):
788
- line = ''
789
- sys.stdout.write(line)
790
-
791
- call('rm /notebooks/srv.txt', shell=True)
792
- call('rm /notebooks/srvr.txt', shell=True)
793
-
794
-
795
-
796
- os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
797
- call('wget -q -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py', shell=True)
798
- call("sed -i 's@/content/gdrive/MyDrive/sd/stablediffusion@/notebooks/sd/stablediffusion@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
799
- os.chdir('/notebooks/sd/stable-diffusion-webui')
800
- clear_output()
801
-
802
- configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --medvram --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share
803
-
804
- return configf
805
-
806
-
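The flag string returned by test is meant to be appended to the webui start command; a sketch, assuming the usual launch.py entry point (not confirmed by this file):

# 'launch.py' as the entry point is an assumption; the flags come from test() above.
configf = test(Custom_Path="", Previous_Session_Name="", Session_Name="MySession",
               User="", Password="", Use_localtunnel=False)
call("python /notebooks/sd/stable-diffusion-webui/launch.py " + configf, shell=True)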
807
-
808
- def clean():
809
-
810
- Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
811
-
812
- s = widgets.Select(
813
- options=Sessions,
814
- rows=5,
815
- description='',
816
- disabled=False
817
- )
818
-
819
- out=widgets.Output()
820
-
821
- d = widgets.Button(
822
- description='Remove',
823
- disabled=False,
824
- button_style='warning',
825
- tooltip='Remove the selected session',
826
- icon='warning'
827
- )
828
-
829
- def rem(d):
830
- with out:
831
- if s.value is not None:
832
- clear_output()
833
- print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
834
- call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
835
- if os.path.exists('/notebooks/models/'+s.value):
836
- call('rm -r /notebooks/models/'+s.value, shell=True)
837
- s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
838
-
839
-
840
- else:
841
- d.close()
842
- s.close()
843
- clear_output()
844
- print("NOTHING TO REMOVE")
845
-
846
- d.on_click(rem)
847
- if s.value is not None:
848
- display(s,d,out)
849
- else:
850
- print("NOTHING TO REMOVE")
851
-
852
-
853
-
854
- def hf(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
855
-
856
- from slugify import slugify
857
- from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
858
- from huggingface_hub import create_repo
859
- from IPython.display import display_markdown
860
-
861
-
862
- if(Name_of_your_concept == ""):
863
- Name_of_your_concept = Session_Name
864
- Name_of_your_concept=Name_of_your_concept.replace(" ","-")
865
-
866
-
867
-
868
- if hf_token_write =="":
869
- print('Your Hugging Face write access token : ')
870
- hf_token_write=input()
871
-
872
- hf_token = hf_token_write
873
-
874
- api = HfApi()
875
- your_username = api.whoami(token=hf_token)["name"]
876
-
877
- if(Save_concept_to == "Public_Library"):
878
- repo_id = f"sd-dreambooth-library/{slugify(Name_of_your_concept)}"
879
- #Join the Concepts Library organization if you aren't part of it already
880
- call("curl -X POST -H 'Authorization: Bearer '"+hf_token+" -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", shell=True)
881
- else:
882
- repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
883
- output_dir = f'/notebooks/models/'+INSTANCE_NAME
884
-
885
- def bar(prg):
886
- br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
887
- return br
888
-
889
- print("Loading...")
890
-
891
-
892
- os.chdir(OUTPUT_DIR)
893
- call('rm -r safety_checker feature_extractor .git', shell=True)
894
- call('rm model_index.json', shell=True)
895
- call('git init', shell=True)
896
- call('git lfs install --system --skip-repo', shell=True)
897
- call('git remote add -f origin "https://USER:'+hf_token+'@huggingface.co/runwayml/stable-diffusion-v1-5"', shell=True)
898
- call('git config core.sparsecheckout true', shell=True)
899
- call('echo -e "\nfeature_extractor\nsafety_checker\nmodel_index.json" > .git/info/sparse-checkout', shell=True)
900
- call('git pull origin main', shell=True)
901
- call('rm -r .git', shell=True)
902
- os.chdir('/notebooks')
903
-
904
-
905
- print(bar(1))
906
-
907
- readme_text = f'''---
908
- license: creativeml-openrail-m
909
- tags:
910
- - text-to-image
911
- - stable-diffusion
912
- ---
913
- ### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook
914
-
915
- Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)
916
- Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)
917
- '''
918
- #Save the readme to a file
919
- readme_file = open("README.md", "w")
920
- readme_file.write(readme_text)
921
- readme_file.close()
922
-
923
- operations = [
924
- CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
925
- CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt",path_or_fileobj=MDLPTH)
926
-
927
- ]
928
- create_repo(repo_id,private=True, token=hf_token)
929
-
930
- api.create_commit(
931
- repo_id=repo_id,
932
- operations=operations,
933
- commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
934
- token=hf_token
935
- )
936
-
937
- api.upload_folder(
938
- folder_path=OUTPUT_DIR+"/feature_extractor",
939
- path_in_repo="feature_extractor",
940
- repo_id=repo_id,
941
- token=hf_token
942
- )
943
-
944
- clear_output()
945
- print(bar(4))
946
-
947
- api.upload_folder(
948
- folder_path=OUTPUT_DIR+"/safety_checker",
949
- path_in_repo="safety_checker",
950
- repo_id=repo_id,
951
- token=hf_token
952
- )
953
-
954
- clear_output()
955
- print(bar(8))
956
-
957
- api.upload_folder(
958
- folder_path=OUTPUT_DIR+"/scheduler",
959
- path_in_repo="scheduler",
960
- repo_id=repo_id,
961
- token=hf_token
962
- )
963
-
964
- clear_output()
965
- print(bar(9))
966
-
967
- api.upload_folder(
968
- folder_path=OUTPUT_DIR+"/text_encoder",
969
- path_in_repo="text_encoder",
970
- repo_id=repo_id,
971
- token=hf_token
972
- )
973
-
974
- clear_output()
975
- print(bar(12))
976
-
977
- api.upload_folder(
978
- folder_path=OUTPUT_DIR+"/tokenizer",
979
- path_in_repo="tokenizer",
980
- repo_id=repo_id,
981
- token=hf_token
982
- )
983
-
984
- clear_output()
985
- print(bar(13))
986
-
987
- api.upload_folder(
988
- folder_path=OUTPUT_DIR+"/unet",
989
- path_in_repo="unet",
990
- repo_id=repo_id,
991
- token=hf_token
992
- )
993
-
994
- clear_output()
995
- print(bar(21))
996
-
997
- api.upload_folder(
998
- folder_path=OUTPUT_DIR+"/vae",
999
- path_in_repo="vae",
1000
- repo_id=repo_id,
1001
- token=hf_token
1002
- )
1003
-
1004
- clear_output()
1005
- print(bar(23))
1006
-
1007
- api.upload_file(
1008
- path_or_fileobj=OUTPUT_DIR+"/model_index.json",
1009
- path_in_repo="model_index.json",
1010
- repo_id=repo_id,
1011
- token=hf_token
1012
- )
1013
-
1014
- clear_output()
1015
- print(bar(25))
1016
-
1017
- print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
1018
- done()
1019
-
1020
-
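An illustrative call to the uploader above; all names are placeholders, and hf prompts for a token when hf_token_write is left empty:

hf(Name_of_your_concept="my-concept",
   Save_concept_to="My_Profile",   # any value other than "Public_Library" targets your own namespace
   hf_token_write="",              # leave empty to be prompted interactively
   INSTANCE_NAME=INSTANCE_NAME, OUTPUT_DIR=OUTPUT_DIR,
   Session_Name=Session_Name, MDLPTH=MDLPTH)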
1021
-
1022
- def crop_image(im, size):
1023
-
1024
- GREEN = "#0F0"
1025
- BLUE = "#00F"
1026
- RED = "#F00"
1027
-
1028
- def focal_point(im, settings):
1029
- corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
1030
- entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
1031
- face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
1032
-
1033
- pois = []
1034
-
1035
- weight_pref_total = 0
1036
- if len(corner_points) > 0:
1037
- weight_pref_total += settings.corner_points_weight
1038
- if len(entropy_points) > 0:
1039
- weight_pref_total += settings.entropy_points_weight
1040
- if len(face_points) > 0:
1041
- weight_pref_total += settings.face_points_weight
1042
-
1043
- corner_centroid = None
1044
- if len(corner_points) > 0:
1045
- corner_centroid = centroid(corner_points)
1046
- corner_centroid.weight = settings.corner_points_weight / weight_pref_total
1047
- pois.append(corner_centroid)
1048
-
1049
- entropy_centroid = None
1050
- if len(entropy_points) > 0:
1051
- entropy_centroid = centroid(entropy_points)
1052
- entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
1053
- pois.append(entropy_centroid)
1054
-
1055
- face_centroid = None
1056
- if len(face_points) > 0:
1057
- face_centroid = centroid(face_points)
1058
- face_centroid.weight = settings.face_points_weight / weight_pref_total
1059
- pois.append(face_centroid)
1060
-
1061
- average_point = poi_average(pois, settings)
1062
-
1063
- return average_point
1064
-
1065
-
1066
- def image_face_points(im, settings):
1067
-
1068
- np_im = np.array(im)
1069
- gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
1070
-
1071
- tries = [
1072
- [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
1073
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
1074
- [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
1075
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
1076
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
1077
- [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
1078
- [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
1079
- [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
1080
- ]
1081
- for t in tries:
1082
- classifier = cv2.CascadeClassifier(t[0])
1083
- minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
1084
- try:
1085
- faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
1086
- minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
1087
- except:
1088
- continue
1089
-
1090
- if len(faces) > 0:
1091
- rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
1092
- return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
1093
- return []
1094
-
1095
-
1096
- def image_corner_points(im, settings):
1097
- grayscale = im.convert("L")
1098
-
1099
- # naive attempt at preventing focal points from collecting at watermarks near the bottom
1100
- gd = ImageDraw.Draw(grayscale)
1101
- gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
1102
-
1103
- np_im = np.array(grayscale)
1104
-
1105
- points = cv2.goodFeaturesToTrack(
1106
- np_im,
1107
- maxCorners=100,
1108
- qualityLevel=0.04,
1109
- minDistance=min(grayscale.width, grayscale.height)*0.06,
1110
- useHarrisDetector=False,
1111
- )
1112
-
1113
- if points is None:
1114
- return []
1115
-
1116
- focal_points = []
1117
- for point in points:
1118
- x, y = point.ravel()
1119
- focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
1120
-
1121
- return focal_points
1122
-
1123
-
1124
- def image_entropy_points(im, settings):
1125
- landscape = im.height < im.width
1126
- portrait = im.height > im.width
1127
- if landscape:
1128
- move_idx = [0, 2]
1129
- move_max = im.size[0]
1130
- elif portrait:
1131
- move_idx = [1, 3]
1132
- move_max = im.size[1]
1133
- else:
1134
- return []
1135
-
1136
- e_max = 0
1137
- crop_current = [0, 0, settings.crop_width, settings.crop_height]
1138
- crop_best = crop_current
1139
- while crop_current[move_idx[1]] < move_max:
1140
- crop = im.crop(tuple(crop_current))
1141
- e = image_entropy(crop)
1142
-
1143
- if (e > e_max):
1144
- e_max = e
1145
- crop_best = list(crop_current)
1146
-
1147
- crop_current[move_idx[0]] += 4
1148
- crop_current[move_idx[1]] += 4
1149
-
1150
- x_mid = int(crop_best[0] + settings.crop_width/2)
1151
- y_mid = int(crop_best[1] + settings.crop_height/2)
1152
-
1153
- return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
1154
-
1155
-
1156
- def image_entropy(im):
1157
- # greyscale image entropy
1158
- # band = np.asarray(im.convert("L"))
1159
- band = np.asarray(im.convert("1"), dtype=np.uint8)
1160
- hist, _ = np.histogram(band, bins=range(0, 256))
1161
- hist = hist[hist > 0]
1162
- return -np.log2(hist / hist.sum()).sum()
1163
-
1164
- def centroid(pois):
1165
- x = [poi.x for poi in pois]
1166
- y = [poi.y for poi in pois]
1167
- return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))
1168
-
1169
-
1170
- def poi_average(pois, settings):
1171
- weight = 0.0
1172
- x = 0.0
1173
- y = 0.0
1174
- for poi in pois:
1175
- weight += poi.weight
1176
- x += poi.x * poi.weight
1177
- y += poi.y * poi.weight
1178
- avg_x = round(weight and x / weight)
1179
- avg_y = round(weight and y / weight)
1180
-
1181
- return PointOfInterest(avg_x, avg_y)
1182
-
1183
-
1184
- def is_landscape(w, h):
1185
- return w > h
1186
-
1187
-
1188
- def is_portrait(w, h):
1189
- return h > w
1190
-
1191
-
1192
- def is_square(w, h):
1193
- return w == h
1194
-
1195
-
1196
- class PointOfInterest:
1197
- def __init__(self, x, y, weight=1.0, size=10):
1198
- self.x = x
1199
- self.y = y
1200
- self.weight = weight
1201
- self.size = size
1202
-
1203
- def bounding(self, size):
1204
- return [
1205
- self.x - size//2,
1206
- self.y - size//2,
1207
- self.x + size//2,
1208
- self.y + size//2
1209
- ]
1210
-
1211
- class Settings:
1212
- def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
1213
- self.crop_width = crop_width
1214
- self.crop_height = crop_height
1215
- self.corner_points_weight = corner_points_weight
1216
- self.entropy_points_weight = entropy_points_weight
1217
- self.face_points_weight = face_points_weight
1218
-
1219
- settings = Settings(
1220
- crop_width = size,
1221
- crop_height = size,
1222
- face_points_weight = 0.9,
1223
- entropy_points_weight = 0.15,
1224
- corner_points_weight = 0.5,
1225
- )
1226
-
1227
- scale_by = 1
1228
- if is_landscape(im.width, im.height):
1229
- scale_by = settings.crop_height / im.height
1230
- elif is_portrait(im.width, im.height):
1231
- scale_by = settings.crop_width / im.width
1232
- elif is_square(im.width, im.height):
1233
- if is_square(settings.crop_width, settings.crop_height):
1234
- scale_by = settings.crop_width / im.width
1235
- elif is_landscape(settings.crop_width, settings.crop_height):
1236
- scale_by = settings.crop_width / im.width
1237
- elif is_portrait(settings.crop_width, settings.crop_height):
1238
- scale_by = settings.crop_height / im.height
1239
-
1240
- im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
1241
- im_debug = im.copy()
1242
-
1243
- focus = focal_point(im_debug, settings)
1244
-
1245
- # take the focal point and turn it into crop coordinates that try to center over the focal
1246
- # point but then get adjusted back into the frame
1247
- y_half = int(settings.crop_height / 2)
1248
- x_half = int(settings.crop_width / 2)
1249
-
1250
- x1 = focus.x - x_half
1251
- if x1 < 0:
1252
- x1 = 0
1253
- elif x1 + settings.crop_width > im.width:
1254
- x1 = im.width - settings.crop_width
1255
-
1256
- y1 = focus.y - y_half
1257
- if y1 < 0:
1258
- y1 = 0
1259
- elif y1 + settings.crop_height > im.height:
1260
- y1 = im.height - settings.crop_height
1261
-
1262
- x2 = x1 + settings.crop_width
1263
- y2 = y1 + settings.crop_height
1264
-
1265
- crop = [x1, y1, x2, y2]
1266
-
1267
- results = []
1268
-
1269
- results.append(im.crop(tuple(crop)))
1270
-
1271
- return results
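A minimal usage sketch for the smart-crop helper above (the path is illustrative); it returns a single-element list holding the cropped PIL image:

from PIL import Image

im = Image.open("/notebooks/example.jpg")          # illustrative path
crops = crop_image(im, 512)                        # focal-point crop to 512x512
crops[0].save("/notebooks/example-cropped.jpg", format="JPEG", quality=100)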
Scripts/mainpaperspacev2.py DELETED
@@ -1,1279 +0,0 @@
1
- from IPython.display import clear_output
2
- from subprocess import call, getoutput
3
- from IPython.display import display
4
- import ipywidgets as widgets
5
- import io
6
- from PIL import Image, ImageDraw
7
- import fileinput
8
- import time
9
- import os
10
- from os import listdir
11
- from os.path import isfile
12
- from tqdm import tqdm
13
- import gdown
14
- import random
15
- import sys
16
- import cv2
17
- from io import BytesIO
18
- import requests
19
- from collections import defaultdict
20
- from math import log, sqrt
21
- import numpy as np
22
-
23
-
24
-
25
- def Deps(force_reinstall):
26
-
27
- if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
28
- os.chdir('/notebooks')
29
- if not os.path.exists('Latest_Notebooks'):
30
- call('mkdir Latest_Notebooks', shell=True)
31
- else:
32
- call('rm -r Latest_Notebooks', shell=True)
33
- call('mkdir Latest_Notebooks', shell=True)
34
- os.chdir('/notebooks/Latest_Notebooks')
35
- call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
36
- call('rm Notebooks.txt', shell=True)
37
- os.chdir('/notebooks')
38
- print('Modules and notebooks updated, dependencies already installed')
39
-
40
- else:
41
- print('Installing the dependencies...')
42
- call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
43
- if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
44
- os.chdir('/usr/local/lib/python3.9/dist-packages')
45
- call("rm -r torch torch-1.12.0+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
46
-
47
- os.chdir('/notebooks')
48
- if not os.path.exists('Latest_Notebooks'):
49
- call('mkdir Latest_Notebooks', shell=True)
50
- else:
51
- call('rm -r Latest_Notebooks', shell=True)
52
- call('mkdir Latest_Notebooks', shell=True)
53
- os.chdir('/notebooks/Latest_Notebooks')
54
- call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
55
- call('rm Notebooks.txt', shell=True)
56
- os.chdir('/notebooks')
57
- call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
58
- os.chdir('/notebooks')
59
- if not os.path.exists('/models'):
60
- call('mkdir /models', shell=True)
61
- if not os.path.exists('/notebooks/models'):
62
- call('ln -s /models /notebooks', shell=True)
63
- if os.path.exists('/deps'):
64
- call("rm -r /deps", shell=True)
65
- call('mkdir /deps', shell=True)
66
- if not os.path.exists('cache'):
67
- call('mkdir cache', shell=True)
68
- os.chdir('/deps')
69
- call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
70
- call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
71
- call('wget -q https://huggingface.co/TheLastBen/dependencies/resolve/main/pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
72
- call('tar -C / --zstd -xf pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
73
- call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
74
- os.chdir('/notebooks')
75
- call("git clone --depth 1 -q --branch updt https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'))
76
- if not os.path.exists('/notebooks/diffusers'):
77
- call('ln -s /diffusers /notebooks', shell=True)
78
- call("rm -r /deps", shell=True)
79
- os.chdir('/notebooks')
80
- clear_output()
81
-
82
- done()
83
-
84
-
85
-
86
-
87
- def downloadmodel_hfv2(Path_to_HuggingFace):
88
- import wget
89
-
90
- if os.path.exists('/models/stable-diffusion-custom'):
91
- call("rm -r /models/stable-diffusion-custom", shell=True)
92
- clear_output()
93
-
94
- if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
95
- with open("/notebooks/Fast-Dreambooth/token.txt") as f:
96
- token = f.read()
97
- authe=f'https://USER:{token}@'
98
- else:
99
- authe="https://"
100
-
101
- clear_output()
102
- call("mkdir /models/stable-diffusion-custom", shell=True)
103
- os.chdir("/models/stable-diffusion-custom")
104
- call("git init", shell=True)
105
- call("git lfs install --system --skip-repo", shell=True)
106
- call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
107
- call("git config core.sparsecheckout true", shell=True)
108
- call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
109
- call("git pull origin main", shell=True)
110
- if os.path.exists('unet/diffusion_pytorch_model.bin'):
111
- call("rm -r .git", shell=True)
112
- os.chdir('/notebooks')
113
- clear_output()
114
- done()
115
- while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
116
- print('Check the link you provided')
117
- os.chdir('/notebooks')
118
- time.sleep(5)
119
-
120
-
121
-
122
-
123
- def downloadmodel_pthv2(CKPT_Path, Custom_Model_Version):
124
- import wget
125
- os.chdir('/models')
126
- clear_output()
127
- if os.path.exists(str(CKPT_Path)):
128
- if Custom_Model_Version=='512':
129
- wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
130
- clear_output()
131
- call('python convertodiffv2.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
132
- elif Custom_Model_Version=='768':
133
- wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
134
- clear_output()
135
- call('python convertodiffv2.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
136
- call('rm convertodiffv2.py', shell=True)
137
- if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
138
- os.chdir('/notebooks')
139
- clear_output()
140
- done()
141
- while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
142
- print('Conversion error')
143
- os.chdir('/notebooks')
144
- time.sleep(5)
145
-
146
- else:
147
- while not os.path.exists(str(CKPT_Path)):
148
- print('Wrong path, use the colab file explorer to copy the path')
149
- os.chdir('/notebooks')
150
- time.sleep(5)
151
-
152
-
153
-
154
-
155
- def downloadmodel_lnkv2(CKPT_Link, Custom_Model_Version):
156
- import wget
157
- os.chdir('/models')
158
- call("gdown --fuzzy " +CKPT_Link+ " -O model.ckpt", shell=True)
159
-
160
- if os.path.exists('model.ckpt'):
161
- if os.path.getsize("model.ckpt") > 1810671599:
162
- wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
163
- if Custom_Model_Version=='512':
164
- call('python convertodiffv2.py model.ckpt stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
165
- elif Custom_Model_Version=='768':
166
- call('python convertodiffv2.py model.ckpt stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
167
- call('rm convertodiffv2.py', shell=True)
168
- if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
169
- call('rm model.ckpt', shell=True)
170
- os.chdir('/notebooks')
171
- clear_output()
172
- done()
173
- else:
174
- while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
175
- print('Conversion error')
176
- os.chdir('/notebooks')
177
- time.sleep(5)
178
- else:
179
- while os.path.getsize('/models/model.ckpt') < 1810671599:
180
- print('Wrong link, check that the link is valid')
181
- os.chdir('/notebooks')
182
- time.sleep(5)
183
-
184
-
185
-
186
-
187
- def dlv2(Path_to_HuggingFace, CKPT_Path, CKPT_Link, Model_Version, Custom_Model_Version):
188
-
189
- if Path_to_HuggingFace != "":
190
- downloadmodel_hfv2(Path_to_HuggingFace)
191
- MODEL_NAMEv2="/models/stable-diffusion-custom"
192
- elif CKPT_Path !="":
193
- downloadmodel_pthv2(CKPT_Path, Custom_Model_Version)
194
- MODEL_NAMEv2="/models/stable-diffusion-custom"
195
- elif CKPT_Link !="":
196
- downloadmodel_lnkv2(CKPT_Link, Custom_Model_Version)
197
- MODEL_NAMEv2="/models/stable-diffusion-custom"
198
- else:
199
- if Model_Version=="512":
200
- MODEL_NAMEv2="dataset"
201
- print('Using the original V2-512 model')
202
- elif Model_Version=="768":
203
- MODEL_NAMEv2="/datasets/stable-diffusion-v2-1/stable-diffusion-2-1"
204
- print('Using the original V2-768 model')
205
- else:
206
- MODEL_NAMEv2=""
207
- print('Wrong model version')
208
-
209
- return MODEL_NAMEv2
210
-
211
-
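An illustrative call: with no custom source given, dlv2 falls back to the original v2 base model selected by Model_Version:

MODEL_NAMEv2 = dlv2(Path_to_HuggingFace="", CKPT_Path="", CKPT_Link="",
                    Model_Version="768", Custom_Model_Version="768")
# -> "/datasets/stable-diffusion-v2-1/stable-diffusion-2-1" (the original V2-768 model)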
212
- def sessv2(Session_Name, Session_Link_optional, Model_Version, MODEL_NAMEv2):
213
- import gdown
214
- os.chdir('/notebooks')
215
- PT=""
216
-
217
- while Session_Name=="":
218
- print('Input the Session Name:')
219
- Session_Name=input("")
220
- Session_Name=Session_Name.replace(" ","_")
221
-
222
- WORKSPACE='/notebooks/Fast-Dreambooth'
223
-
224
- if Session_Link_optional !="":
225
- print('Downloading session...')
226
-
227
- if Session_Link_optional != "":
228
- if not os.path.exists(str(WORKSPACE+'/Sessions')):
229
- call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
230
- time.sleep(1)
231
- os.chdir(WORKSPACE+'/Sessions')
232
- gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
233
- os.chdir(Session_Name)
234
- call("rm -r " +instance_images, shell=True)
235
- call("unzip " +instance_images.zip, shell=True, stdout=open('/dev/null', 'w'))
236
- call("rm -r " +concept_images, shell=True)
237
- call("unzip " +concept_images.zip, shell=True, stdout=open('/dev/null', 'w'))
238
- call("rm -r " +captions, shell=True)
239
- call("unzip " +captions.zip, shell=True, stdout=open('/dev/null', 'w'))
240
- os.chdir('/notebooks')
241
- clear_output()
242
-
243
- INSTANCE_NAME=Session_Name
244
- OUTPUT_DIR="/models/"+Session_Name
245
- SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
246
- CONCEPT_DIR=SESSION_DIR+"/concept_images"
247
- INSTANCE_DIR=SESSION_DIR+"/instance_images"
248
- CAPTIONS_DIR=SESSION_DIR+'/captions'
249
- MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
250
- resumev2=False
251
-
252
- if os.path.exists(str(SESSION_DIR)):
253
- mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
254
- if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):
255
-
256
- def f(n):
257
- k=0
258
- for i in mdls:
259
- if k==n:
260
- call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
261
- k=k+1
262
-
263
- k=0
264
- print('No final checkpoint model found, select which intermediary checkpoint to use; enter only the number (000 to skip):\n')
265
-
266
- for i in mdls:
267
- print(str(k)+'- '+i)
268
- k=k+1
269
- n=input()
270
- while int(n)>k-1:
271
- n=input()
272
- if n!="000":
273
- f(int(n))
274
- print('Using the model '+ mdls[int(n)]+" ...")
275
- time.sleep(8)
276
- else:
277
- print('Skipping the intermediary checkpoints.')
278
-
279
-
280
- if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
281
- print('Loading session with no previous model, using the original model or the custom downloaded model')
282
- if MODEL_NAMEv2=="":
283
- print('No model found, use the "Model Download" cell to download a model.')
284
- else:
285
- print('Session Loaded, proceed to uploading instance images')
286
-
287
- elif os.path.exists(MDLPTH):
288
- print('Session found, loading the trained model ...')
289
- if Model_Version=='512':
290
- call("wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py", shell=True)
291
- clear_output()
292
- print('Session found, loading the trained model ...')
293
- call('python /notebooks/convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
294
-
295
- elif Model_Version=='768':
296
- call('wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', shell=True)
297
- clear_output()
298
- print('Session found, loading the trained model ...')
299
- call('python /notebooks/convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
300
-
301
- call('rm /notebooks/convertodiff.py', shell=True)
302
-
303
- if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
304
- resumev2=True
305
- clear_output()
306
- print('Session loaded.')
307
- else:
308
- if not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
309
- print('Conversion error, if the error persists, remove the CKPT file from the current session folder')
310
-
311
- elif not os.path.exists(str(SESSION_DIR)):
312
- call('mkdir -p '+INSTANCE_DIR, shell=True)
313
- print('Creating session...')
314
- if MODEL_NAMEv2=="":
315
- print('No model found, use the "Model Download" cell to download a model.')
316
- else:
317
- print('Session created, proceed to uploading instance images')
318
-
319
- return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2
320
-
321
-
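For reference, the on-disk layout that sessv2 builds and expects, using the variable names assigned above:

# /notebooks/Fast-Dreambooth/Sessions/<Session_Name>/   (SESSION_DIR)
#     instance_images/      (INSTANCE_DIR)   training images
#     concept_images/       (CONCEPT_DIR)    optional concept images
#     captions/             (CAPTIONS_DIR)   one .txt caption per image
#     <Session_Name>.ckpt   (MDLPTH)         trained checkpoint, if present
# /models/<Session_Name>/                    (OUTPUT_DIR) diffusers-format working copy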
322
-
323
- def done():
324
- done = widgets.Button(
325
- description='Done!',
326
- disabled=True,
327
- button_style='success',
328
- tooltip='',
329
- icon='check'
330
- )
331
- display(done)
332
-
333
-
334
-
335
-
336
- def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
337
-
338
- uploader = widgets.FileUpload(description="Choose images",accept='image/*', multiple=True)
339
- Upload = widgets.Button(
340
- description='Upload',
341
- disabled=False,
342
- button_style='info',
343
- tooltip='Click to upload the chosen instance images',
344
- icon=''
345
- )
346
-
347
-
348
- def up(Upload):
349
- with out:
350
- uploader.close()
351
- Upload.close()
352
- upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
353
- done()
354
- out=widgets.Output()
355
-
356
- if IMAGES_FOLDER_OPTIONAL=="":
357
- Upload.on_click(up)
358
- display(uploader, Upload, out)
359
- else:
360
- upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
361
- done()
362
-
363
-
364
-
365
-
366
- def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
367
-
368
-
369
- if os.path.exists(CAPTIONS_DIR+"off"):
370
- call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
371
- time.sleep(2)
372
-
373
- if Remove_existing_instance_images:
374
- if os.path.exists(str(INSTANCE_DIR)):
375
- call("rm -r " +INSTANCE_DIR, shell=True)
376
- if os.path.exists(str(CAPTIONS_DIR)):
377
- call("rm -r " +CAPTIONS_DIR, shell=True)
378
-
379
-
380
- if not os.path.exists(str(INSTANCE_DIR)):
381
- call("mkdir -p " +INSTANCE_DIR, shell=True)
382
- if not os.path.exists(str(CAPTIONS_DIR)):
383
- call("mkdir -p " +CAPTIONS_DIR, shell=True)
384
-
385
-
386
- if IMAGES_FOLDER_OPTIONAL !="":
387
- if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
388
- call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
389
- if Crop_images:
390
- os.chdir(str(IMAGES_FOLDER_OPTIONAL))
391
- call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
392
- os.chdir('/notebooks')
393
- for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
394
- extension = filename.split(".")[-1]
395
- identifier=filename.split(".")[0]
396
- new_path_with_file = os.path.join(INSTANCE_DIR, filename)
397
- file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
398
- width, height = file.size
399
- image = file
400
- if file.size !=(Crop_size, Crop_size):
401
- image=crop_image(file, Crop_size)
402
- if extension.upper() in ("JPG", "JPEG"):
403
- image[0].save(new_path_with_file, format="JPEG", quality = 100)
404
- else:
405
- image[0].save(new_path_with_file, format=extension.upper())
406
-
407
- else:
408
- call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)
409
-
410
- else:
411
- for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
412
- call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)
413
-
414
-
415
-
416
- elif IMAGES_FOLDER_OPTIONAL =="":
417
- up=""
418
- for filename, file in uploader.value.items():
419
- if filename.split(".")[-1]=="txt":
420
- with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
421
- f.write(file['content'].decode())
422
- up=[(filename, file) for filename, file in uploader.value.items() if filename.split(".")[-1]!="txt"]
423
- if Crop_images:
424
- for filename, file_info in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
425
- img = Image.open(io.BytesIO(file_info['content']))
426
- extension = filename.split(".")[-1]
427
- identifier=filename.split(".")[0]
428
-
429
- if extension.upper() in ("JPG", "JPEG"):
430
- img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
431
- else:
432
- img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
433
-
434
- new_path_with_file = os.path.join(INSTANCE_DIR, filename)
435
- file = Image.open(new_path_with_file)
436
- width, height = file.size
437
- image = img
438
- if file.size !=(Crop_size, Crop_size):
439
- image=crop_image(file, Crop_size)
440
- if extension.upper() in ("JPG", "JPEG"):
441
- image[0].save(new_path_with_file, format="JPEG", quality = 100)
442
- else:
443
- image[0].save(new_path_with_file, format=extension.upper())
444
-
445
- else:
446
- for filename, file_info in tqdm(uploader.value.items(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
447
- img = Image.open(io.BytesIO(file_info['content']))
448
-
449
- extension = filename.split(".")[-1]
450
- identifier=filename.split(".")[0]
451
-
452
- if extension.upper() in ("JPG", "JPEG"):
453
- img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
454
- else:
455
- img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
456
-
457
-
458
- if ren:
459
- i=0
460
- for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
461
- extension = filename.split(".")[-1]
462
- identifier=filename.split(".")[0]
463
- new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
464
- call('mv "'+os.path.join(INSTANCE_DIR,filename)+'" "'+new_path_with_file+'"', shell=True)
465
- i=i+1
466
-
467
- os.chdir(INSTANCE_DIR)
468
- call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
469
- os.chdir(CAPTIONS_DIR)
470
- call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
471
- os.chdir('/notebooks')
472
-
473
-
474
- def caption(CAPTIONS_DIR, INSTANCE_DIR):
475
-
476
- if os.path.exists(CAPTIONS_DIR+"off"):
477
- call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
478
- time.sleep(2)
479
-
480
- paths=""
481
- out=""
482
- widgets_l=""
483
- clear_output()
484
- def Caption(path):
485
- if path!="Select an instance image to caption":
486
-
487
- name = os.path.splitext(os.path.basename(path))[0]
488
- ext=os.path.splitext(os.path.basename(path))[-1][1:]
489
- if ext.lower() in ("jpg", "jpeg"):
490
- ext="JPEG"
491
-
492
- if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
493
- with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
494
- text = f.read()
495
- else:
496
- with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
497
- f.write("")
498
- with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
499
- text = f.read()
500
-
501
- img=Image.open(os.path.join(INSTANCE_DIR,path))
502
- img=img.resize((420, 420))
503
- image_bytes = BytesIO()
504
- img.save(image_bytes, format=ext, quality=10)
505
- image_bytes.seek(0)
506
- image_data = image_bytes.read()
507
- img= image_data
508
- image = widgets.Image(
509
- value=img,
510
- width=420,
511
- height=420
512
- )
513
- text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})
514
-
515
-
516
- def update_text(text):
517
- with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
518
- f.write(text)
519
-
520
- button = widgets.Button(description='Save', button_style='success')
521
- button.on_click(lambda b: update_text(text_area.value))
522
-
523
- return widgets.VBox([widgets.HBox([image, text_area, button])])
524
-
525
-
526
- paths = os.listdir(INSTANCE_DIR)
527
- widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)
528
-
529
-
530
- out = widgets.Output()
531
-
532
- def click(change):
533
- with out:
534
- out.clear_output()
535
- display(Caption(change.new))
536
-
537
- widgets_l.observe(click, names='value')
538
- display(widgets.HBox([widgets_l, out]))
539
-
540
-
541
-
542
-
543
- def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Style_Training, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
544
-
545
- if resumev2 and not Resume_Training:
546
- print('Overwrite your previously trained model? Answering "yes" will train a new model, answering "no" will resume training of the previous model. yes or no?')
547
- while True:
548
- ansres=input('')
549
- if ansres=='no':
550
- Resume_Training = True
551
- break
552
- elif ansres=='yes':
553
- Resume_Training = False
554
- resumev2= False
555
- break
556
-
557
- while not Resume_Training and not os.path.exists(MODEL_NAMEv2+'/unet/diffusion_pytorch_model.bin'):
558
- print('No model found, use the "Model Download" cell to download a model.')
559
- time.sleep(5)
560
-
561
- if os.path.exists(CAPTIONS_DIR+"off"):
562
- call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
563
- time.sleep(2)
564
-
565
- MODELT_NAME=MODEL_NAMEv2
566
-
567
- Seed=random.randint(1, 999999)
568
-
569
- Style=""
570
- if Style_Training:
571
- Style="--Style"
572
-
573
- extrnlcptn=""
574
- if External_Captions:
575
- extrnlcptn="--external_captions"
576
-
577
- precision="fp16"
578
-
579
- GCUNET="--gradient_checkpointing"
580
- if Resolution<=640:
581
- GCUNET=""
582
-
583
- resuming=""
584
- if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
585
- MODELT_NAME=OUTPUT_DIR
586
- print('Resuming Training...')
587
- resuming="Yes"
588
- elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
589
- print('Previous model not found, training a new model...')
590
- MODELT_NAME=MODEL_NAMEv2
591
- while MODEL_NAMEv2=="":
592
- print('No model found, use the "Model Download" cell to download a model.')
593
- time.sleep(5)
594
-
595
-
596
- trnonltxt=""
597
- if UNet_Training_Steps==0:
598
- trnonltxt="--train_only_text_encoder"
599
-
600
- Enable_text_encoder_training= True
601
- Enable_Text_Encoder_Concept_Training= True
602
-
603
-
604
- if Text_Encoder_Training_Steps==0 or External_Captions:
605
- Enable_text_encoder_training= False
606
- else:
607
- stptxt=Text_Encoder_Training_Steps
608
-
609
- if Text_Encoder_Concept_Training_Steps==0:
610
- Enable_Text_Encoder_Concept_Training= False
611
- else:
612
- stptxtc=Text_Encoder_Concept_Training_Steps
613
-
614
-
615
- if Save_Checkpoint_Every==None:
616
- Save_Checkpoint_Every=1
617
- stp=0
618
- if Start_saving_from_the_step==None:
619
- Start_saving_from_the_step=0
620
- if (Start_saving_from_the_step < 200):
621
- Start_saving_from_the_step=Save_Checkpoint_Every
622
- stpsv=Start_saving_from_the_step
623
- if Save_Checkpoint_Every_n_Steps:
624
- stp=Save_Checkpoint_Every
625
-
626
-
627
- def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
628
- call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
629
- '+trnonltxt+' \
630
- --train_text_encoder \
631
- --image_captions_filename \
632
- --dump_only_text_encoder \
633
- --pretrained_model_name_or_path='+MODELT_NAME+' \
634
- --instance_data_dir='+INSTANCE_DIR+' \
635
- --output_dir='+OUTPUT_DIR+' \
636
- --instance_prompt='+PT+' \
637
- --seed='+str(Seed)+' \
638
- --resolution=512 \
639
- --mixed_precision='+str(precision)+' \
640
- --train_batch_size=1 \
641
- --gradient_accumulation_steps=1 --gradient_checkpointing \
642
- --use_8bit_adam \
643
- --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
644
- --lr_scheduler="polynomial" \
645
- --lr_warmup_steps=0 \
646
- --max_train_steps='+str(Training_Steps), shell=True)
647
-
648
- def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps):
649
- clear_output()
650
- if resuming=="Yes":
651
- print('Resuming Training...')
652
- print('Training the UNet...')
653
- call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
654
- '+Style+' \
655
- '+extrnlcptn+' \
656
- --stop_text_encoder_training='+str(Text_Encoder_Training_Steps)+' \
657
- --image_captions_filename \
658
- --train_only_unet \
659
- --Session_dir='+SESSION_DIR+' \
660
- --save_starting_step='+str(stpsv)+' \
661
- --save_n_steps='+str(stp)+' \
662
- --pretrained_model_name_or_path='+MODELT_NAME+' \
663
- --instance_data_dir='+INSTANCE_DIR+' \
664
- --output_dir='+OUTPUT_DIR+' \
665
- --instance_prompt='+PT+' \
666
- --seed='+str(Seed)+' \
667
- --resolution='+str(Resolution)+' \
668
- --mixed_precision='+str(precision)+' \
669
- --train_batch_size=1 \
670
- --gradient_accumulation_steps=1 '+GCUNET+' \
671
- --use_8bit_adam \
672
- --learning_rate='+str(UNet_Learning_Rate)+' \
673
- --lr_scheduler="polynomial" \
674
- --lr_warmup_steps=0 \
675
- --max_train_steps='+str(Training_Steps), shell=True)
676
-
677
- if Enable_text_encoder_training :
678
- print('Training the text encoder...')
679
- if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
680
- call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
681
- dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)
682
-
683
- if Enable_Text_Encoder_Concept_Training:
684
- if os.path.exists(CONCEPT_DIR):
685
- if os.listdir(CONCEPT_DIR)!=[]:
686
- clear_output()
687
- if resuming=="Yes":
688
- print('Resuming Training...')
689
- print('Training the text encoder on the concept...')
690
- dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
691
- else:
692
- clear_output()
693
- if resuming=="Yes":
694
- print('Resuming Training...')
695
- print('No concept images found, skipping concept training...')
696
- Text_Encoder_Concept_Training_Steps=0
697
- time.sleep(8)
698
- else:
699
- clear_output()
700
- if resuming=="Yes":
701
- print('Resuming Training...')
702
- print('No concept images found, skipping concept training...')
703
- Text_Encoder_Concept_Training_Steps=0
704
- time.sleep(8)
705
-
706
- if UNet_Training_Steps!=0:
707
- train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
708
-
709
- if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and External_Captions :
710
- print('Nothing to do')
711
- else:
712
- if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
713
-
714
- call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
715
- clear_output()
716
- if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
717
- clear_output()
718
- print("DONE, the CKPT model is in the session's folder")
719
- else:
720
- print("Something went wrong")
721
-
722
- else:
723
- print("Something went wrong")
724
-
725
- return resumev2
726
-
727
-
728
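dbtrainv2 assembles each `accelerate launch` invocation by concatenating strings into a single `shell=True` command. The same UNet pass can be sketched as an argument list, which sidesteps shell quoting; every value below is a placeholder, only the flags come from the code above:

from subprocess import call

cmd = [
    "accelerate", "launch",
    "/notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py",
    "--stop_text_encoder_training=350",                                # placeholder step count
    "--image_captions_filename",
    "--train_only_unet",
    "--Session_dir=/notebooks/Fast-Dreambooth/Sessions/my-session",    # placeholder paths
    "--save_starting_step=500",
    "--save_n_steps=0",
    "--pretrained_model_name_or_path=/notebooks/stable-diffusion-custom",
    "--instance_data_dir=instance_images",
    "--output_dir=output",
    "--instance_prompt=",
    "--seed=42",
    "--resolution=512",
    "--mixed_precision=fp16",
    "--train_batch_size=1",
    "--gradient_accumulation_steps=1",
    "--gradient_checkpointing",
    "--use_8bit_adam",
    "--learning_rate=2e-6",                                            # placeholder LR
    "--lr_scheduler=polynomial",
    "--lr_warmup_steps=0",
    "--max_train_steps=1500",                                          # placeholder step count
]
call(cmd)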
- def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_localtunnel):
-
-     if Previous_Session_Name!="":
-         print("Loading a previous session model")
-         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
-         path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'
-
-         while not os.path.exists(path_to_trained_model):
-             print("There is no trained model in the previous session")
-             time.sleep(5)
-
-     elif Custom_Path!="":
-         print("Loading model from a custom path")
-         path_to_trained_model=Custom_Path
-
-         while not os.path.exists(path_to_trained_model):
-             print("Wrong Path")
-             time.sleep(5)
-
-     else:
-         print("Loading the trained model")
-         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
-         path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'
-
-         while not os.path.exists(path_to_trained_model):
-             print("There is no trained model in this session")
-             time.sleep(5)
-
-     auth=f"--gradio-auth {User}:{Password}"
-     if User=="" or Password=="":
-         auth=""
-
-     os.chdir('/notebooks')
-     if not os.path.exists('/notebooks/sd/stablediffusion'):
-         call('wget -q -O sd_rep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_rep.tar.zst', shell=True)
-         call('tar --zstd -xf sd_rep.tar.zst', shell=True)
-         call('rm sd_rep.tar.zst', shell=True)
-
-     os.chdir('/notebooks/sd')
-     if not os.path.exists('stable-diffusion-webui'):
-         call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)
-
-     os.chdir('/notebooks/sd/stable-diffusion-webui/')
-     call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
-     print('')
-     call('git pull', shell=True, stdout=open('/dev/null', 'w'))
-     os.chdir('/notebooks')
-     clear_output()
-
-     if not os.path.exists('/usr/lib/node_modules/localtunnel'):
-         call('npm install -g localtunnel --silent', shell=True, stdout=open('/dev/null', 'w'))
-
-     share=''
-     call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)
-
-     if not Use_localtunnel:
-         share='--share'
-     else:
-         share=''
-         os.chdir('/notebooks')
-         call('nohup lt --port 7860 > srv.txt 2>&1 &', shell=True)
-         time.sleep(2)
-         call("grep -o 'https[^ ]*' /notebooks/srv.txt >srvr.txt", shell=True)
-         time.sleep(2)
-         srv= getoutput('cat /notebooks/srvr.txt')
-
-         # patch gradio's blocks.py in place so the UI reports the localtunnel https URL
-         for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
-             if line.strip().startswith('self.server_name ='):
-                 line = f'        self.server_name = "{srv[8:]}"\n'
-             if line.strip().startswith('self.server_port ='):
-                 line = '        self.server_port = 443\n'
-             if line.strip().startswith('self.protocol = "https"'):
-                 line = '        self.protocol = "https"\n'
-             if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
-                 line = ''
-             if line.strip().startswith('else "http"'):
-                 line = ''
-             sys.stdout.write(line)
-
-         call('rm /notebooks/srv.txt', shell=True)
-         call('rm /notebooks/srvr.txt', shell=True)
-
-     os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
-     call('wget -q -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py', shell=True)
-     call("sed -i 's@/content/gdrive/MyDrive/sd/stablediffusion@/notebooks/sd/stablediffusion@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
-     os.chdir('/notebooks/sd/stable-diffusion-webui')
-     clear_output()
-
-     configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --medvram --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share
-
-     return configf
-
-
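test() only returns the flag string; a sketch of how a notebook cell might consume it (using webui.py as the entry point is an assumption, the cell that does this is not part of this file):

from subprocess import call

configf = test("", "", "my-session", "", "", Use_localtunnel=False)  # placeholder session name
call("python /notebooks/sd/stable-diffusion-webui/webui.py " + configf, shell=True)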
- def clean():
-
-     Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
-
-     s = widgets.Select(
-         options=Sessions,
-         rows=5,
-         description='',
-         disabled=False
-     )
-
-     out=widgets.Output()
-
-     d = widgets.Button(
-         description='Remove',
-         disabled=False,
-         button_style='warning',
-         tooltip='Remove the selected session',
-         icon='warning'
-     )
-
-     def rem(d):
-         with out:
-             if s.value is not None:
-                 clear_output()
-                 print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
-                 call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
-                 if os.path.exists('/notebooks/models/'+s.value):
-                     call('rm -r /notebooks/models/'+s.value, shell=True)
-                 s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
-
-             else:
-                 d.close()
-                 s.close()
-                 clear_output()
-                 print("NOTHING TO REMOVE")
-
-     d.on_click(rem)
-     if s.value is not None:
-         display(s,d,out)
-     else:
-         print("NOTHING TO REMOVE")
-
-
- def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
-
-     from slugify import slugify
-     from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
-     from huggingface_hub import create_repo
-     from IPython.display import display_markdown
-
-     if(Name_of_your_concept == ""):
-         Name_of_your_concept = Session_Name
-     Name_of_your_concept=Name_of_your_concept.replace(" ","-")
-
-     if hf_token_write =="":
-         print('Your Hugging Face write access token : ')
-         hf_token_write=input()
-
-     hf_token = hf_token_write
-
-     api = HfApi()
-     your_username = api.whoami(token=hf_token)["name"]
-
-     if(Save_concept_to == "Public_Library"):
-         repo_id = f"sd-dreambooth-library/{slugify(Name_of_your_concept)}"
-         #Join the Concepts Library organization if you aren't part of it already
-         call("curl -X POST -H 'Authorization: Bearer '"+hf_token+" -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", shell=True)
-     else:
-         repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
-     output_dir = '/notebooks/models/'+INSTANCE_NAME
-
-     def bar(prg):
-         br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
-         return br
-
-     print("Loading...")
-
-     # sparse-checkout only the feature_extractor folder from the v2 base repo
-     os.chdir(OUTPUT_DIR)
-     call('rm -r feature_extractor .git', shell=True)
-     clear_output()
-     call('git init', shell=True)
-     call('git lfs install --system --skip-repo', shell=True)
-     call('git remote add -f origin "https://USER:'+hf_token+'@huggingface.co/stabilityai/stable-diffusion-2-1"', shell=True)
-     call('git config core.sparsecheckout true', shell=True)
-     call('echo -e "\nfeature_extractor" > .git/info/sparse-checkout', shell=True)
-     call('git pull origin main', shell=True)
-     call('rm -r .git', shell=True)
-     os.chdir('/notebooks')
-     clear_output()
-
-     print(bar(1))
-
-     readme_text = f'''---
- license: creativeml-openrail-m
- tags:
- - text-to-image
- - stable-diffusion
- ---
- ### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook
-
- Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)
- '''
-     #Save the readme to a file
-     readme_file = open("README.md", "w")
-     readme_file.write(readme_text)
-     readme_file.close()
-
-     operations = [
-         CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
-         CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt", path_or_fileobj=MDLPTH)
-     ]
-     create_repo(repo_id, private=True, token=hf_token)
-
-     api.create_commit(
-         repo_id=repo_id,
-         operations=operations,
-         commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
-         token=hf_token
-     )
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/feature_extractor",
-         path_in_repo="feature_extractor",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(8))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/scheduler",
-         path_in_repo="scheduler",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(9))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/text_encoder",
-         path_in_repo="text_encoder",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(12))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/tokenizer",
-         path_in_repo="tokenizer",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(13))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/unet",
-         path_in_repo="unet",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(21))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/vae",
-         path_in_repo="vae",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(23))
-
-     api.upload_file(
-         path_or_fileobj=OUTPUT_DIR+"/model_index.json",
-         path_in_repo="model_index.json",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(25))
-
-     print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
-     done()
-
-
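hfv2 uploads the diffusers folders one by one so it can print a coarse progress bar. If that display is not needed, the same result can be sketched with one recursive upload_folder call; the token, repo name and output path below are placeholders:

from huggingface_hub import HfApi, create_repo

token = "hf_xxx"                              # placeholder write token
repo_id = "your-username/my-concept"          # placeholder repo name
OUTPUT_DIR = "/notebooks/models/my-concept"   # placeholder diffusers output folder

create_repo(repo_id, private=True, token=token)
api = HfApi()
# Recursively uploads unet/, vae/, text_encoder/, tokenizer/, scheduler/,
# feature_extractor/ and model_index.json in a single commit.
api.upload_folder(folder_path=OUTPUT_DIR, repo_id=repo_id, token=token)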
- def crop_image(im, size):
-
-     GREEN = "#0F0"
-     BLUE = "#00F"
-     RED = "#F00"
-
-     def focal_point(im, settings):
-         corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
-         entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
-         face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
-
-         pois = []
-
-         weight_pref_total = 0
-         if len(corner_points) > 0:
-             weight_pref_total += settings.corner_points_weight
-         if len(entropy_points) > 0:
-             weight_pref_total += settings.entropy_points_weight
-         if len(face_points) > 0:
-             weight_pref_total += settings.face_points_weight
-
-         corner_centroid = None
-         if len(corner_points) > 0:
-             corner_centroid = centroid(corner_points)
-             corner_centroid.weight = settings.corner_points_weight / weight_pref_total
-             pois.append(corner_centroid)
-
-         entropy_centroid = None
-         if len(entropy_points) > 0:
-             entropy_centroid = centroid(entropy_points)
-             entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
-             pois.append(entropy_centroid)
-
-         face_centroid = None
-         if len(face_points) > 0:
-             face_centroid = centroid(face_points)
-             face_centroid.weight = settings.face_points_weight / weight_pref_total
-             pois.append(face_centroid)
-
-         average_point = poi_average(pois, settings)
-
-         return average_point
-
-     def image_face_points(im, settings):
-
-         np_im = np.array(im)
-         gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
-
-         tries = [
-             [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
-             [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
-             [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
-             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
-             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
-             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
-             [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
-             [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
-         ]
-         for t in tries:
-             classifier = cv2.CascadeClassifier(t[0])
-             minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
-             try:
-                 faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
-                     minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
-             except:
-                 continue
-
-             if len(faces) > 0:
-                 rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
-                 return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
-         return []
-
-     def image_corner_points(im, settings):
-         grayscale = im.convert("L")
-
-         # mask out the bottom 10% so watermarks and captions don't attract corner features
-         gd = ImageDraw.Draw(grayscale)
-         gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
-
-         np_im = np.array(grayscale)
-
-         points = cv2.goodFeaturesToTrack(
-             np_im,
-             maxCorners=100,
-             qualityLevel=0.04,
-             minDistance=min(grayscale.width, grayscale.height)*0.06,
-             useHarrisDetector=False,
-         )
-
-         if points is None:
-             return []
-
-         focal_points = []
-         for point in points:
-             x, y = point.ravel()
-             focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
-
-         return focal_points
-
-     def image_entropy_points(im, settings):
-         landscape = im.height < im.width
-         portrait = im.height > im.width
-         if landscape:
-             move_idx = [0, 2]
-             move_max = im.size[0]
-         elif portrait:
-             move_idx = [1, 3]
-             move_max = im.size[1]
-         else:
-             return []
-
-         # slide a crop-sized window along the long axis and keep the highest-entropy position
-         e_max = 0
-         crop_current = [0, 0, settings.crop_width, settings.crop_height]
-         crop_best = crop_current
-         while crop_current[move_idx[1]] < move_max:
-             crop = im.crop(tuple(crop_current))
-             e = image_entropy(crop)
-
-             if (e > e_max):
-                 e_max = e
-                 crop_best = list(crop_current)
-
-             crop_current[move_idx[0]] += 4
-             crop_current[move_idx[1]] += 4
-
-         x_mid = int(crop_best[0] + settings.crop_width/2)
-         y_mid = int(crop_best[1] + settings.crop_height/2)
-
-         return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
-
-     def image_entropy(im):
-         # greyscale image entropy
-         # band = np.asarray(im.convert("L"))
-         band = np.asarray(im.convert("1"), dtype=np.uint8)
-         hist, _ = np.histogram(band, bins=range(0, 256))
-         hist = hist[hist > 0]
-         return -np.log2(hist / hist.sum()).sum()
-
-     def centroid(pois):
-         x = [poi.x for poi in pois]
-         y = [poi.y for poi in pois]
-         return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))
-
-     def poi_average(pois, settings):
-         weight = 0.0
-         x = 0.0
-         y = 0.0
-         for poi in pois:
-             weight += poi.weight
-             x += poi.x * poi.weight
-             y += poi.y * poi.weight
-         # `weight and x / weight` guards against division by zero when no points were found
-         avg_x = round(weight and x / weight)
-         avg_y = round(weight and y / weight)
-
-         return PointOfInterest(avg_x, avg_y)
-
-     def is_landscape(w, h):
-         return w > h
-
-     def is_portrait(w, h):
-         return h > w
-
-     def is_square(w, h):
-         return w == h
-
-     class PointOfInterest:
-         def __init__(self, x, y, weight=1.0, size=10):
-             self.x = x
-             self.y = y
-             self.weight = weight
-             self.size = size
-
-         def bounding(self, size):
-             return [
-                 self.x - size//2,
-                 self.y - size//2,
-                 self.x + size//2,
-                 self.y + size//2
-             ]
-
-     class Settings:
-         def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
-             self.crop_width = crop_width
-             self.crop_height = crop_height
-             self.corner_points_weight = corner_points_weight
-             self.entropy_points_weight = entropy_points_weight
-             self.face_points_weight = face_points_weight
-
-     settings = Settings(
-         crop_width = size,
-         crop_height = size,
-         face_points_weight = 0.9,
-         entropy_points_weight = 0.15,
-         corner_points_weight = 0.5,
-     )
-
-     scale_by = 1
-     if is_landscape(im.width, im.height):
-         scale_by = settings.crop_height / im.height
-     elif is_portrait(im.width, im.height):
-         scale_by = settings.crop_width / im.width
-     elif is_square(im.width, im.height):
-         if is_square(settings.crop_width, settings.crop_height):
-             scale_by = settings.crop_width / im.width
-         elif is_landscape(settings.crop_width, settings.crop_height):
-             scale_by = settings.crop_width / im.width
-         elif is_portrait(settings.crop_width, settings.crop_height):
-             scale_by = settings.crop_height / im.height
-
-     im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
-     im_debug = im.copy()
-
-     focus = focal_point(im_debug, settings)
-
-     # take the focal point and turn it into crop coordinates that try to center over the focal
-     # point but then get adjusted back into the frame
-     y_half = int(settings.crop_height / 2)
-     x_half = int(settings.crop_width / 2)
-
-     x1 = focus.x - x_half
-     if x1 < 0:
-         x1 = 0
-     elif x1 + settings.crop_width > im.width:
-         x1 = im.width - settings.crop_width
-
-     y1 = focus.y - y_half
-     if y1 < 0:
-         y1 = 0
-     elif y1 + settings.crop_height > im.height:
-         y1 = im.height - settings.crop_height
-
-     x2 = x1 + settings.crop_width
-     y2 = y1 + settings.crop_height
-
-     crop = [x1, y1, x2, y2]
-
-     results = []
-
-     results.append(im.crop(tuple(crop)))
-
-     return results
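A usage sketch for crop_image (the input filename is hypothetical): the image is rescaled so its short side matches `size`, a focal point is estimated from the face, corner and entropy detections with the weights set in Settings, and a single size-by-size crop centred on that point is returned as a one-element list.

from PIL import Image

im = Image.open("concept.jpg")   # hypothetical input image
crops = crop_image(im, 512)
crops[0].save("concept-512.jpg")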
TheLastBen--PPS/text-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:497c68007f9a776d47f980250ffcb3bb4a0c853696a06b7087efc5f7eb932f08
+ size 1381