Upload indexed_text_encoding_converter.ipynb
Google Colab Notebooks/indexed_text_encoding_converter.ipynb
CHANGED
@@ -27,13 +27,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
-"metadata": {
-"id": "cskYkw0zXHEm"
-},
-"outputs": [],
 "source": [
-"# @title Make your own text_encodings .safetensor file for later use (using GPU is recommended to speed things up)\n",
 "\n",
 "import json\n",
 "import pandas as pd\n",
@@ -50,11 +44,36 @@
 "if using_Kaggle : home_directory = '/kaggle/working/'\n",
 "%cd {home_directory}\n",
 "#-------#\n",
+"# Load the data if not already loaded\n",
+"try:\n",
+" loaded\n",
+"except:\n",
+" %cd {home_directory}\n",
+" !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
+" loaded = True\n",
+"#--------#"
+],
+"metadata": {
+"id": "xow5kaB2SgPs"
+},
+"execution_count": null,
+"outputs": []
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"id": "cskYkw0zXHEm"
+},
+"outputs": [],
+"source": [
+"\n",
+"# @title Make your own text_encodings .safetensor file for later use (using GPU is recommended to speed things up)\n",
 "\n",
 "# User input\n",
-"target = home_directory + 'text-to-image-prompts/
-"output_folder = home_directory + 'output/
-"root_filename = '
+"target = home_directory + 'text-to-image-prompts/suffix_tripple/'\n",
+"output_folder = home_directory + 'output/suffix_tripple/'\n",
+"root_filename = 'suffix_tripple'\n",
 "NUM_FILES = 1\n",
 "#--------#\n",
 "\n",
@@ -73,14 +92,6 @@
 "my_mkdirs(output_folder_text_encodings)\n",
 "#-------#\n",
 "\n",
-"# Load the data if not already loaded\n",
-"try:\n",
-" loaded\n",
-"except:\n",
-" %cd {home_directory}\n",
-" !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
-" loaded = True\n",
-"#--------#\n",
 "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
 "from transformers import AutoTokenizer\n",
 "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
@@ -229,9 +240,11 @@
 {
 "cell_type": "code",
 "source": [
+"# @title Download the text_encodings to google drive as .zip\n",
 "from google.colab import drive\n",
 "drive.mount('/content/drive')\n",
-"
+"zip_dest = '/content/drive/MyDrive/suffix_tripple.zip'\n",
+"!zip -r {zip_dest} {output_folder}"
 ],
 "metadata": {
 "id": "zTRmgabymGI1"