MVV committed on
Commit
7e98901
•
1 Parent(s): 7becc28

Upload model.ipynb

Files changed (1)
  1. train/model.ipynb +667 -0
train/model.ipynb ADDED
@@ -0,0 +1,667 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "dd07a8e6-5809-4bb7-ba3a-bd6c15b22ff2",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stderr",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "/home/user/conda/envs/senv/lib/python3.9/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
14
+ " from .autonotebook import tqdm as notebook_tqdm\n"
15
+ ]
16
+ }
17
+ ],
18
+ "source": [
19
+ "import random\n",
20
+ "from statistics import mean\n",
21
+ "from datetime import datetime\n",
22
+ "from typing import List, Tuple\n",
23
+ "import copy\n",
24
+ "\n",
25
+ "import torch as th\n",
26
+ "import pytorch_lightning as pl\n",
27
+ "from pytorch_lightning.callbacks import ModelCheckpoint\n",
28
+ "from jaxtyping import Float, Float16, Int\n",
29
+ "\n",
30
+ "import trimesh as tm\n",
31
+ "import numpy as np\n",
32
+ "import numba\n",
33
+ "\n",
34
+ "from torch_geometric.nn.conv import GATv2Conv\n",
35
+ "\n",
36
+ "import h5py\n",
37
+ "\n",
38
+ "# Clone SAP from original repo https://github.com/autonomousvision/shape_as_points.git\n",
39
+ "from SAP.dpsr import DPSR\n",
40
+ "from SAP.model import PSR2Mesh"
41
+ ]
42
+ },
43
+ {
44
+ "cell_type": "markdown",
45
+ "id": "59c87491-5650-4c59-8d33-5153d29fb1a9",
46
+ "metadata": {
47
+ "tags": []
48
+ },
49
+ "source": [
50
+ "# Constants"
51
+ ]
52
+ },
53
+ {
54
+ "cell_type": "code",
55
+ "execution_count": 2,
56
+ "id": "26d62fb9-dae9-406b-ba30-3fec1a43a29a",
57
+ "metadata": {
58
+ "tags": []
59
+ },
60
+ "outputs": [],
61
+ "source": [
62
+ "th.manual_seed(0)\n",
63
+ "np.random.seed(0)"
64
+ ]
65
+ },
66
+ {
67
+ "cell_type": "code",
68
+ "execution_count": 3,
69
+ "id": "9ab9502f-e822-4475-9c90-019ff28f12d0",
70
+ "metadata": {},
71
+ "outputs": [],
72
+ "source": [
73
+ "IS_DEBUG = True"
74
+ ]
75
+ },
76
+ {
77
+ "cell_type": "code",
78
+ "execution_count": 4,
79
+ "id": "7095231b-e8ed-4c4d-997f-8f58664e9877",
80
+ "metadata": {},
81
+ "outputs": [],
82
+ "source": [
83
+ "BATCH_SIZE = 1 # BS\n",
84
+ "LR = 0.001\n",
85
+ "\n",
86
+ "IN_DIM = 1 \n",
87
+ "OUT_DIM = 1\n",
88
+ "LATENT_DIM = 32\n",
89
+ "\n",
90
+ "DROPOUT_PROB = 0.1\n",
91
+ "\n",
92
+ "PADDING = 1.2 # Scaling\n",
93
+ "\n",
94
+ "GRID_SIZE = 128\n",
95
+ "SIGMA = 5.0"
96
+ ]
97
+ },
98
+ {
99
+ "cell_type": "code",
100
+ "execution_count": 5,
101
+ "id": "27b7a406-cbb0-4a36-be1e-a8d8aa82c702",
102
+ "metadata": {
103
+ "tags": []
104
+ },
105
+ "outputs": [],
106
+ "source": [
107
+ "DATASET = \"Synthetic\"\n",
108
+ "LOG_IDX = 14\n",
109
+ "LOG_VISUALS = not IS_DEBUG\n",
110
+ "\n",
111
+ "CHECKPOINTS_PATH = \"./checkpoints/\"\n",
112
+ "\n",
113
+ "FIELDS_H5_PATH = f\"./Standart_fields/{DATASET}_fields_32_512.h5\"\n",
114
+ "PATH_ORIG_H5 = f\"./Standart_h5/{DATASET}.h5\"\n",
115
+ "PATH_NOISY_H5 = f\"./Standart_h5/{DATASET}_noisy.h5\"\n",
116
+ "MIN_V_NUMBER = 1_000\n",
117
+ "MAX_V_NUMBER = 100_000"
118
+ ]
119
+ },
120
+ {
121
+ "cell_type": "markdown",
122
+ "id": "1690b667-0af4-465a-8e3c-4a29622e9e66",
123
+ "metadata": {
124
+ "tags": []
125
+ },
126
+ "source": [
127
+ "# Data Preparation"
128
+ ]
129
+ },
130
+ {
131
+ "cell_type": "code",
132
+ "execution_count": 6,
133
+ "id": "2e774809-1293-4f80-8350-59ae7fc86cbb",
134
+ "metadata": {},
135
+ "outputs": [],
136
+ "source": [
137
+ "@numba.njit\n",
138
+ "def generate_grid_edge_list(gs: int = 128):\n",
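+ " # connect every voxel of the gs x gs x gs grid to its (up to) six axis-aligned neighbours\n",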
139
+ " grid_edge_list = []\n",
140
+ "\n",
141
+ " for k in range(gs):\n",
142
+ " for j in range(gs):\n",
143
+ " for i in range(gs):\n",
144
+ " current_idx = i + gs*j + k*gs*gs\n",
145
+ " if (i - 1) >= 0:\n",
146
+ " grid_edge_list.append([current_idx, i-1 + gs*j + k*gs*gs])\n",
147
+ " if (i + 1) < gs:\n",
148
+ " grid_edge_list.append([current_idx, i+1 + gs*j + k*gs*gs])\n",
149
+ " if (j - 1) >= 0:\n",
150
+ " grid_edge_list.append([current_idx, i + gs*(j-1) + k*gs*gs])\n",
151
+ " if (j + 1) < gs:\n",
152
+ " grid_edge_list.append([current_idx, i + gs*(j+1) + k*gs*gs])\n",
153
+ " if (k - 1) >= 0:\n",
154
+ " grid_edge_list.append([current_idx, i + gs*j + (k-1)*gs*gs])\n",
155
+ " if (k + 1) < gs:\n",
156
+ " grid_edge_list.append([current_idx, i + gs*j + (k+1)*gs*gs])\n",
157
+ " return grid_edge_list\n",
158
+ "\n",
159
+ "GRID_EDGE_LIST = None"
160
+ ]
161
+ },
162
+ {
163
+ "cell_type": "code",
164
+ "execution_count": 7,
165
+ "id": "4486968b-3416-41c5-9ecd-429f7cf193de",
166
+ "metadata": {},
167
+ "outputs": [],
168
+ "source": [
169
+ "class StandartH5DataSet(th.utils.data.Dataset):\n",
170
+ " \n",
171
+ " def _load_data(self, key: str):\n",
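+ " # strip the noise suffixes (\"_n1\", \"_n2\", \"_n3\", \"_noisy\") to recover the matching ground-truth key\n",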
172
+ " key_orig = key.replace(\"_n1\", \"\")\n",
173
+ " key_orig = key_orig.replace(\"_n2\", \"\")\n",
174
+ " key_orig = key_orig.replace(\"_n3\", \"\")\n",
175
+ " key_orig = key_orig.replace(\"_noisy\", \"\")\n",
176
+ "\n",
177
+ " vertices = th.tensor(self._noisy_meshes_h5[key][\"vertices\"][:], dtype=th.float)\n",
178
+ " vertices_normals = th.tensor(self._noisy_meshes_h5[key][\"vertices_normals\"][:], dtype=th.float)\n",
179
+ " vertices_gt = th.tensor(self._orig_meshes_h5[key_orig][\"vertices\"][:], dtype=th.float)\n",
180
+ " vertices_normals_gt = th.tensor(self._orig_meshes_h5[key_orig][\"vertices_normals\"][:], dtype=th.float)\n",
181
+ " field_gt = self.dpsr(vertices_gt.unsqueeze(0), vertices_normals_gt.unsqueeze(0)).squeeze(0)\n",
182
+ "\n",
183
+ " adj = np.array(self._noisy_meshes_h5[key][\"edge_index\"][:], dtype=np.int64)\n",
184
+ " adj = th.tensor(adj, dtype=th.int64)\n",
185
+ " \n",
186
+ " return vertices, vertices_normals, vertices_gt, vertices_normals_gt, field_gt, adj\n",
187
+ " \n",
188
+ " def __init__(self, \n",
189
+ " orig_meshes_h5: h5py.Group,\n",
190
+ " noisy_meshes_h5: h5py.Group,\n",
191
+ " fields_grid_size: int,\n",
192
+ " min_verts: int,\n",
193
+ " max_verts: int) -> None:\n",
194
+ " super().__init__()\n",
195
+ " \n",
196
+ " self.dpsr = DPSR([GRID_SIZE, GRID_SIZE, GRID_SIZE], sig=SIGMA)\n",
197
+ " \n",
198
+ " self._orig_meshes_h5 = orig_meshes_h5\n",
199
+ " self._noisy_meshes_h5 = noisy_meshes_h5\n",
200
+ " \n",
201
+ " self._fields_grid_size = str(fields_grid_size)\n",
202
+ " self._min_verts = min_verts\n",
203
+ " self._max_verts = max_verts\n",
204
+ " \n",
205
+ " self._data = {}\n",
206
+ " self._keys = []\n",
207
+ " \n",
208
+ " # filter keys to load only meshes with requested amount of vertices\n",
209
+ " for key in self._noisy_meshes_h5.keys():\n",
210
+ " v_number = self._noisy_meshes_h5[key][\"vertices\"].shape[0]\n",
211
+ " if (v_number >= self._min_verts) and (v_number <= self._max_verts):\n",
212
+ " self._keys.append(key)\n",
213
+ " self._keys = np.array(self._keys, dtype=str)\n",
214
+ " self._loaded = np.full(shape=self._keys.shape, fill_value=False, dtype=bool)\n",
215
+ " \n",
216
+ " def __len__(self) -> int:\n",
217
+ " return self._keys.shape[0]\n",
218
+ " \n",
219
+ " def __getitem__(self, index: int) -> Tuple[Float[th.Tensor, \"N 3\"],\n",
220
+ " Float[th.Tensor, \"N 3\"],\n",
221
+ " Float[th.Tensor, \"N 3\"],\n",
222
+ " Float[th.Tensor, \"N 3\"],\n",
223
+ " Float[th.Tensor, \"GR GR GR\"],\n",
224
+ " Float[th.Tensor, \"2 E\"]]:\n",
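+ " # load and cache the sample on first access; return a deep copy so the cache is never mutated\n",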
225
+ " if self._loaded[index] == False:\n",
226
+ " data = self._load_data(self._keys[index])\n",
227
+ " self._data[index] = data\n",
228
+ " self._loaded[index] = True\n",
229
+ " return copy.deepcopy(self._data[index])\n",
230
+ " \n",
231
+ " @property\n",
232
+ " def fields_grid_size(self):\n",
233
+ " return int(self._fields_grid_size)\n",
234
+ " \n",
235
+ " def renew_grid_size(self, new_grid_size: int):\n",
236
+ " self._fields_grid_size = str(new_grid_size)\n",
237
+ " self._loaded = np.full(shape=self._keys.shape, fill_value=False, dtype=bool)"
238
+ ]
239
+ },
240
+ {
241
+ "cell_type": "markdown",
242
+ "id": "13c69a49-5107-4d3e-9b14-1d456768f128",
243
+ "metadata": {
244
+ "tags": []
245
+ },
246
+ "source": [
247
+ "# Model"
248
+ ]
249
+ },
250
+ {
251
+ "cell_type": "markdown",
252
+ "id": "1d9a9aac-d229-489a-844d-a1d1cbd34c56",
253
+ "metadata": {
254
+ "tags": []
255
+ },
256
+ "source": [
257
+ "### Form Optimizer "
258
+ ]
259
+ },
260
+ {
261
+ "cell_type": "code",
262
+ "execution_count": 8,
263
+ "id": "940babdc-3e4f-4310-8bfd-48b23d0758dc",
264
+ "metadata": {
265
+ "tags": []
266
+ },
267
+ "outputs": [],
268
+ "source": [
269
+ "class FormOptimizer(th.nn.Module):\n",
270
+ " def __init__(self) -> None:\n",
271
+ " super().__init__()\n",
272
+ " \n",
275
+ " self.gconv1 = GATv2Conv(in_channels=IN_DIM, out_channels=LATENT_DIM, heads=1, dropout=DROPOUT_PROB)\n",
276
+ " self.gconv2 = GATv2Conv(in_channels=LATENT_DIM, out_channels=LATENT_DIM, heads=1, dropout=DROPOUT_PROB)\n",
277
+ " \n",
278
+ " self.actv = th.nn.Sigmoid()\n",
279
+ " self.head = th.nn.Linear(in_features=LATENT_DIM, out_features=OUT_DIM)\n",
280
+ "\n",
281
+ " def forward(self, \n",
282
+ " field: Float[th.Tensor, \"BS GS GS GS\"]) -> Float[th.Tensor, \"BS GS GS GS\"]:\n",
283
+ " \"\"\"\n",
284
+ " Args:\n",
285
+ " field (Tensor [BS, GS, GS, GS]): PSR indicator field produced by DPSR.\n",
286
+ " \"\"\"\n",
287
+ " vertex_features = field.clone()\n",
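+ " # treat every voxel as a graph node with one scalar feature (assumes BATCH_SIZE == 1); GRID_EDGE_LIST holds the 6-neighbour grid edges\n",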
288
+ " vertex_features = vertex_features.reshape(GRID_SIZE*GRID_SIZE*GRID_SIZE, IN_DIM)\n",
289
+ " \n",
290
+ " vertex_features = self.gconv1(x=vertex_features, edge_index=GRID_EDGE_LIST) \n",
291
+ " vertex_features = self.gconv2(x=vertex_features, edge_index=GRID_EDGE_LIST) \n",
292
+ " field_delta = self.head(self.actv(vertex_features))\n",
293
+ " \n",
294
+ " field_delta = field_delta.reshape(BATCH_SIZE, GRID_SIZE, GRID_SIZE, GRID_SIZE)\n",
295
+ " field_delta += field \n",
296
+ " field_delta = th.clamp(field_delta, min=-0.5, max=0.5)\n",
297
+ " \n",
298
+ " return field_delta"
299
+ ]
300
+ },
301
+ {
302
+ "cell_type": "markdown",
303
+ "id": "67b40c5b-ff1b-416d-b892-c544386eaa95",
304
+ "metadata": {
305
+ "toc-hr-collapsed": true
306
+ },
307
+ "source": [
308
+ "### Full"
309
+ ]
310
+ },
311
+ {
312
+ "cell_type": "code",
313
+ "execution_count": 9,
314
+ "id": "bce3aa63-9bd7-4ac8-939d-395d63dd3cad",
315
+ "metadata": {
316
+ "scrolled": true,
317
+ "tags": []
318
+ },
319
+ "outputs": [],
320
+ "source": [
321
+ "class Model(pl.LightningModule):\n",
322
+ " def __init__(self):\n",
323
+ " super().__init__()\n",
324
+ " self.form_optimizer = FormOptimizer()\n",
325
+ " \n",
326
+ " self.dpsr = DPSR([GRID_SIZE, GRID_SIZE, GRID_SIZE], sig=SIGMA)\n",
327
+ " self.field2mesh = PSR2Mesh().apply\n",
328
+ "\n",
329
+ " self.metric = th.nn.MSELoss()\n",
330
+ "\n",
331
+ " #video logging databases\n",
332
+ " dateTimeObj = datetime.now()\n",
333
+ " start_time = dateTimeObj.strftime(\"%d-%b-%Y_%H-%M-%S\")\n",
334
+ " \n",
335
+ " if LOG_VISUALS:\n",
336
+ " self.h5_frame = 0\n",
337
+ " self.log_points_file = h5py.File(f\"./logs/points_{start_time}\", \"w\")\n",
338
+ " self.log_normals_file = h5py.File(f\"./logs/normals_{start_time}\", \"w\")\n",
339
+ " \n",
340
+ " self.val_losses = []\n",
341
+ " self.train_losses = []\n",
342
+ "\n",
343
+ " def log_h5(self, points, normals):\n",
344
+ " dset = self.log_points_file.create_dataset(\n",
345
+ " name=str(self.h5_frame),\n",
346
+ " shape=points.shape,\n",
347
+ " dtype=np.float16, \n",
348
+ " compression=\"gzip\")\n",
349
+ " dset[:] = points\n",
350
+ " dset = self.log_normals_file.create_dataset(\n",
351
+ " name=str(self.h5_frame),\n",
352
+ " shape=normals.shape,\n",
353
+ " dtype=np.float16, \n",
354
+ " compression=\"gzip\")\n",
355
+ " dset[:] = normals\n",
356
+ " self.h5_frame += 1\n",
357
+ " \n",
358
+ " def forward(self, \n",
359
+ " v: Float[th.Tensor, \"BS N 3\"],\n",
360
+ " n: Float[th.Tensor, \"BS N 3\"]) -> Tuple[Float[th.Tensor, \"BS N 3\"], # v - vertices\n",
361
+ " Int[th.Tensor, \"2 E\"], # f - faces\n",
362
+ " Float[th.Tensor, \"BS N 3\"], # n - vertices normals\n",
363
+ " Float[th.Tensor, \"BS GR GR GR\"]]: # field: \n",
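+ " # oriented points -> PSR field (DPSR) -> field refinement (FormOptimizer) -> mesh extraction (PSR2Mesh)\n",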
364
+ " field = self.dpsr(v, n)\n",
365
+ " field = self.form_optimizer(field)\n",
366
+ " v, f, n = self.field2mesh(field)\n",
367
+ " return v, f, n, field\n",
368
+ "\n",
369
+ " def training_step(self, batch, batch_idx) -> Float[th.Tensor, \"1\"]:\n",
370
+ " vertices, vertices_normals, vertices_gt, vertices_normals_gt, field_gt, adj = batch\n",
371
+ " \n",
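+ " # augmentation: randomly keep 50-100% of the noisy input points\n",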
372
+ " mask = th.rand((vertices.shape[1], ), device=th.device(\"cuda\")) < (random.random() / 2.0 + 0.5)\n",
373
+ " vertices = vertices[:, mask]\n",
374
+ " vertices_normals = vertices_normals[:, mask]\n",
375
+ " \n",
376
+ " vr, fr, nr, field_r = self(vertices, vertices_normals)\n",
377
+ " \n",
378
+ " loss = self.metric(field_r, field_gt)\n",
379
+ " if LOG_VISUALS and (LOG_IDX == batch_idx):\n",
380
+ " self.log_h5(vr.squeeze(0).detach().cpu().numpy(), nr.squeeze(0).detach().cpu().numpy())\n",
381
+ " train_per_step_loss = loss.item()\n",
382
+ " self.train_losses.append(train_per_step_loss)\n",
383
+ " \n",
384
+ " return loss\n",
385
+ " \n",
386
+ " def on_train_epoch_end(self):\n",
387
+ " mean_train_per_epoch_loss = mean(self.train_losses)\n",
388
+ " self.log(\"mean_train_per_epoch_loss\", mean_train_per_epoch_loss, on_step=False, on_epoch=True)\n",
389
+ " self.train_losses = []\n",
390
+ " \n",
391
+ " def validation_step(self, batch, batch_idx):\n",
392
+ " vertices, vertices_normals, vertices_gt, vertices_normals_gt, field_gt, adj = batch\n",
393
+ " \n",
394
+ " vr, fr, nr, field_r = self(vertices, vertices_normals)\n",
395
+ " \n",
396
+ " loss = self.metric(field_r, field_gt)\n",
397
+ " val_per_step_loss = loss.item()\n",
398
+ " self.val_losses.append(val_per_step_loss)\n",
399
+ " return loss\n",
400
+ " \n",
401
+ " def on_validation_epoch_end(self):\n",
402
+ " mean_val_per_epoch_loss = mean(self.val_losses)\n",
403
+ " self.log(\"mean_val_per_epoch_loss\", mean_val_per_epoch_loss, on_step=False, on_epoch=True)\n",
404
+ " self.val_losses = []\n",
405
+ "\n",
406
+ " def configure_optimizers(self):\n",
407
+ " optimizer = th.optim.Adam(self.parameters(), lr=LR)\n",
408
+ " scheduler = th.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, factor=0.5)\n",
409
+ " \n",
410
+ " return {\n",
411
+ " \"optimizer\": optimizer,\n",
412
+ " \"lr_scheduler\": {\n",
413
+ " \"scheduler\": scheduler, \n",
414
+ " \"monitor\": \"mean_val_per_epoch_loss\",\n",
415
+ " \"interval\": \"epoch\",\n",
416
+ " \"frequency\": 1,\n",
417
+ " \"strict\": True,\n",
418
+ " \"name\": None,\n",
419
+ " }\n",
420
+ " }\n"
421
+ ]
422
+ },
423
+ {
424
+ "cell_type": "markdown",
425
+ "id": "1fb2c5a5-43ee-4a4e-be08-0dcfcb6816de",
426
+ "metadata": {
427
+ "tags": []
428
+ },
429
+ "source": [
430
+ "# Loop"
431
+ ]
432
+ },
433
+ {
434
+ "cell_type": "code",
435
+ "execution_count": 10,
436
+ "id": "c94c6a68-3986-48af-9da5-cab8c02a8b7b",
437
+ "metadata": {},
438
+ "outputs": [],
439
+ "source": [
440
+ "checkpoint_callback = ModelCheckpoint(\n",
441
+ " monitor='mean_val_per_epoch_loss', # monitor the validation loss\n",
442
+ " mode='min', # mode 'min' to save the lowest monitored value\n",
443
+ " save_top_k=1, # save only the best checkpoint (top 1)\n",
444
+ ")"
445
+ ]
446
+ },
447
+ {
448
+ "cell_type": "code",
449
+ "execution_count": 11,
450
+ "id": "03cdddbc-223e-4d40-9fb0-e663beddefda",
451
+ "metadata": {},
452
+ "outputs": [
453
+ {
454
+ "name": "stderr",
455
+ "output_type": "stream",
456
+ "text": [
457
+ "/home/user/conda/envs/senv/lib/python3.9/site-packages/torch/functional.py:504: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at /opt/conda/conda-bld/pytorch_1670525551200/work/aten/src/ATen/native/TensorShape.cpp:3190.)\n",
458
+ " return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\n",
459
+ "/home/user/conda/envs/senv/lib/python3.9/site-packages/lightning_fabric/connector.py:554: UserWarning: 16 is supported for historical reasons but its usage is discouraged. Please set your precision to 16-mixed instead!\n",
460
+ " rank_zero_warn(\n",
461
+ "Using 16bit Automatic Mixed Precision (AMP)\n",
462
+ "GPU available: True (cuda), used: True\n",
463
+ "TPU available: False, using: 0 TPU cores\n",
464
+ "IPU available: False, using: 0 IPUs\n",
465
+ "HPU available: False, using: 0 HPUs\n",
466
+ "Running in `fast_dev_run` mode: will run the requested loop using 300 batch(es). Logging and checkpointing is suppressed.\n",
467
+ "You are using a CUDA device ('A100-PCIE-40GB') that has Tensor Cores. To properly utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off precision for performance. For more details, read https://pytorch.org/docs/stable/generated/torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision\n",
468
+ "/home/user/conda/envs/senv/lib/python3.9/site-packages/pytorch_lightning/callbacks/model_checkpoint.py:617: UserWarning: Checkpoint directory /home/jovyan/Mashurov/GINSAP/checkpoints exists and is not empty.\n",
469
+ " rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n",
470
+ "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n",
471
+ "\n",
472
+ " | Name | Type | Params\n",
473
+ "-------------------------------------------------\n",
474
+ "0 | form_optimizer | FormOptimizer | 2.4 K \n",
475
+ "1 | dpsr | DPSR | 0 \n",
476
+ "2 | metric | MSELoss | 0 \n",
477
+ "-------------------------------------------------\n",
478
+ "2.4 K Trainable params\n",
479
+ "0 Non-trainable params\n",
480
+ "2.4 K Total params\n",
481
+ "0.010 Total estimated model params size (MB)\n",
482
+ "/home/user/conda/envs/senv/lib/python3.9/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, train_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 64 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\n",
483
+ " rank_zero_warn(\n",
484
+ "/home/user/conda/envs/senv/lib/python3.9/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:442: PossibleUserWarning: The dataloader, val_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 64 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\n",
485
+ " rank_zero_warn(\n"
486
+ ]
487
+ },
488
+ {
489
+ "name": "stdout",
490
+ "output_type": "stream",
491
+ "text": [
492
+ "Epoch 0: 100%|██████████| 60/60 [00:17<00:00, 3.52it/s]\n",
493
+ "Validation: 0it [00:00, ?it/s]\u001b[A\n",
494
+ "Validation: 0%| | 0/84 [00:00<?, ?it/s]\u001b[A\n",
495
+ "Validation DataLoader 0: 0%| | 0/84 [00:00<?, ?it/s]\u001b[A\n",
496
+ "Validation DataLoader 0: 1%| | 1/84 [00:00<00:09, 8.99it/s]\u001b[A\n",
497
+ "Validation DataLoader 0: 2%|▏ | 2/84 [00:00<00:10, 7.88it/s]\u001b[A\n",
498
+ "Validation DataLoader 0: 4%|β–Ž | 3/84 [00:00<00:10, 7.47it/s]\u001b[A\n",
499
+ "Validation DataLoader 0: 5%|▍ | 4/84 [00:00<00:11, 7.22it/s]\u001b[A\n",
500
+ "Validation DataLoader 0: 6%|β–Œ | 5/84 [00:00<00:11, 7.13it/s]\u001b[A\n",
501
+ "Validation DataLoader 0: 7%|β–‹ | 6/84 [00:00<00:10, 7.09it/s]\u001b[A\n",
502
+ "Validation DataLoader 0: 8%|β–Š | 7/84 [00:01<00:11, 6.95it/s]\u001b[A\n",
503
+ "Validation DataLoader 0: 10%|β–‰ | 8/84 [00:01<00:11, 6.86it/s]\u001b[A\n",
504
+ "Validation DataLoader 0: 11%|β–ˆ | 9/84 [00:01<00:11, 6.81it/s]\u001b[A\n",
505
+ "Validation DataLoader 0: 12%|β–ˆβ– | 10/84 [00:01<00:11, 6.71it/s]\u001b[A\n",
506
+ "Validation DataLoader 0: 13%|β–ˆβ–Ž | 11/84 [00:01<00:10, 6.64it/s]\u001b[A\n",
507
+ "Validation DataLoader 0: 14%|β–ˆβ– | 12/84 [00:01<00:10, 6.60it/s]\u001b[A\n",
508
+ "Validation DataLoader 0: 15%|β–ˆβ–Œ | 13/84 [00:01<00:10, 6.53it/s]\u001b[A\n",
509
+ "Validation DataLoader 0: 17%|β–ˆβ–‹ | 14/84 [00:02<00:10, 6.48it/s]\u001b[A\n",
510
+ "Validation DataLoader 0: 18%|β–ˆβ–Š | 15/84 [00:02<00:10, 6.44it/s]\u001b[A\n",
511
+ "Validation DataLoader 0: 19%|β–ˆβ–‰ | 16/84 [00:02<00:10, 6.34it/s]\u001b[A\n",
512
+ "Validation DataLoader 0: 20%|β–ˆβ–ˆ | 17/84 [00:02<00:10, 6.27it/s]\u001b[A\n",
513
+ "Validation DataLoader 0: 21%|β–ˆβ–ˆβ– | 18/84 [00:02<00:10, 6.20it/s]\u001b[A\n",
514
+ "Validation DataLoader 0: 23%|β–ˆβ–ˆβ–Ž | 19/84 [00:03<00:10, 6.16it/s]\u001b[A\n",
515
+ "Validation DataLoader 0: 24%|β–ˆβ–ˆβ– | 20/84 [00:03<00:10, 6.16it/s]\u001b[A\n",
516
+ "Validation DataLoader 0: 25%|β–ˆβ–ˆβ–Œ | 21/84 [00:03<00:10, 6.16it/s]\u001b[A\n",
517
+ "Validation DataLoader 0: 26%|β–ˆβ–ˆβ–Œ | 22/84 [00:03<00:10, 6.16it/s]\u001b[A\n",
518
+ "Validation DataLoader 0: 27%|β–ˆβ–ˆβ–‹ | 23/84 [00:03<00:09, 6.17it/s]\u001b[A\n",
519
+ "Validation DataLoader 0: 29%|β–ˆβ–ˆβ–Š | 24/84 [00:03<00:09, 6.18it/s]\u001b[A\n",
520
+ "Validation DataLoader 0: 30%|β–ˆβ–ˆβ–‰ | 25/84 [00:04<00:09, 6.20it/s]\u001b[A\n",
521
+ "Validation DataLoader 0: 31%|β–ˆβ–ˆβ–ˆ | 26/84 [00:04<00:09, 6.22it/s]\u001b[A\n",
522
+ "Validation DataLoader 0: 32%|β–ˆβ–ˆβ–ˆβ– | 27/84 [00:04<00:09, 6.25it/s]\u001b[A\n",
523
+ "Validation DataLoader 0: 33%|β–ˆβ–ˆβ–ˆβ–Ž | 28/84 [00:04<00:08, 6.22it/s]\u001b[A\n",
524
+ "Validation DataLoader 0: 35%|β–ˆβ–ˆβ–ˆβ– | 29/84 [00:04<00:08, 6.20it/s]\u001b[A\n",
525
+ "Validation DataLoader 0: 36%|β–ˆβ–ˆβ–ˆβ–Œ | 30/84 [00:04<00:08, 6.19it/s]\u001b[A\n",
526
+ "Validation DataLoader 0: 37%|β–ˆβ–ˆβ–ˆβ–‹ | 31/84 [00:04<00:08, 6.21it/s]\u001b[A\n",
527
+ "Validation DataLoader 0: 38%|β–ˆβ–ˆβ–ˆβ–Š | 32/84 [00:05<00:08, 6.23it/s]\u001b[A\n",
528
+ "Validation DataLoader 0: 39%|β–ˆβ–ˆβ–ˆβ–‰ | 33/84 [00:05<00:08, 6.25it/s]\u001b[A\n",
529
+ "Validation DataLoader 0: 40%|β–ˆβ–ˆβ–ˆβ–ˆ | 34/84 [00:05<00:08, 6.17it/s]\u001b[A\n",
530
+ "Validation DataLoader 0: 42%|β–ˆβ–ˆβ–ˆβ–ˆβ– | 35/84 [00:05<00:08, 6.12it/s]\u001b[A\n",
531
+ "Validation DataLoader 0: 43%|β–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 36/84 [00:05<00:07, 6.07it/s]\u001b[A\n",
532
+ "Validation DataLoader 0: 44%|β–ˆβ–ˆβ–ˆβ–ˆβ– | 37/84 [00:06<00:07, 6.08it/s]\u001b[A\n",
533
+ "Validation DataLoader 0: 45%|β–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 38/84 [00:06<00:07, 6.10it/s]\u001b[A\n",
534
+ "Validation DataLoader 0: 46%|β–ˆβ–ˆβ–ˆβ–ˆβ–‹ | 39/84 [00:06<00:07, 6.11it/s]\u001b[A\n",
535
+ "Validation DataLoader 0: 48%|β–ˆβ–ˆβ–ˆβ–ˆβ–Š | 40/84 [00:06<00:07, 6.11it/s]\u001b[A\n",
536
+ "Validation DataLoader 0: 49%|β–ˆβ–ˆβ–ˆβ–ˆβ–‰ | 41/84 [00:06<00:07, 6.11it/s]\u001b[A\n",
537
+ "Validation DataLoader 0: 50%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 42/84 [00:06<00:06, 6.11it/s]\u001b[A\n",
538
+ "Validation DataLoader 0: 51%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 43/84 [00:07<00:06, 6.11it/s]\u001b[A\n",
539
+ "Validation DataLoader 0: 52%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– | 44/84 [00:07<00:06, 6.10it/s]\u001b[A\n",
540
+ "Validation DataLoader 0: 54%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 45/84 [00:07<00:06, 6.07it/s]\u001b[A\n",
541
+ "Validation DataLoader 0: 55%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– | 46/84 [00:07<00:06, 6.06it/s]\u001b[A\n",
542
+ "Validation DataLoader 0: 56%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 47/84 [00:07<00:06, 6.05it/s]\u001b[A\n",
543
+ "Validation DataLoader 0: 57%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‹ | 48/84 [00:07<00:05, 6.06it/s]\u001b[A\n",
544
+ "Validation DataLoader 0: 58%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Š | 49/84 [00:08<00:05, 6.04it/s]\u001b[A\n",
545
+ "Validation DataLoader 0: 60%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰ | 50/84 [00:08<00:05, 6.02it/s]\u001b[A\n",
546
+ "Validation DataLoader 0: 61%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 51/84 [00:08<00:05, 6.01it/s]\u001b[A\n",
547
+ "Validation DataLoader 0: 62%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– | 52/84 [00:08<00:05, 6.03it/s]\u001b[A\n",
548
+ "Validation DataLoader 0: 63%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 53/84 [00:08<00:05, 6.05it/s]\u001b[A\n",
549
+ "Validation DataLoader 0: 64%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– | 54/84 [00:08<00:04, 6.07it/s]\u001b[A\n",
550
+ "Validation DataLoader 0: 65%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 55/84 [00:09<00:04, 6.07it/s]\u001b[A\n",
551
+ "Validation DataLoader 0: 67%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‹ | 56/84 [00:09<00:04, 6.07it/s]\u001b[A\n",
552
+ "Validation DataLoader 0: 68%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Š | 57/84 [00:09<00:04, 6.07it/s]\u001b[A\n",
553
+ "Validation DataLoader 0: 69%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰ | 58/84 [00:09<00:04, 6.06it/s]\u001b[A\n",
554
+ "Validation DataLoader 0: 70%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 59/84 [00:09<00:04, 6.05it/s]\u001b[A\n",
555
+ "Validation DataLoader 0: 71%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– | 60/84 [00:09<00:03, 6.04it/s]\u001b[A\n",
556
+ "Validation DataLoader 0: 73%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 61/84 [00:10<00:03, 6.02it/s]\u001b[A\n",
557
+ "Validation DataLoader 0: 74%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– | 62/84 [00:10<00:03, 6.00it/s]\u001b[A\n",
558
+ "Validation DataLoader 0: 75%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 63/84 [00:10<00:03, 5.99it/s]\u001b[A\n",
559
+ "Validation DataLoader 0: 76%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 64/84 [00:10<00:03, 5.99it/s]\u001b[A\n",
560
+ "Validation DataLoader 0: 77%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‹ | 65/84 [00:10<00:03, 6.00it/s]\u001b[A\n",
561
+ "Validation DataLoader 0: 79%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Š | 66/84 [00:10<00:02, 6.00it/s]\u001b[A\n",
562
+ "Validation DataLoader 0: 80%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰ | 67/84 [00:11<00:02, 6.00it/s]\u001b[A\n",
563
+ "Validation DataLoader 0: 81%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 68/84 [00:11<00:02, 6.00it/s]\u001b[A\n",
564
+ "Validation DataLoader 0: 82%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– | 69/84 [00:11<00:02, 6.00it/s]\u001b[A\n",
565
+ "Validation DataLoader 0: 83%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž | 70/84 [00:11<00:02, 6.01it/s]\u001b[A\n",
566
+ "Validation DataLoader 0: 85%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ– | 71/84 [00:11<00:02, 6.02it/s]\u001b[A\n",
567
+ "Validation DataLoader 0: 86%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ | 72/84 [00:11<00:01, 6.03it/s]\u001b[A\n",
568
+ "Validation DataLoader 0: 87%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‹ | 73/84 [00:12<00:01, 6.04it/s]\u001b[A\n",
569
+ "Validation DataLoader 0: 88%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Š | 74/84 [00:12<00:01, 6.05it/s]\u001b[A\n",
570
+ "Validation DataLoader 0: 89%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰ | 75/84 [00:12<00:01, 6.06it/s]\u001b[A\n",
571
+ "Validation DataLoader 0: 90%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ | 76/84 [00:12<00:01, 6.06it/s]\u001b[A\n",
572
+ "Validation DataLoader 0: 92%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–| 77/84 [00:12<00:01, 6.07it/s]\u001b[A\n",
573
+ "Validation DataLoader 0: 93%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Ž| 78/84 [00:12<00:00, 6.08it/s]\u001b[A\n",
574
+ "Validation DataLoader 0: 94%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–| 79/84 [00:12<00:00, 6.09it/s]\u001b[A\n",
575
+ "Validation DataLoader 0: 95%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ| 80/84 [00:13<00:00, 6.10it/s]\u001b[A\n",
576
+ "Validation DataLoader 0: 96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‹| 81/84 [00:13<00:00, 6.11it/s]\u001b[A\n",
577
+ "Validation DataLoader 0: 98%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Š| 82/84 [00:13<00:00, 6.09it/s]\u001b[A\n",
578
+ "Validation DataLoader 0: 99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 83/84 [00:13<00:00, 6.09it/s]\u001b[A\n",
579
+ "Validation DataLoader 0: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 84/84 [00:13<00:00, 6.08it/s]\u001b[A\n",
580
+ "Epoch 0: 100%|██████████| 60/60 [00:30<00:00, 1.94it/s] \u001b[A"
581
+ ]
582
+ },
583
+ {
584
+ "name": "stderr",
585
+ "output_type": "stream",
586
+ "text": [
587
+ "`Trainer.fit` stopped: `max_epochs=1` reached.\n"
588
+ ]
589
+ },
590
+ {
591
+ "name": "stdout",
592
+ "output_type": "stream",
593
+ "text": [
594
+ "Epoch 0: 100%|██████████| 60/60 [00:30<00:00, 1.94it/s]\n"
595
+ ]
596
+ }
597
+ ],
598
+ "source": [
599
+ "if __name__ == \"__main__\":\n",
600
+ " \n",
601
+ " GRID_EDGE_LIST = generate_grid_edge_list(GRID_SIZE)\n",
602
+ " GRID_EDGE_LIST = th.tensor(GRID_EDGE_LIST, dtype=th.int)\n",
603
+ " GRID_EDGE_LIST = GRID_EDGE_LIST.T\n",
604
+ " GRID_EDGE_LIST = GRID_EDGE_LIST.to(th.device(\"cuda\"))\n",
605
+ " \n",
606
+ " noisy_meshes_h5 = h5py.File(\"./Standart_h5/Synthetic_noisy.h5\", \"r\")\n",
607
+ " orig_meshes_h5 = h5py.File(\"./Standart_h5/Synthetic.h5\", \"r\")\n",
608
+ " \n",
609
+ " train_dataset = StandartH5DataSet(orig_meshes_h5=orig_meshes_h5['train'],\n",
610
+ " noisy_meshes_h5=noisy_meshes_h5['train'],\n",
611
+ " fields_grid_size=GRID_SIZE,\n",
612
+ " min_verts=MIN_V_NUMBER,\n",
613
+ " max_verts=MAX_V_NUMBER)\n",
614
+ " test_dataset = StandartH5DataSet(orig_meshes_h5=orig_meshes_h5['test'],\n",
615
+ " noisy_meshes_h5=noisy_meshes_h5['test'],\n",
616
+ " fields_grid_size=GRID_SIZE,\n",
617
+ " min_verts=MIN_V_NUMBER,\n",
618
+ " max_verts=MAX_V_NUMBER)\n",
619
+ "\n",
620
+ " train_dataloader = th.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False)\n",
621
+ " test_dataloader = th.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)\n",
622
+ "\n",
623
+ " trainer = pl.Trainer(accelerator=\"gpu\", \n",
624
+ " callbacks=[checkpoint_callback],\n",
625
+ " log_every_n_steps=len(train_dataset)+len(test_dataset),\n",
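+ " # IS_DEBUG: fast_dev_run=300 runs a 300-batch smoke test with logging and checkpointing suppressed\n",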
626
+ " fast_dev_run=(300 if IS_DEBUG else False),\n",
627
+ " max_epochs=200,\n",
628
+ " precision=16)\n",
629
+ " \n",
630
+ " model = Model()\n",
631
+ " trainer.fit(model, train_dataloaders=train_dataloader, val_dataloaders=test_dataloader)\n",
632
+ " if LOG_VISUALS:\n",
633
+ " model.log_points_file.close()\n",
634
+ " model.log_normals_file.close()"
635
+ ]
636
+ },
637
+ {
638
+ "cell_type": "code",
639
+ "execution_count": null,
640
+ "id": "bda6c1bf-7674-4e59-8cc7-dfcba9d689d9",
641
+ "metadata": {},
642
+ "outputs": [],
643
+ "source": []
644
+ }
645
+ ],
646
+ "metadata": {
647
+ "kernelspec": {
648
+ "display_name": "senv",
649
+ "language": "python",
650
+ "name": "senv"
651
+ },
652
+ "language_info": {
653
+ "codemirror_mode": {
654
+ "name": "ipython",
655
+ "version": 3
656
+ },
657
+ "file_extension": ".py",
658
+ "mimetype": "text/x-python",
659
+ "name": "python",
660
+ "nbconvert_exporter": "python",
661
+ "pygments_lexer": "ipython3",
662
+ "version": "3.9.16"
663
+ }
664
+ },
665
+ "nbformat": 4,
666
+ "nbformat_minor": 5
667
+ }