Add files using upload-large-folder tool
- -9AzT4oBgHgl3EQf_f5A/content/2301.01948v1.pdf +3 -0
- -9AzT4oBgHgl3EQf_f5A/vector_store/index.faiss +3 -0
- -9AzT4oBgHgl3EQf_f5A/vector_store/index.pkl +3 -0
- .gitattributes +99 -0
- 29AyT4oBgHgl3EQfb_di/vector_store/index.pkl +3 -0
- 2dE4T4oBgHgl3EQfzw3P/content/tmp_files/2301.05277v1.pdf.txt +1361 -0
- 2dE4T4oBgHgl3EQfzw3P/content/tmp_files/load_file.txt +0 -0
- 4dFAT4oBgHgl3EQfExzK/content/tmp_files/2301.08424v1.pdf.txt +1016 -0
- 4dFAT4oBgHgl3EQfExzK/content/tmp_files/load_file.txt +0 -0
- 4tFKT4oBgHgl3EQfRy39/content/2301.11773v1.pdf +3 -0
- 4tFKT4oBgHgl3EQfRy39/vector_store/index.faiss +3 -0
- 4tFKT4oBgHgl3EQfRy39/vector_store/index.pkl +3 -0
- 59E0T4oBgHgl3EQfvwFr/content/2301.02622v1.pdf +3 -0
- 59E0T4oBgHgl3EQfvwFr/vector_store/index.pkl +3 -0
- 5tFIT4oBgHgl3EQf8CtK/content/2301.11400v1.pdf +3 -0
- 5tFIT4oBgHgl3EQf8CtK/vector_store/index.pkl +3 -0
- 69E1T4oBgHgl3EQfBgJT/content/tmp_files/2301.02852v1.pdf.txt +1274 -0
- 69E1T4oBgHgl3EQfBgJT/content/tmp_files/load_file.txt +0 -0
- 6NAyT4oBgHgl3EQfpfik/content/tmp_files/2301.00527v1.pdf.txt +775 -0
- 6NAyT4oBgHgl3EQfpfik/content/tmp_files/load_file.txt +0 -0
- 79FLT4oBgHgl3EQfAy4h/vector_store/index.faiss +3 -0
- 7tE4T4oBgHgl3EQf2g0r/content/2301.05298v1.pdf +3 -0
- 7tE4T4oBgHgl3EQf2g0r/vector_store/index.faiss +3 -0
- 7tE4T4oBgHgl3EQf2g0r/vector_store/index.pkl +3 -0
- 99AyT4oBgHgl3EQf3fke/content/tmp_files/2301.00768v1.pdf.txt +2921 -0
- 99AyT4oBgHgl3EQf3fke/content/tmp_files/load_file.txt +0 -0
- 9NE4T4oBgHgl3EQfdgy1/vector_store/index.faiss +3 -0
- 9tFLT4oBgHgl3EQfCC72/content/2301.11974v1.pdf +3 -0
- AdFLT4oBgHgl3EQfEy_H/vector_store/index.faiss +3 -0
- B9AzT4oBgHgl3EQfwP5n/content/tmp_files/2301.01719v1.pdf.txt +393 -0
- B9AzT4oBgHgl3EQfwP5n/content/tmp_files/load_file.txt +156 -0
- B9FJT4oBgHgl3EQfACzo/content/tmp_files/2301.11418v1.pdf.txt +980 -0
- B9FJT4oBgHgl3EQfACzo/content/tmp_files/load_file.txt +0 -0
- BNE3T4oBgHgl3EQfTwqq/vector_store/index.pkl +3 -0
- BNE5T4oBgHgl3EQfTA98/content/2301.05533v1.pdf +3 -0
- BNE5T4oBgHgl3EQfTA98/vector_store/index.faiss +3 -0
- BNE5T4oBgHgl3EQfTA98/vector_store/index.pkl +3 -0
- C9E1T4oBgHgl3EQfWATV/content/2301.03110v1.pdf +3 -0
- C9E1T4oBgHgl3EQfWATV/vector_store/index.pkl +3 -0
- CdFQT4oBgHgl3EQfOTbk/content/2301.13275v1.pdf +3 -0
- CdFQT4oBgHgl3EQfOTbk/vector_store/index.faiss +3 -0
- CdFQT4oBgHgl3EQfOTbk/vector_store/index.pkl +3 -0
- DNAzT4oBgHgl3EQfTvz-/content/2301.01257v1.pdf +3 -0
- DNAzT4oBgHgl3EQfTvz-/vector_store/index.faiss +3 -0
- DNAzT4oBgHgl3EQfTvz-/vector_store/index.pkl +3 -0
- ENFRT4oBgHgl3EQfAze2/content/2301.13463v1.pdf +3 -0
- ENFRT4oBgHgl3EQfAze2/vector_store/index.pkl +3 -0
- EdE4T4oBgHgl3EQffQ16/vector_store/index.faiss +3 -0
- EdE4T4oBgHgl3EQffQ16/vector_store/index.pkl +3 -0
- FdE3T4oBgHgl3EQfVwo4/content/2301.04462v1.pdf +3 -0
-9AzT4oBgHgl3EQf_f5A/content/2301.01948v1.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33ce56f197bd5c331e0dbf38aca2dd93597e6b0bf4ed8a6b0d3f371ffeacf53b
+size 1527886
-9AzT4oBgHgl3EQf_f5A/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e4db0944738dca8dbe85fa3533f56e285fbe267102de4fa8d0df262840f5991
+size 6029357
-9AzT4oBgHgl3EQf_f5A/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf91db6e34569b72041f7b29ba26bd4f89dc5be0c36cb48e3d0b8c3a285d7756
+size 222831
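The three-line files above are Git LFS pointer files rather than raw binaries: each records the LFS spec version, a sha256 object id, and the payload size in bytes. As a rough illustration only (not part of this dataset's tooling), such a pointer can be parsed with a few lines of Python; the example path in the comment is one of the files shown above and is used purely hypothetically.

```python
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if not line.strip():
            continue
        key, _, value = line.partition(" ")
        fields[key] = value
    # The oid is stored as "sha256:<hex digest>"; size is in bytes.
    algo, _, digest = fields.get("oid", "").partition(":")
    return {
        "version": fields.get("version"),
        "oid_algorithm": algo,
        "oid": digest,
        "size_bytes": int(fields.get("size", "0")),
    }

# Hypothetical usage against one of the pointer files in this commit:
# info = parse_lfs_pointer("-9AzT4oBgHgl3EQf_f5A/vector_store/index.faiss")
# print(info["oid"], info["size_bytes"])
```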
.gitattributes
CHANGED
@@ -1235,3 +1235,102 @@ LtAyT4oBgHgl3EQfgPig/content/2301.00356v1.pdf filter=lfs diff=lfs merge=lfs -tex
99A0T4oBgHgl3EQfO__U/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
edFST4oBgHgl3EQfFzg4/content/2301.13719v1.pdf filter=lfs diff=lfs merge=lfs -text
edFST4oBgHgl3EQfFzg4/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+iNE1T4oBgHgl3EQfMwPW/content/2301.02994v1.pdf filter=lfs diff=lfs merge=lfs -text
+oNAzT4oBgHgl3EQfqf3Z/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+9NE4T4oBgHgl3EQfdgy1/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+79FLT4oBgHgl3EQfAy4h/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+sdE3T4oBgHgl3EQfMwm8/content/2301.04377v1.pdf filter=lfs diff=lfs merge=lfs -text
+d9FIT4oBgHgl3EQfoiuQ/content/2301.11319v1.pdf filter=lfs diff=lfs merge=lfs -text
+WtAyT4oBgHgl3EQfWPdu/content/2301.00159v1.pdf filter=lfs diff=lfs merge=lfs -text
+KdE4T4oBgHgl3EQfiA17/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+VdAyT4oBgHgl3EQfhfjt/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+LtAyT4oBgHgl3EQfgPig/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+VdAyT4oBgHgl3EQfhfjt/content/2301.00380v1.pdf filter=lfs diff=lfs merge=lfs -text
+iNE0T4oBgHgl3EQfYAA8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+BNE5T4oBgHgl3EQfTA98/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+TdE5T4oBgHgl3EQfAQ7r/content/2301.05378v1.pdf filter=lfs diff=lfs merge=lfs -text
+XtAzT4oBgHgl3EQfmf3D/content/2301.01565v1.pdf filter=lfs diff=lfs merge=lfs -text
+oNE5T4oBgHgl3EQfkA8i/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+oNFPT4oBgHgl3EQfKjR1/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+GNFLT4oBgHgl3EQfGi_B/content/2301.11993v1.pdf filter=lfs diff=lfs merge=lfs -text
+wtE0T4oBgHgl3EQf-QJD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+GNFLT4oBgHgl3EQfGi_B/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+TdE5T4oBgHgl3EQfAQ7r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+4tFKT4oBgHgl3EQfRy39/content/2301.11773v1.pdf filter=lfs diff=lfs merge=lfs -text
+BNE5T4oBgHgl3EQfTA98/content/2301.05533v1.pdf filter=lfs diff=lfs merge=lfs -text
+XtAzT4oBgHgl3EQfmf3D/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+DNAzT4oBgHgl3EQfTvz-/content/2301.01257v1.pdf filter=lfs diff=lfs merge=lfs -text
+DNAzT4oBgHgl3EQfTvz-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ptFRT4oBgHgl3EQfdjek/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+r9A0T4oBgHgl3EQfK__H/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+b9E5T4oBgHgl3EQfEg5T/content/2301.05414v1.pdf filter=lfs diff=lfs merge=lfs -text
+XtFJT4oBgHgl3EQf5y0l/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+XtFJT4oBgHgl3EQf5y0l/content/2301.11671v1.pdf filter=lfs diff=lfs merge=lfs -text
+WdFRT4oBgHgl3EQfMjc9/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ydE0T4oBgHgl3EQf-wKD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+sdE3T4oBgHgl3EQfMwm8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+U9FIT4oBgHgl3EQfgSvJ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+MNFQT4oBgHgl3EQfVDaQ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+lNE3T4oBgHgl3EQf5wv2/content/2301.04785v1.pdf filter=lfs diff=lfs merge=lfs -text
+PdE4T4oBgHgl3EQf-Q54/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+MNFQT4oBgHgl3EQfVDaQ/content/2301.13299v1.pdf filter=lfs diff=lfs merge=lfs -text
+-9AzT4oBgHgl3EQf_f5A/content/2301.01948v1.pdf filter=lfs diff=lfs merge=lfs -text
+PdE4T4oBgHgl3EQf-Q54/content/2301.05362v1.pdf filter=lfs diff=lfs merge=lfs -text
+d9FIT4oBgHgl3EQfoiuQ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+LtE3T4oBgHgl3EQfYQrP/content/2301.04487v1.pdf filter=lfs diff=lfs merge=lfs -text
+xdFPT4oBgHgl3EQfQTRV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+xdFPT4oBgHgl3EQfQTRV/content/2301.13041v1.pdf filter=lfs diff=lfs merge=lfs -text
+stAzT4oBgHgl3EQf6f5B/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+Q9E5T4oBgHgl3EQfZg88/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ydE0T4oBgHgl3EQf-wKD/content/2301.02818v1.pdf filter=lfs diff=lfs merge=lfs -text
+Q9E5T4oBgHgl3EQfZg88/content/2301.05581v1.pdf filter=lfs diff=lfs merge=lfs -text
+i9E2T4oBgHgl3EQfIAa1/content/2301.03675v1.pdf filter=lfs diff=lfs merge=lfs -text
+stAzT4oBgHgl3EQf6f5B/content/2301.01875v1.pdf filter=lfs diff=lfs merge=lfs -text
+f9AzT4oBgHgl3EQfavwP/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ItE4T4oBgHgl3EQfIgwL/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+AdFLT4oBgHgl3EQfEy_H/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+xdAzT4oBgHgl3EQfCfp8/content/2301.00960v1.pdf filter=lfs diff=lfs merge=lfs -text
+ItE4T4oBgHgl3EQfIgwL/content/2301.04912v1.pdf filter=lfs diff=lfs merge=lfs -text
+lNE3T4oBgHgl3EQf5wv2/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+7tE4T4oBgHgl3EQf2g0r/content/2301.05298v1.pdf filter=lfs diff=lfs merge=lfs -text
+VtE5T4oBgHgl3EQfcA97/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+FdE3T4oBgHgl3EQfVwo4/content/2301.04462v1.pdf filter=lfs diff=lfs merge=lfs -text
+VtE5T4oBgHgl3EQfcA97/content/2301.05600v1.pdf filter=lfs diff=lfs merge=lfs -text
+QNA0T4oBgHgl3EQfDP_p/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+YNE1T4oBgHgl3EQfcATB/content/2301.03180v1.pdf filter=lfs diff=lfs merge=lfs -text
+xdAzT4oBgHgl3EQfCfp8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+QNA0T4oBgHgl3EQfDP_p/content/2301.02002v1.pdf filter=lfs diff=lfs merge=lfs -text
+RtFRT4oBgHgl3EQf8ThG/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+RtFRT4oBgHgl3EQf8ThG/content/2301.13683v1.pdf filter=lfs diff=lfs merge=lfs -text
+C9E1T4oBgHgl3EQfWATV/content/2301.03110v1.pdf filter=lfs diff=lfs merge=lfs -text
+4tFKT4oBgHgl3EQfRy39/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+WtAyT4oBgHgl3EQfWPdu/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+CdFQT4oBgHgl3EQfOTbk/content/2301.13275v1.pdf filter=lfs diff=lfs merge=lfs -text
+b9E5T4oBgHgl3EQfEg5T/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+a9E0T4oBgHgl3EQf4gJ0/content/2301.02739v1.pdf filter=lfs diff=lfs merge=lfs -text
+LdAzT4oBgHgl3EQfkf0t/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+LtE3T4oBgHgl3EQfYQrP/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+-9AzT4oBgHgl3EQf_f5A/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+CdFQT4oBgHgl3EQfOTbk/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+7tE4T4oBgHgl3EQf2g0r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ytFQT4oBgHgl3EQfyTYb/content/2301.13408v1.pdf filter=lfs diff=lfs merge=lfs -text
+rdE1T4oBgHgl3EQfPwNi/content/2301.03031v1.pdf filter=lfs diff=lfs merge=lfs -text
+FdE3T4oBgHgl3EQfVwo4/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+q9AzT4oBgHgl3EQfrP2-/content/2301.01642v1.pdf filter=lfs diff=lfs merge=lfs -text
+EdE4T4oBgHgl3EQffQ16/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+rdE1T4oBgHgl3EQfPwNi/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+yNAzT4oBgHgl3EQfQvuD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ddFAT4oBgHgl3EQfYx31/content/2301.08542v1.pdf filter=lfs diff=lfs merge=lfs -text
+idE3T4oBgHgl3EQfIwmA/content/2301.04337v1.pdf filter=lfs diff=lfs merge=lfs -text
+t9AzT4oBgHgl3EQfr_3p/content/2301.01654v1.pdf filter=lfs diff=lfs merge=lfs -text
+JtE2T4oBgHgl3EQfAQZo/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+q9AzT4oBgHgl3EQfrP2-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+SNE0T4oBgHgl3EQf1wKp/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+XNE4T4oBgHgl3EQfNQyg/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+5tFIT4oBgHgl3EQf8CtK/content/2301.11400v1.pdf filter=lfs diff=lfs merge=lfs -text
+SNE0T4oBgHgl3EQf1wKp/content/2301.02704v1.pdf filter=lfs diff=lfs merge=lfs -text
+59E0T4oBgHgl3EQfvwFr/content/2301.02622v1.pdf filter=lfs diff=lfs merge=lfs -text
+idE3T4oBgHgl3EQfIwmA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+9tFLT4oBgHgl3EQfCC72/content/2301.11974v1.pdf filter=lfs diff=lfs merge=lfs -text
+ENFRT4oBgHgl3EQfAze2/content/2301.13463v1.pdf filter=lfs diff=lfs merge=lfs -text
+YNE1T4oBgHgl3EQfcATB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
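Each line added above routes one path through Git LFS via the filter, diff, and merge attributes. The sketch below is an illustration rather than part of the repository's tooling: it reads such attribute lines back and checks whether a given path is LFS-tracked, approximating Git's pattern rules with simple fnmatch globbing.

```python
from fnmatch import fnmatch

def lfs_tracked_patterns(gitattributes_text: str) -> list:
    """Collect .gitattributes patterns whose attributes include filter=lfs."""
    patterns = []
    for line in gitattributes_text.splitlines():
        parts = line.split()
        if len(parts) >= 2 and "filter=lfs" in parts[1:]:
            patterns.append(parts[0])
    return patterns

def is_lfs_tracked(path: str, patterns: list) -> bool:
    # Approximation: Git's wildmatch rules are richer than fnmatch globbing.
    return any(fnmatch(path, pat) for pat in patterns)

# Example with two of the entries added in this commit:
sample = "\n".join([
    "iNE1T4oBgHgl3EQfMwPW/content/2301.02994v1.pdf filter=lfs diff=lfs merge=lfs -text",
    "9NE4T4oBgHgl3EQfdgy1/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text",
])
pats = lfs_tracked_patterns(sample)
print(is_lfs_tracked("9NE4T4oBgHgl3EQfdgy1/vector_store/index.faiss", pats))  # True
```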
29AyT4oBgHgl3EQfb_di/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5f25b190acff38a90579a1c04ea0a45033aea90fb4b6bce0eff1c1ac76f6358
+size 179867
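Every paper folder added in this commit pairs an index.faiss with an index.pkl under vector_store/. Assuming these follow the common layout of a FAISS index stored alongside a pickled companion object (an assumption; the diff itself does not state which tool produced them), they could be opened roughly as follows. The folder path in the comment is one of the folders above and is used purely as an example.

```python
import pickle

import faiss  # pip install faiss-cpu

def load_vector_store(folder: str):
    """Load a FAISS index and its pickled companion from a vector_store/ folder."""
    index = faiss.read_index(f"{folder}/index.faiss")  # dense vector index
    with open(f"{folder}/index.pkl", "rb") as fh:
        companion = pickle.load(fh)  # exact layout depends on the producing tool
    return index, companion

# Hypothetical usage with one of the folders added in this commit:
# index, companion = load_vector_store("29AyT4oBgHgl3EQfb_di/vector_store")
# print(index.ntotal, "vectors in the index")
```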
2dE4T4oBgHgl3EQfzw3P/content/tmp_files/2301.05277v1.pdf.txt
ADDED
@@ -0,0 +1,1361 @@
1 |
+
arXiv:2301.05277v1 [cs.HC] 12 Jan 2023
|
2 |
+
DriCon: On-device Just-in-Time Context
|
3 |
+
Characterization for Unexpected Driving Events
|
4 |
+
Debasree Das, Sandip Chakraborty, Bivas Mitra
|
5 |
+
Department of Computer Science and Engineering, Indian Institute of Technology Kharagpur, INDIA 721302
|
6 |
+
Email: {debasreedas1994, sandipchkraborty, bivasmitra}@gmail.com
|
7 |
+
Abstract—Driving is a complex task carried out under the
|
8 |
+
influence of diverse spatial objects and their temporal inter-
|
9 |
+
actions. Therefore, a sudden fluctuation in driving behavior
|
10 |
+
can be due to either a lack of driving skill or the effect of
|
11 |
+
various on-road spatial factors such as pedestrian movements,
|
12 |
+
peer vehicles’ actions, etc. Therefore, understanding the context
|
13 |
+
behind a degraded driving behavior just-in-time is necessary
|
14 |
+
to ensure on-road safety. In this paper, we develop a system
|
15 |
+
called DriCon that exploits the information acquired from a
|
16 |
+
dashboard-mounted edge-device to understand the context in
|
17 |
+
terms of micro-events from a diverse set of on-road spatial
|
18 |
+
factors and in-vehicle driving maneuvers taken. DriCon uses the
|
19 |
+
live in-house testbed and the largest publicly available driving
|
20 |
+
dataset to generate human interpretable explanations against the
|
21 |
+
unexpected driving events. Also, it provides a better insight with
|
22 |
+
an improved similarity of 80% over 50 hours of driving data
|
23 |
+
than the existing driving behavior characterization techniques.
|
24 |
+
Index Terms—Driving behavior, spatial events, context analysis
|
25 |
+
I. INTRODUCTION
|
26 |
+
With an increase in the traffic population, we witnessed
|
27 |
+
a phenomenal rise in road accidents in the past few years.
|
28 |
+
According to the World Health Organization (WHO) [1], the
|
29 |
+
loss is not only limited to humans but affects the GDP of
|
30 |
+
the country as well. The officially reported road crashes are
|
31 |
+
inspected mostly based on the macro circumstances, such as
|
32 |
+
the vehicle’s speed, the road’s situation, etc. Close inspection
|
33 |
+
of those macro circumstances reveals a series of micro-events,
|
34 |
+
which are responsible for such fatalities. For example, suppose
|
35 |
+
a driver hit the road divider and faced an injury while driving
|
36 |
+
on a non-congested road. From the macro perspective, we
|
37 |
+
might presume it is due to the driver’s amateurish driving
|
38 |
+
skill or the vehicle’s high speed. But, it is also possible that
|
39 |
+
some unexpected obstacles (say, crossing pedestrians/animals)
|
40 |
+
arrived at that moment out of sight. The driver deviated from
|
41 |
+
his lane while decelerating to avoid colliding with them.
|
42 |
+
Therefore, recording these micro-events are crucial in iden-
|
43 |
+
tifying the reasoning behind such accidents. Such contextual
|
44 |
+
information, or micro-events, thus, can help various stakehold-
|
45 |
+
ers like car insurance or app-cab companies to analyze the on-
|
46 |
+
road driving behavior of their drivers. Interestingly, an app-cab
|
47 |
+
company can penalize or incentivize their drivers based on how
|
48 |
+
they handle such context and take counter-measures to avoid
|
49 |
+
accidents.
|
50 |
+
A naive solution to extract the context information is
|
51 |
+
to analyze the traffic videos. Notably, CCTV cameras [2]
|
52 |
+
capture only static snapshots of the events concerning the
|
53 |
+
Live Deployment Setup
|
54 |
+
(a)
|
55 |
+
Output of DriCon
|
56 |
+
(b)
|
57 |
+
The Preceding
|
58 |
+
Vehicle Suddenly
|
59 |
+
Braked and
|
60 |
+
The Ego Vehicle
|
61 |
+
Abruptly Stopped
|
62 |
+
and
|
63 |
+
Faced Severe
|
64 |
+
Jerkiness
|
65 |
+
Fig. 1: DriCon: Hardware components and a running instance
|
66 |
+
when a vehicle faced severe jerks
|
67 |
+
moving vehicles. Existing works [2], [3] use dash-cam videos
|
68 |
+
along with IMU sensor data for manual or partly automated
|
69 |
+
investigation of the accident. Note that, human intervention is
|
70 |
+
error-prone and labor-intensive with higher costs. The situation
|
71 |
+
gets further complicated when multiple events are responsible
|
72 |
+
for the accident. For instance, suppose the preceding vehicle
|
73 |
+
suddenly brakes to avoid collision with a pedestrian or at
|
74 |
+
a run-yellow traffic signal. Consequently, the ego vehicle
|
75 |
+
has to decelerate abruptly, resulting in a two-step chain of
|
76 |
+
responsible events for the unexpected stop. Thus, identifying
|
77 |
+
spatiotemporal interactions among traffic objects are crucial in
|
78 |
+
characterizing the root cause behind such incidents.
|
79 |
+
Importantly, understanding the contexts behind the degraded
|
80 |
+
driving behavior on the fly is not trivial and poses multi-
|
81 |
+
ple challenges. First, this involves continuous monitoring of
|
82 |
+
the driving behavior of the driver as well as an exhaustive
|
83 |
+
knowledge of various on-road spatial micro-events. Expensive
|
84 |
+
vehicles use LiDAR, Radar etc., to sense the driver and
|
85 |
+
the environment [4], [5]; however, app-cab companies are
|
86 |
+
resistant to invest in such high-end vehicles due to low-profit
|
87 |
+
margin. Second, depending on the driving maneuvers taken,
|
88 |
+
temporally interlinking the micro-events based on the vehicle’s
|
89 |
+
interaction with on-road spatial objects is a significant research
|
90 |
+
challenge. For example, if adverse snowy weather is observed
|
91 |
+
on one day, its effect on traffic movements may last till the
|
92 |
+
next day. In contrast, reckless driving would impact only a few
|
93 |
+
other vehicles around and will not be temporally significant
|
94 |
+
after a few minutes. Such temporal impacts of an event
|
95 |
+
would vary depending on the type and space of the event.
|
96 |
+
Third, spatial positions of the surrounding objects impact the
|
97 |
+
driving maneuver. Precisely, along with temporal dependency,
|
98 |
+
the distance between the ego vehicle and the surrounding
|
99 |
+
|
100 |
+
objects plays a vital role. For example, a far-sighted pedestrian
|
101 |
+
might cross the road at high speed, keeping a safe distance,
|
102 |
+
but it is fatal if the distance to the vehicle is low. Existing
|
103 |
+
literature [6], [7] have attempted to identify risky driving,
|
104 |
+
e.g., vehicle-pedestrian interaction, through IMU and video
|
105 |
+
analysis; however, they fail to capture such temporal scaling
|
106 |
+
or the spatial dependency among surrounding objects. Fourth,
|
107 |
+
identifying the context in real-time over an edge-device (such
|
108 |
+
as a dashcam) is essential for providing a just-in-time feed-
|
109 |
+
back. But, deploying such a system for context characterization
|
110 |
+
and analysis from multi-modal data over resource-constrained
|
111 |
+
edge-device is not straightforward.
|
112 |
+
To address these challenges, we propose DriCon that
|
113 |
+
develops
|
114 |
+
a
|
115 |
+
smart
|
116 |
+
dash-cam
|
117 |
+
mounted
|
118 |
+
on
|
119 |
+
the
|
120 |
+
vehicle’s
|
121 |
+
dashboard to characterize the micro-events to provide just-in-
|
122 |
+
time contextual feedback to the driver and other stakeholders
|
123 |
+
(like the cab companies). It senses the maneuvers taken by
|
124 |
+
the ego vehicle through IMU and GPS sensors. In addition, a
|
125 |
+
front camera mounted on the device itself, is used to analyze
|
126 |
+
the relationship between various on-road micro-events and the
|
127 |
+
driving maneuvers taken. This facilitates the system to run in
|
128 |
+
each vehicle in a silo and makes it low-cost and lightweight.
|
129 |
+
Fig. 1 shows a snapshot of the hardware components of
|
130 |
+
our system mounted on a vehicle, and an example scenario
|
131 |
+
where DriCon generates a live contextual explanation behind
|
132 |
+
a sudden jerk observed in the vehicle. In summary, our
|
133 |
+
contributions to this paper are as follows.
|
134 |
+
(1) Pilot Study to Motivate Micro-Event Characterization:
|
135 |
+
We perform a set of pilot studies over the Berkeley Deep
|
136 |
+
Drive (BDD) dataset [8], the largest public driving dataset
|
137 |
+
available on the Internet (as of January 16, 2023), to
|
138 |
+
investigate the variations in driving behavior depending on
|
139 |
+
various road types, time of the day, day of the week, etc.,
|
140 |
+
and highlight the spatiotemporal micro-events causing abrupt
|
141 |
+
changes in driving maneuvers.
|
142 |
+
(2) Designing a Human Explainable Lightweight Causal
|
143 |
+
Model: The development of DriCon relies on the (i) IMU
|
144 |
+
& GPS data to infer the driving maneuvers, and (ii) object
|
145 |
+
detection model & perspective transformation [9] to detect the
|
146 |
+
surrounding objects and their actions to capture various spatial
|
147 |
+
micro-events. Subsequently, we identify the spatiotemporal
|
148 |
+
contexts whenever the driving behavior deteriorates during a
|
149 |
+
trip. Finally, we implement Self Organizing Maps (SOMs),
|
150 |
+
a lightweight but effective causal model to capture the
|
151 |
+
spatiotemporal dependency among features to learn the
|
152 |
+
context and generate human-interpretable explanations.
|
153 |
+
(3)
|
154 |
+
Deployment
|
155 |
+
on
|
156 |
+
the
|
157 |
+
Edge: We deploy the whole
|
158 |
+
architecture of DriCon on a Raspberry Pi 3 model, embedded
|
159 |
+
with a front camera, IMU and GPS sensors (Fig. 1). For this
|
160 |
+
purpose, we make both the IMU and visual processing of
|
161 |
+
the data lightweight and delay-intolerant. Following this, the
|
162 |
+
pre-trained model generates recommendations based on the
|
163 |
+
ongoing driving trip and makes it efficient to run live for
|
164 |
+
just-in-time causal inferences.
|
165 |
+
(4) Evaluating DriCon on a Live System Deployment and
|
166 |
+
with BDD Dataset: We evaluate DriCon on our live in-house
|
167 |
+
deployment, as well as on the BDD dataset [8] (over the
|
168 |
+
annotated data [10]), comprising 33 hours and 17 hours of
|
169 |
+
driving, respectively. We obtain on average 70% and 80%
|
170 |
+
similarity between the derived and the ground-truth causal
|
171 |
+
features, respectively, with top-3 and top-5 features returned
|
172 |
+
by the model, in correctly identifying the micro-events causing
|
173 |
+
a change in the driving behavior. Notably, in most cases,
|
174 |
+
we observe a good causal relationship (in terms of average
|
175 |
+
treatment effect) between the derived features and the observed
|
176 |
+
driving behavior. In addition, we perform different studies of
|
177 |
+
the resource consumption benchmarks on the edge-device to
|
178 |
+
get better insights into the proposed model.
|
179 |
+
II. RELATED WORK
|
180 |
+
Several works have been proposed in the literature on
|
181 |
+
understanding road traffic and its implications for road fa-
|
182 |
+
talities. Early research focused on traffic surveillance-based
|
183 |
+
techniques to prevent road accidents. For instance, National
|
184 |
+
Highway Traffic Safety Administration (NHTSA) [3] had
|
185 |
+
recorded statistics about fatal accident cases; TUAT [2] has
|
186 |
+
been collecting video records from taxis and drivers’ facial
|
187 |
+
images since 2005 to derive injury instances into several
|
188 |
+
classes along with driving behavior estimation. In India, the
|
189 |
+
source of information behind the causes of traffic injuries is the
|
190 |
+
local traffic police [11]. In contrast, works like [12], [13] learn
|
191 |
+
the crime type and aviator mobility pattern just-in-time from
|
192 |
+
street view images and raw trajectory streams, respectively.
|
193 |
+
Apart from harnessing videos and crowd-sourced information,
|
194 |
+
several works [14], [15] are done on abnormal driving behavior
|
195 |
+
detection by exploiting IMU and GPS data. To prevent fatal
|
196 |
+
accidents, authors [16]–[18] try to alert the drivers whenever
|
197 |
+
risky driving signature is observed, such as lane departure or
|
198 |
+
sudden slow-down indicating congestion. However, they have
|
199 |
+
not looked into the effect of neighboring vehicles or other
|
200 |
+
surrounding factors on various driving maneuvers.
|
201 |
+
Interaction among the ego vehicle and other obstacles,
|
202 |
+
such as pedestrians, adverse weather in complex city traffic,
|
203 |
+
often affects the vehicle’s motion, consequently affecting
|
204 |
+
the driving behavior. Existing studies [19] reveal that road
|
205 |
+
category, unsignalized crosswalks, and vehicle speed often
|
206 |
+
lead to a disagreement among pedestrians to cross the road,
|
207 |
+
leading to road fatalities. A more detailed study [20], [21]
|
208 |
+
focuses on causality analysis for autonomous driving, faces
|
209 |
+
infeasibility in real-time deployment. Moreover, they only use
|
210 |
+
a limited set of driving maneuvers, e.g., speed change only.
|
211 |
+
Particularly, causal inferencing is challenging due to high
|
212 |
+
variance in driving data and spurious correlation [22] between
|
213 |
+
traffic objects and maneuvers. The existing works limit their
|
214 |
+
study by considering only static road attributes or relying
|
215 |
+
on single or multi-modalities from a connected road network
|
216 |
+
system. Such methodologies will not be applicable for a
|
217 |
+
single vehicle in real-time deployment unless connected to the
|
218 |
+
|
219 |
+
system. In contrast, leveraging multi-modalities from onboard
|
220 |
+
vehicle sensors can efficiently characterize the continuous
|
221 |
+
and dynamic contexts behind unexpected driving behavior
|
222 |
+
fluctuations. DriCon develops a system in this direction.
|
223 |
+
III. MOTIVATION
|
224 |
+
In an ideal scenario, two vehicles are likely to follow similar
|
225 |
+
maneuvers under the same driving environment; but this is
|
226 |
+
not the case in reality. Driving behavior varies according
|
227 |
+
to the driver’s unique skill set and is influenced by the
|
228 |
+
impact of various on-road events, such as the movement
|
229 |
+
of other heavy and light vehicles, movement of pedestrians,
|
230 |
+
road congestion, maneuvers taken by the preceding vehicle,
|
231 |
+
etc., which we call spatial micro-events or micro-events, in
|
232 |
+
short. In this section, we perform a set of pilot studies to
|
233 |
+
answer the following questions. (a) Does a driver’s driving
|
234 |
+
behavior exhibit spatiotemporal variations? (b) Do all
|
235 |
+
micro-events occurrences during a trip similarly impact the
|
236 |
+
driving behavior? (c) Does a sequence of inter-dependent
|
237 |
+
micro-events collectively influence the driving behavior?
|
238 |
+
Following this, we analyze the publicly-available open-source
|
239 |
+
driving dataset named Berkeley Deep Drive dataset (BDD) [8]
|
240 |
+
to answer these questions stating the impact of different micro-
|
241 |
+
events on the driving behavior. The dataset contains 100k
|
242 |
+
trips crowd-sourced by 10k voluntary drivers over 18 cities
|
243 |
+
across two nations – the USA and Israel. The dataset has been
|
244 |
+
annotated with a driving score on the Likert scale of 1 (worst
|
245 |
+
driving) to 5 (best driving) for each 5-second of driving trips.
|
246 |
+
A. Variation in Driving Behavior over Space and Time
|
247 |
+
We first check whether the on-road driving behavior exhibits
|
248 |
+
a spatiotemporal variation. For this purpose, we vary two
|
249 |
+
parameters – road type as the spatial parameter (say, “High-
|
250 |
+
way”, “City Street”, “Residential”), and time of the day as
|
251 |
+
the temporal one (say, “Daytime”, “Nighttime”, “Dawn/Dusk”)
|
252 |
+
in the BDD dataset. In this pilot study, we form 9 groups
|
253 |
+
with 30 trips each, in a total of 270, where the trips under
|
254 |
+
a group are randomly picked from the BDD dataset. We plot
|
255 |
+
the distribution of the driving scores over all the trips for each
|
256 |
+
group. From Fig. 2(a), it is evident that the score distribution
|
257 |
+
varies both (a) for a single type of road at different times of
|
258 |
+
the day, and (b) for different types of road at any given time of
|
259 |
+
the day (with p < 0.05 reflecting its statistical significance).
|
260 |
+
In the following, we investigate the role played by various
|
261 |
+
micro-events behind the variations in driving behavior.
|
262 |
+
B. Role of Spatial Micro-events
|
263 |
+
Next, we inspect whether various on-road micro-events,
|
264 |
+
which are characterized by the movements of other spatial
|
265 |
+
objects such as “cars”, “pedestrians”, “trucks”, “buses”, “mo-
|
266 |
+
torcycles”, “bicycles”, etc., impact a driver’s driving behavior
|
267 |
+
in the same way across different times of the day. We
|
268 |
+
perform this study by handpicking 30 trips along with their
|
269 |
+
annotated driving scores for both day and night time from
|
270 |
+
the BDD dataset. We compute the volume (say, count) of
|
271 |
+
spatial objects extracted using the existing object detection
|
272 |
+
algorithm [23] from the video captured during the trip and
|
273 |
+
take the average count of each object for a 5-second time
|
274 |
+
window. Thus, for both daytime and nighttime, we get two
|
275 |
+
time-series distributions, (a) the count of each on-road spatial
|
276 |
+
object captured over the trip video during each time window,
|
277 |
+
and (b) the annotated driving scores at those time windows.
|
278 |
+
Next, we compute the Spearman’s Correlation Coefficient
|
279 |
+
(SCC) among these two distributions for day time and night
|
280 |
+
time, respectively. From Fig. 2(b), we infer that mostly all the
|
281 |
+
on-road spatial objects adversely affect the driving behavior
|
282 |
+
(depicting a negative correlation). Cars and pedestrians affect
|
283 |
+
the driving score majorly during the daytime. Whereas, at
|
284 |
+
night time, trucks and buses, along with the cars, impact the
|
285 |
+
driving behavior because heavy vehicles such as trucks move
|
286 |
+
primarily during the nighttime. However, the effect of light
|
287 |
+
vehicles such as motorcycles and bicycles is insignificant due
|
288 |
+
to the dedicated lanes for their movements. This observation
|
289 |
+
is further extended to Fig. 2(c), where the same study is done
|
290 |
+
for weekdays vs. weekends. We extracted the day of the week
|
291 |
+
using already provided timestamps in the BDD dataset and
|
292 |
+
clubbed 30 trips from Monday to Friday for weekdays and 30
|
293 |
+
trips from Saturday to Sunday for the weekend. From Fig. 2(c),
|
294 |
+
we observe that during the early days of the week, cars,
|
295 |
+
pedestrians, and trucks adversely affect the driving behavior,
|
296 |
+
whereas the impact is less during the weekend. Hence, we
|
297 |
+
conclude that different on-road objects exert diverse temporal
|
298 |
+
effects on the driving behavior.
|
299 |
+
C. Micro-events Contributing to Sudden Driving Maneuver:
|
300 |
+
Abrupt Stop as a Use-case
|
301 |
+
Finally, we explore whether multiple inter-dependent micro-
|
302 |
+
events can be responsible for a particular driving maneuver
|
303 |
+
that might degrade the driving behavior. For this purpose,
|
304 |
+
we choose abrupt stop as the maneuver, which we extract
|
305 |
+
from the GPS and the IMU data (the situations when a
|
306 |
+
stop creates a severe jerkiness [24]). We take 30 trips for
|
307 |
+
each scenario, including daytime, nighttime, weekdays, and
|
308 |
+
weekends. For each scenario, we extract the instances when
|
309 |
+
an abrupt stop is taken and record the corresponding micro-
|
310 |
+
events observed at those instances. Precisely, we extract the
|
311 |
+
presence/absence of the following micro-events: red traffic
|
312 |
+
signal, pedestrian movements, presence of heavy vehicles as
|
313 |
+
truck & bus, light vehicles as motorcycle & bicycle, and the
|
314 |
+
preceding vehicles’ braking action (as peer vehicle maneuver),
|
315 |
+
using well-established methodologies [10], [23]. We compute
|
316 |
+
the cumulative count of the presence of each micro-events and
|
317 |
+
the number of abrupt stops taken over all the trips for the
|
318 |
+
four scenarios mentioned above. From Fig. 2(d) and (e), we
|
319 |
+
observe that the red traffic signal, the peer vehicle maneuvers,
|
320 |
+
and heavy vehicles mostly cause an abrupt stop during the
|
321 |
+
nighttime and on weekdays. Therefore, we argue that multiple
|
322 |
+
on-road micro-events, such as the reckless movement of heavy
|
323 |
+
vehicles at night, force even an excellent driver to slam on the
|
324 |
+
brake and take an unsafe maneuver.
|
325 |
+
|
326 |
+
Highway
|
327 |
+
City Street
|
328 |
+
Residential
|
329 |
+
2.0
|
330 |
+
2.5
|
331 |
+
3.0
|
332 |
+
3.5
|
333 |
+
4.0
|
334 |
+
4.5
|
335 |
+
5.0
|
336 |
+
Driving Score
|
337 |
+
(a)
|
338 |
+
Cars
|
339 |
+
Pedestrians
|
340 |
+
Truck
|
341 |
+
Bus
|
342 |
+
Motorcycle
|
343 |
+
Bicycle
|
344 |
+
−0.8
|
345 |
+
−0.6
|
346 |
+
−0.4
|
347 |
+
−0.2
|
348 |
+
0.0
|
349 |
+
0.2
|
350 |
+
0.4
|
351 |
+
0.6
|
352 |
+
Spearman's Correlation
|
353 |
+
Day Time
|
354 |
+
Night Time
|
355 |
+
(b)
|
356 |
+
Cars
|
357 |
+
Pedestrians
|
358 |
+
Truck
|
359 |
+
Bus
|
360 |
+
Motorcycle
|
361 |
+
Bicycle
|
362 |
+
−0.6
|
363 |
+
−0.4
|
364 |
+
−0.2
|
365 |
+
0.0
|
366 |
+
0.2
|
367 |
+
Spearman's Correlation
|
368 |
+
Week Day
|
369 |
+
Weekend
|
370 |
+
(c)
|
371 |
+
Red
|
372 |
+
Signal
|
373 |
+
Pedestrians
|
374 |
+
Heavy
|
375 |
+
Vehicles
|
376 |
+
Light
|
377 |
+
Vehicles
|
378 |
+
Peer
|
379 |
+
Vehicle
|
380 |
+
Action
|
381 |
+
0
|
382 |
+
10
|
383 |
+
20
|
384 |
+
30
|
385 |
+
40
|
386 |
+
50
|
387 |
+
60
|
388 |
+
70
|
389 |
+
%age of Occurences
|
390 |
+
Day Time
|
391 |
+
Night Time
|
392 |
+
(d)
|
393 |
+
Red
|
394 |
+
Signal
|
395 |
+
Pedestrians
|
396 |
+
Heavy
|
397 |
+
Vehicles
|
398 |
+
Light
|
399 |
+
Vehicles
|
400 |
+
Peer
|
401 |
+
Vehicle
|
402 |
+
Action
|
403 |
+
0
|
404 |
+
10
|
405 |
+
20
|
406 |
+
30
|
407 |
+
40
|
408 |
+
50
|
409 |
+
%age of Occurences
|
410 |
+
Week Day
|
411 |
+
Weekend
|
412 |
+
(e)
|
413 |
+
Fig. 2: (a) Variation of Driving Behavior with respect to Road Type and Time of the Day, (b)-(c) Impact of Spatial Micro-events
|
414 |
+
on the Driving Score at Different (i) Time of the Day, (ii) Day of the Week, (d)-(e) Contributing Factors Observed behind
|
415 |
+
Abrupt Stop at Different (i) Time of the Day, (ii) Day of the Week
|
416 |
+
IV. PROBLEM STATEMENT AND SYSTEM OVERVIEW
|
417 |
+
A. Problem Statement
|
418 |
+
Consider that FM denotes the set of driving maneuvers
|
419 |
+
and FS be the set of spatial micro-events. Fi be the set
|
420 |
+
of temporally-represented feature variables corresponding to
|
421 |
+
the driving maneuvers taken and on-road spatial micro-events
|
422 |
+
encountered during a trip i. Let Ri
|
423 |
+
T be the driving score
|
424 |
+
at time T during the trip i. We are interested in inspecting
|
425 |
+
the events occurred, representing the feature values Fi, when
|
426 |
+
|Ri
|
427 |
+
T − ˆRi
|
428 |
+
T −1| > ǫ (ǫ is a hyper-parameter, we set ǫ = 1),
|
429 |
+
reflecting the fluctuations in driving behavior. Here, ˆRi
|
430 |
+
T −1 =
|
431 |
+
⌈mean([Ri
|
432 |
+
1, Ri
|
433 |
+
T −1])⌉ represents the mean driving behavior till
|
434 |
+
T −1. The output of the system is a characterization of {Fi
|
435 |
+
M,
|
436 |
+
Fi
|
437 |
+
S}, as to whether a fluctuation in the driving behavior is due
|
438 |
+
to the driving maneuvers only (Fi
|
439 |
+
M) or forced by the spatially
|
440 |
+
causal micro-events (Fi
|
441 |
+
S). Finally, we target to generate the
|
442 |
+
explanations based on {Fi
|
443 |
+
M, Fi
|
444 |
+
S} to give feedback to the
|
445 |
+
stakeholders for further analysis of the driving profile.
|
446 |
+
B. Feature Selection
|
447 |
+
Leveraging the existing literature [24], we identified a
|
448 |
+
set of feature variables at timestamp T representing various
|
449 |
+
driving maneuvers FM of the ego vehicle. These features
|
450 |
+
are – Weaving (AW
|
451 |
+
T ), Swerving (AS
|
452 |
+
T ), Side-slipping (AL
|
453 |
+
T ),
|
454 |
+
Abrupt Stop (AQ
|
455 |
+
T ), Sharp Turns (AU
|
456 |
+
T ), and Severe Jerkiness
|
457 |
+
(AJ
|
458 |
+
T ). Similarly, we consider the following feature variables
|
459 |
+
corresponding to the spatial micro-events FS – Relative Speed
|
460 |
+
(ST ) and Distance (DT ) between the ego and the preceding
|
461 |
+
vehicle, preceding vehicle’s Braking Action (BT), volume
|
462 |
+
of the peer vehicles in front of the ego vehicle indicating
|
463 |
+
Congestion in the road (CT ), Pedestrian (PT ), and it’s speed
|
464 |
+
(QT ), Traffic light (LT ), Heavy vehicles: {Bus & Truck}
|
465 |
+
(HT ), Type of the Road (GT ), and Weather condition (WT ).
|
466 |
+
Note that, we empirically select these features based on the
|
467 |
+
existing literature and observations from the dataset; additional
|
468 |
+
features can also be incorporated in DriCon without losing its
|
469 |
+
generality.
|
470 |
+
We next broadly introduce our system architecture. DriCon
|
471 |
+
captures IMU, GPS, and video data from a dashcam (say,
|
472 |
+
an edge-device) and characterizes the context behind the
|
473 |
+
improved/degraded driving behavior on the fly. The system
|
474 |
+
comprises three components: (a) Data Preprocessing and
|
475 |
+
Feature Extraction, (b) Detection of Improved/Degraded
|
476 |
+
Driving Behavior, and (c) Identification of Possible Context
|
477 |
+
(see Fig. 3).
|
478 |
+
4
|
479 |
+
Pedestrian Crossed and
|
480 |
+
The Ego Vehicle Abruptly
|
481 |
+
Stopped and Faced Severe
|
482 |
+
Jerkiness.
|
483 |
+
Inferred Features: Crossing
|
484 |
+
Pedestrians, Severe
|
485 |
+
Jerkiness, Abrupt Stops
|
486 |
+
INPUT: IMU, GPS & Video
|
487 |
+
+
|
488 |
+
Driving Maneuvers
|
489 |
+
Spatial Micro-events
|
490 |
+
2
|
491 |
+
4
|
492 |
+
4
|
493 |
+
Detect Change
|
494 |
+
in Driving
|
495 |
+
Behavior
|
496 |
+
Capture Time-
|
497 |
+
Series Dependency
|
498 |
+
Among Features
|
499 |
+
Output: Generated Explanations
|
500 |
+
(a)
|
501 |
+
(b)
|
502 |
+
(c)
|
503 |
+
(d)
|
504 |
+
(e)
|
505 |
+
Data Preprocessing and Feature Extraction
|
506 |
+
Identification of Possible Context
|
507 |
+
Model Output
|
508 |
+
Model
|
509 |
+
Construction
|
510 |
+
Fig. 3: DriCon System Flow and Modeling Pipeline
|
511 |
+
C. Data Preprocessing and Feature Extraction
|
512 |
+
The collected IMU and GPS sensor data are prone to
|
513 |
+
noise due to the earth’s gravitational force, signal attenuation,
|
514 |
+
and atmospheric interference. Hence, we implement a low-
|
515 |
+
pass filter to eliminate such noises from IMU and GPS to
|
516 |
+
compute inertial features for the extraction of the driving
|
517 |
+
maneuvers (FM). Next, we preprocess the video data before
|
518 |
+
extracting on-road spatial micro-events and their actions (FS).
|
519 |
+
We up/downsample the acquired videos to a resolution of
|
520 |
+
960 × 540p, preserving the signal-to-noise ratio above 20 dB.
|
521 |
+
1) Driving Maneuvers - FM: In order to generate the
|
522 |
+
features corresponding to different driving maneuvers (FM),
|
523 |
+
we extract the instances of Weaving (AW
|
524 |
+
T ), Swerving (AS
|
525 |
+
T ),
|
526 |
+
Side-slipping (AL
|
527 |
+
T ), Abrupt Stop (AQ
|
528 |
+
T ), Sharp Turns (AU
|
529 |
+
T ),
|
530 |
+
and Severe Jerkiness (AJ
|
531 |
+
T ) from the IMU data using standard
|
532 |
+
accelerometry analysis [10], [24].
|
533 |
+
2) Spatial Micro-events - FS: Next, we implement the
|
534 |
+
state-of-the-art video data-based object detection algorithms
|
535 |
+
and further fine-tune them based on our requirements, as
|
536 |
+
developing vision-based algorithms is beyond the scope of our
|
537 |
+
work. We leverage the YOLO-V3 [23] algorithm trained on
|
538 |
+
the COCO dataset [25] to detect a subset of traffic objects such
|
539 |
+
as Pedestrians, Cars, Buses, Trucks, and Traffic Lights (de-
|
540 |
+
picted as FS). Next, we estimate the influence of pedestrians’
|
541 |
+
interactions, the presence of heavy vehicles (buses & trucks),
|
542 |
+
traffic light signal transitions (red, yellow & green), and the
|
543 |
+
cars on the driving behavior of the ego vehicle. Next, we
|
544 |
+
discard the detected objects which depict a confidence score
|
545 |
+
|
546 |
+
troficintao.50o6
|
547 |
+
treffieliehtzo5
|
548 |
+
cor.
|
549 |
+
0.90r0.680
|
550 |
+
trotmcliehtr0.27Daytime
|
551 |
+
Nightime
|
552 |
+
Dawn/Dusk< 50% and bounding boxes of area < 10k, capturing the fact
|
553 |
+
that the far-sighted traffic objects around the ego vehicle exert
|
554 |
+
marginal impact compared to the near-sighted ones. Addition-
|
555 |
+
ally, the traffic objects in the mid-way of the road, broadly
|
556 |
+
visible from the driver’s dashboard, will be of more influence
|
557 |
+
than the left or right lanes, as the ego vehicle will follow
|
558 |
+
them immediately. Thus, we divide each of the frames into
|
559 |
+
0.2:0.6:0.2 ratio along the horizontal axis, as left:middle:right
|
560 |
+
lanes. Therefore, we keep the Pedestrians PT , Cars, Heavy
|
561 |
+
Vehicles as {Buses & Trucks} HT , which have bounding
|
562 |
+
box co-ordinates within the middle lane boundary, and Traffic
|
563 |
+
Light Signal Transitions LT (Red, Yellow & Green) without
|
564 |
+
the lane information as traffic lights are often positioned on
|
565 |
+
the left and right lanes. Since our pilot study demonstrated
|
566 |
+
that the pedestrians and peer vehicles’ action significantly
|
567 |
+
impact the driving maneuvers of the ego vehicle, (a) we extract
|
568 |
+
the Pedestrian Speed (QT ), as well as identify the crossing
|
569 |
+
pedestrians in the mid-way, and (b) we compute the preceding
|
570 |
+
vehicle’s Braking Action (BT ), and Congestion (CT), as
|
571 |
+
well as detect the Relative Speed (ST ) and Distance (DT )
|
572 |
+
variation among the ego and the preceding vehicle. We apply
|
573 |
+
perspective transformation and deep learning methods [9], [26]
|
574 |
+
to infer the above. Finally, the above pipeline runs on each
|
575 |
+
frame where the video is re-sampled to 15 frames-per-second.
|
576 |
+
D. Detection of Driving Behavior Fluctuations
|
577 |
+
The crux of DriCon is to capture the temporal dependency
|
578 |
+
of various driving maneuvers and spatial micro-events when
|
579 |
+
a change in the driving behavior is observed during the trip.
|
580 |
+
For a run-time annotation of the driving behavior, we use an
|
581 |
+
existing study [10] that provides a driving behavior score on
|
582 |
+
the Likert scale [1 − 5] by analyzing driving maneuvers and
|
583 |
+
other surrounding factors. We divide the trip into continuous
|
584 |
+
non-overlapping time windows of size δ and compute the
|
585 |
+
driving score at the end of every window U (denoted as RP
|
586 |
+
U ),
|
587 |
+
using the feature values captured during that window [10].
|
588 |
+
To quantitatively monitor whether there is a change in the
|
589 |
+
driving behavior during a window U, we compare RP
|
590 |
+
U and
|
591 |
+
ˆRP
|
592 |
+
U =
|
593 |
+
1
|
594 |
+
U−1
|
595 |
+
U−1
|
596 |
+
�
|
597 |
+
i=1
|
598 |
+
RP
|
599 |
+
i (mean driving score during previous U−1
|
600 |
+
windows). Suppose this difference is significant (greater than
|
601 |
+
some predefined threshold ǫ). In that case, DriCon proceeds
|
602 |
+
towards analyzing the temporal dependency among the feature
|
603 |
+
vectors at different time windows to understand the reason
|
604 |
+
behind this difference.
|
605 |
+
E. Identification of Possible Context
|
606 |
+
In the final module, we use the feature vectors at different
|
607 |
+
windows to build the model that identifies which features
|
608 |
+
(FGEN) are responsible for the change in driving behavior
|
609 |
+
during the window U. The model reactively seeks explanations
|
610 |
+
behind such fluctuations by analyzing the effect of the micro-
|
611 |
+
events that occurred over the past windows [1, · · · , (U − 1)]
|
612 |
+
and the present window U. Finally, natural language-based
|
613 |
+
human interpretable explanations are generated and fed back
|
614 |
+
to the stakeholders for further analysis.
|
615 |
+
V. MODEL DEVELOPMENT
|
616 |
+
To develop the core model for DriCon, we leverage the
|
617 |
+
already extracted features F ∈ {FM
|
618 |
+
� FS} (details in §IV-C)
|
619 |
+
to capture the temporal dependency of the past as well as the
|
620 |
+
present events. In addition, DriCon derives the explanation be-
|
621 |
+
hind the detected events through explanatory features FGEN.
|
622 |
+
For this purpose, we need a self-explanatory model that
|
623 |
+
can capture the spatiotemporal dependency among different
|
624 |
+
driving maneuvers and micro-events associated with the on-
|
625 |
+
road driving behavior. We choose a Self Organizing Map
|
626 |
+
(SOM) [27] for constructing the model that can exploit such
|
627 |
+
spatiotemporal dependencies with minimum data availability.
|
628 |
+
The major limitation of the classical deep learning models
|
629 |
+
(such as CNN or RNN) stems from the fact that, (i) deep
|
630 |
+
networks consume heavy resources (say, memory), as well as
|
631 |
+
suffer from huge data dependency, and (ii) they act as a black
|
632 |
+
box, hence fail to generate human interpretable explanations
|
633 |
+
behind certain predictions [28]. On the other hand, SOM is
|
634 |
+
able to characterize the micro-events in runtime using feature
|
635 |
+
variability and unlabelled data.
|
636 |
+
Neighboring Radius
|
637 |
+
F1
|
638 |
+
F2
|
639 |
+
F3
|
640 |
+
FU
|
641 |
+
Input
|
642 |
+
Layer
|
643 |
+
Learning Phase
|
644 |
+
Feature
|
645 |
+
Input
|
646 |
+
Converge
|
647 |
+
Final Map
|
648 |
+
Code Book
|
649 |
+
BMU
|
650 |
+
Weight
|
651 |
+
(a)
|
652 |
+
(b)
|
653 |
+
(c)
|
654 |
+
No Change
|
655 |
+
Change
|
656 |
+
Fig. 4: Working Principle of SOM
|
657 |
+
A. Inferring Explanatory Features using SOM
|
658 |
+
The key idea behind obtaining the explanatory features is
|
659 |
+
first to discover the spatiotemporal feature dependency. In
|
660 |
+
DriCon, we derive so using Kohonen’s Self Organizing Map
|
661 |
+
(see Fig. 4), as it is an unsupervised ANN-based technique
|
662 |
+
leveraging competitive learning methods. Since DriCon runs
|
663 |
+
on an edge-device, we employ a minimal number of model
|
664 |
+
parameters to expedite the processing without compromising
|
665 |
+
the performance. Precisely, we implement the codebook with
|
666 |
+
147 neurons, spread out over a two-dimensional array of
|
667 |
+
size 7 × 21 (where 7 is a hyperparameter depending on the
|
668 |
+
maximum influence of the past windows during a trip, 21 cor-
|
669 |
+
responds to the number of features in the feature space). These
|
670 |
+
neurons are initialized with a random weight (see Fig. 4(a)),
|
671 |
+
where the weight vector has the same length (of 21) as the
|
672 |
+
feature vector. Next, we represent each trip with a 2D grid of
|
673 |
+
size 8 × 21 (considering 8 consecutive windows in a trip) to
|
674 |
+
capture the influence of the past windows [1, · · · , (U −1)] and
|
675 |
+
the present window U. In principle, the inherent topological
|
676 |
+
ordering of SOM groups the similar feature space (in windows
|
677 |
+
[1, · · · , (U −1)]) into a single group, when there is no change
|
678 |
+
in the driving behavior. On the contrary, the dissimilar ones
|
679 |
+
|
680 |
+
(say, during the window U), when there exists a change in
|
681 |
+
the driving behavior, are mapped into a different group, as
|
682 |
+
depicted in Fig. 4(b,c).
|
683 |
+
For instance, suppose on a trip, the ego vehicle abruptly
|
684 |
+
stops due to the preceding vehicle’s braking action following
|
685 |
+
a sudden change in the traffic signal. Hence the feature space
|
686 |
+
in window [1, · · · , (U − 1)] exhibits a similar signature (until
|
687 |
+
the abrupt stop occurs), and subsequently gets mapped to a
|
688 |
+
single neuron. However, during the abrupt stop, there will be
|
689 |
+
changes in the feature space (say, maneuvers and other spatial
|
690 |
+
events). These changes in the feature space will get it assigned
|
691 |
+
to a different neuron and settle the other neurons’ weight
|
692 |
+
automatically depending on the changes in the feature space
|
693 |
+
between the windows [1, · · · , (U −1)] and the window U. This
|
694 |
+
procedure allows SOM to harness the temporal dependency
|
695 |
+
among spatial events in an unsupervised mode, without using
|
696 |
+
the driving score explicitly.
|
697 |
+
1) Model Training: The input trip data is represented in
|
698 |
+
the 2D grid format for learning the best-matched neuron,
|
699 |
+
optimizing the Euclidean distance between the feature space
|
700 |
+
and weight vector of the corresponding neuron. To ensure the
|
701 |
+
best fit, the best-matched neuron adapts its weight vector most strongly toward the input feature vector. Also, the neurons in its neighborhood tune their weights as close as possible to those of the best-matched neuron. We train this model
|
705 |
+
for 500 epochs, where each neuron gets mapped with the
|
706 |
+
best matching trip instances and converges to their coordinate
|
707 |
+
position in the codebook. We implement the Bubble neigh-
|
708 |
+
borhood function [29] to update the neighborhood neurons’
|
709 |
+
weights until the neighborhood radius converges to ≈ 0. We
|
710 |
+
ensure that both the distance and neighborhood functions are computationally lightweight, which accelerates convergence without sacrificing learning accuracy. Upon completing the total number of epochs, we
|
713 |
+
obtain the converged codebook called the Map, where each trip
|
714 |
+
instance gets assigned to the best matching neuron called the
|
715 |
+
Best Matching Unit (BMU). The weight vector corresponding
|
716 |
+
to the BMU’s coordinate reveals the explanatory features
|
717 |
+
FGEN.
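To make the training procedure concrete, the following is a minimal sketch of this codebook update in Python/NumPy (illustrative only; the variable names, decay schedule, and initialization are assumptions rather than the exact implementation). It initializes a 7 × 21 codebook of 21-dimensional weight vectors, locates the BMU by Euclidean distance, and applies a Bubble-neighborhood update whose radius shrinks toward ≈ 0 over 500 epochs.

import numpy as np

def train_som(trips, grid_rows=7, grid_cols=21, n_features=21,
              epochs=500, init_radius=3.0, lr=0.5, seed=0):
    # `trips` is an (N, n_features) NumPy array of window-level feature vectors
    # drawn from the 2D trip grids described above.
    rng = np.random.default_rng(seed)
    # Codebook of 7 x 21 neurons, each holding a 21-dimensional weight vector.
    weights = rng.random((grid_rows, grid_cols, n_features))
    # Neuron grid coordinates, used for the Bubble neighborhood test.
    coords = np.stack(np.meshgrid(np.arange(grid_rows),
                                  np.arange(grid_cols), indexing="ij"), axis=-1)
    for epoch in range(epochs):
        # Decay the neighborhood radius towards ~0 and the learning rate towards 0.
        frac = 1.0 - epoch / epochs
        radius = max(init_radius * frac, 1e-3)
        alpha = lr * frac
        for x in trips[rng.permutation(len(trips))]:
            # Best Matching Unit: neuron whose weights are closest (Euclidean) to x.
            dist = np.linalg.norm(weights - x, axis=-1)
            bmu = np.unravel_index(np.argmin(dist), dist.shape)
            # Bubble neighborhood: every neuron within `radius` of the BMU is
            # updated with the same strength; all other neurons are untouched.
            in_bubble = np.linalg.norm(coords - np.array(bmu), axis=-1) <= radius
            weights[in_bubble] += alpha * (x - weights[in_bubble])
    return weights  # the converged codebook, i.e. the Map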
|
718 |
+
2) Model Execution: We leverage the constructed Map for
|
719 |
+
the runtime inference. First, we conduct the feature processing
|
720 |
+
of the current ongoing trip (following §IV-C), and in parallel,
|
721 |
+
the extracted feature space is fed as input to the constructed
|
722 |
+
Map. Eventually, we obtain the BMU’s coordinate and extract
|
723 |
+
its corresponding weight vector and the feature encoding for
|
724 |
+
the given trip instance. From the weight vector, we extract
|
725 |
+
the top-k weights and their corresponding feature names
|
726 |
+
(say, weather type) and their encoded values (say, weather
|
727 |
+
type: rainy). Finally, we populate them in FGEN (called
|
728 |
+
the Generative micro-events) for further generation of human
|
729 |
+
interpretable explanation.
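The runtime lookup can be sketched as follows, reusing the converged weights from the training sketch above; feature_names and feature_decoder are hypothetical stand-ins for the encoding tables of §IV-C.

import numpy as np

def infer_fgen(weights, feature_vec, feature_names, feature_decoder, k=5):
    # Map the live feature vector to its BMU and keep the top-k explanatory features.
    dist = np.linalg.norm(weights - feature_vec, axis=-1)
    bmu = np.unravel_index(np.argmin(dist), dist.shape)
    bmu_weights = weights[bmu]                   # 21-dimensional weight vector
    top_idx = np.argsort(bmu_weights)[::-1][:k]  # indices of the k largest weights
    # feature_decoder maps (feature name, encoded value) -> keyword,
    # e.g. ("weather_type", 2) -> "rainy".
    return [(feature_names[i],
             feature_decoder(feature_names[i], feature_vec[i])) for i in top_idx]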
|
730 |
+
B. Generating Textual Explanation
|
731 |
+
DriCon aims to generate the explanations in textual format
|
732 |
+
utilizing the output features FGEN for better readability and
|
733 |
+
human interpretation. As the features f ∈ FGEN are already
|
734 |
+
associated with some keywords (say, severe jerkiness), we
|
735 |
+
need to generate them in a sentential form, keeping the features
|
736 |
+
as “action” or “subject” depending on whether f ∈ FM
|
737 |
+
or f ∈ FS, respectively. For instance, if the feature is an
|
738 |
+
action, we assign the ego vehicle as the subject, replace the
|
739 |
+
corresponding output feature f with its describing keyword,
|
740 |
+
and finally concatenate them to obtain the sentential form.
|
741 |
+
For example, in case of severe jerkiness, the constructed
|
742 |
+
sentence becomes, “the ego vehicle severe jerks”. However, if
|
743 |
+
the output feature f represents a subject, then many possible
|
744 |
+
sentences can be generated out of one subject. Thus, we
|
745 |
+
mine several traffic guidelines [30] and compute the cosine
|
746 |
+
similarity among the features and existing guidelines using TF-
|
747 |
+
IDF vectorizer. Upon extracting the most relevant guidelines,
|
748 |
+
we fetch the object associated with the sentence and construct
|
749 |
+
a single sentence for each output feature (e.g., “pedestrian
|
750 |
+
crossing” → “pedestrian crossing the intersection”). Next, for
|
751 |
+
all the generated sentences, the describing keywords corre-
|
752 |
+
sponding to each feature are converted to an adjective or
|
753 |
+
adverb using Glove [31] for better structuring of the sentences
|
754 |
+
(say, “the ego vehicle severe jerks” → “the ego vehicle severely
|
755 |
+
jerks”). Finally, each sentence is concatenated using the “and”
|
756 |
+
conjunction, and repetitive subjects are replaced using their
|
757 |
+
pronoun form via string manipulation to generate the whole
|
758 |
+
explanation, as depicted in Fig. 3(e).
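A simplified sketch of this sentence-construction pipeline is given below, using scikit-learn's TfidfVectorizer for the guideline matching; the keyword-to-phrase lookup is a hypothetical stand-in for the GloVe-based adjective/adverb conversion.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def best_guideline(subject_feature, guidelines):
    # Rank mined guideline sentences by TF-IDF cosine similarity to the
    # subject feature (e.g. "pedestrian crossing") and return the best match.
    vectorizer = TfidfVectorizer()
    matrix = vectorizer.fit_transform(guidelines + [subject_feature])
    sims = cosine_similarity(matrix[len(guidelines)], matrix[:len(guidelines)]).ravel()
    return guidelines[int(sims.argmax())]

def maneuver_clause(keyword, keyword_to_phrase):
    # Action features take the ego vehicle as subject, e.g.
    # "severe jerkiness" -> "the ego vehicle severely jerks".
    return "the ego vehicle " + keyword_to_phrase[keyword]

def build_explanation(clauses):
    # Join clauses with "and", replacing repeated subjects by the pronoun "it".
    joined = [clauses[0]] + [c.replace("the ego vehicle", "it", 1) for c in clauses[1:]]
    return " and ".join(joined)

For example, build_explanation(["the ego vehicle severely jerks", "the ego vehicle takes an abrupt stop"]) would yield "the ego vehicle severely jerks and it takes an abrupt stop".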
|
759 |
+
VI. PERFORMANCE EVALUATION
|
760 |
+
This section gives the details of DriCon implemented over
|
761 |
+
a live setup as well as over the BDD dataset. We report the
|
762 |
+
performance of the SOM model and compare it against a
|
763 |
+
well-established baseline. Additionally, we show how well our
|
764 |
+
system has generated the textual explanations along with a
|
765 |
+
sensitivity analysis to assess how error-prone DriCon is.
|
766 |
+
We start with the experimental setup details as follows.
|
767 |
+
A. Experimental Setup
|
768 |
+
DriCon is implemented over a Raspberry Pi 3 Model B
|
769 |
+
microprocessor kit operating Raspbian OS with Linux kernel
|
770 |
+
version 5.15.65-v7+ along with 1 GB of primary memory
|
771 |
+
and ARMv7 processor. We primarily utilize the IMU, the
|
772 |
+
GPS, and the video data captured through the front camera
|
773 |
+
(facing towards the front windscreen) as different modalities.
|
774 |
+
For this purpose, we embed one MPU-9250 IMU sensor, one u-blox NEO-6M GPS module, and one Logitech USB
|
776 |
+
camera over the Raspberry Pi board, as depicted in Fig. 1(a).
|
777 |
+
We deployed DriCon over three different types of vehicles
|
778 |
+
(e.g., SUV, Sedan, & Hatchback). We hired 6 different drivers
|
779 |
+
in the age group of [20 − 45] who regularly drive in practice.
|
780 |
+
In total, our experimentation ran for more than two
|
781 |
+
months over three cities, resulting in approximately 33 hours
|
782 |
+
of driving over 1000 km distance. The drivers drove freely
|
783 |
+
without any specific instructions given, with each trip varying
|
784 |
+
from approximately 20 minutes to 2 hours. In addition, each
|
785 |
+
driver drove over five different types of roads (city street,
|
786 |
+
highway, residential, parking & campus road) at three different
|
787 |
+
times of the day (day, dusk & night). We evaluate DriCon by
|
788 |
+
analyzing how well our proposed model extracts the generative
|
789 |
+
|
790 |
+
micro-events FGEN (see §V-B). For implementing DriCon,
|
791 |
+
we consider δ = 5 seconds, ε = 1. The impact of other hy-
|
792 |
+
perparameters and resource consumption have been discussed
|
793 |
+
later during the analysis. We next discuss the ground-truth
|
794 |
+
annotation procedure used for the evaluation of DriCon.
|
795 |
+
B. Annotating Micro-events
|
796 |
+
We launched an annotation drive by floating a Google
|
797 |
+
form among a set of recruited annotators, where they had
|
798 |
+
to watch a video of at most 10 seconds and choose the
|
799 |
+
top-3 most influential factors impacting the driving behavior.
|
800 |
+
We do this annotation over the in-house data (video data
|
801 |
+
collected during the live experiments) and the videos over the
|
802 |
+
BDD dataset. For each video from both the datasets given
|
803 |
+
in the form, we showed only the clipped portion where the
|
804 |
+
score fluctuations had occurred. Next, out of the total 15
|
805 |
+
factors (including driving maneuvers and spatial micro-events)
|
806 |
+
given in a list, they were instructed to choose the top-3 most
|
807 |
+
influential factors responsible for the poor driving behavior
|
808 |
+
based on their visual perception. Besides, we also provided
|
809 |
+
the model-generated sentences (§V-B) and asked how relevant
|
810 |
+
and well-structured the sentences are (on a scale of [1−5]) for
|
811 |
+
explaining the change in the driving behavior. The annotators
|
812 |
+
also had the option to write their own explanation if they
|
813 |
+
perceived a better reason behind the driving behavior change.
|
814 |
+
As the number of trips is quite large, we designed a set
|
815 |
+
of Google forms (sample form1), each containing at most 20
|
816 |
+
videos to ensure the least cognitive load on the annotators. We
|
817 |
+
also collected annotators’ demographic information such as
|
818 |
+
age, gender, city, etc. We find that most participants (> 67%)
|
819 |
+
had prior driving skills. At least three independent annotators
|
820 |
+
had annotated each instance. Upon receiving the annotated
|
821 |
+
factors, we compute the agreement among the annotators
|
822 |
+
to ensure the received ground truth is unbiased and non-
|
823 |
+
random. As standard inter-annotator agreement policies (say,
|
824 |
+
Cohen’s kappa index) work on quantitative analysis or one-to-
|
825 |
+
one mapping, we cannot apply such metrics. Thus, we use the
|
826 |
+
majority voting technique where each listed factor is assigned
|
827 |
+
a percentage, signifying how many times the annotators choose
|
828 |
+
that factor. Each factor having a vote of at least 60% is kept in
|
829 |
+
FGT . We observe the minimum and the maximum cardinality
|
830 |
+
of FGT are 3 and 5, respectively. This also indicates that
|
831 |
+
the annotators agreed on selecting the factors that influenced
|
832 |
+
the driving behavior. FGT contains the annotated micro-events
|
833 |
+
against which FGEN is evaluated.
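The aggregation step reduces to counting votes per factor; a sketch (with illustrative factor names only) is:

from collections import Counter

def majority_vote(annotations, threshold=0.6):
    # annotations: list of top-3 factor lists, one per annotator.
    # A factor enters FGT if at least `threshold` of the annotators selected it.
    counts = Counter(f for factors in annotations for f in factors)
    n = len(annotations)
    return {f for f, c in counts.items() if c / n >= threshold}

# Example with three annotators:
# majority_vote([["congestion", "abrupt stop", "red light"],
#                ["congestion", "sideslip", "abrupt stop"],
#                ["abrupt stop", "congestion", "pedestrian"]])
# -> {"congestion", "abrupt stop"}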
|
834 |
+
C. Performance Metric
|
835 |
+
We use the Dice Similarity Coefficient score [32] (N)
|
836 |
+
which computes the similarity between FGT and FGEN as fol-
|
837 |
+
lows: N = 2 × |FGT ∩ FGEN| / (|FGT| + |FGEN|). We report the mean N across all
|
839 |
+
the trips to measure the accuracy of DriCon. Next, we also use
|
840 |
+
Average Treatment Effect [33] (ATE) to report comparatively
|
841 |
+
higher causal features out of the model identified features.
|
842 |
+
Finally, we define Percentage of Error as follows. First, we
|
843 |
+
1https://forms.gle/97N6uk4ujRaZSWbj8 (Accessed: January 16, 2023)
|
844 |
+
[Fig. 5 graphic: bar plots of Dice Coefficient (in %) for Top-3 and Top-5 features, panels (a) and (b).]
|
861 |
+
Fig. 5: (a) Dice Coefficient Similarity (in %) between Human
|
862 |
+
Annotated and Model Generated Features (b) Ablation Study
|
863 |
+
compute the set-difference as {FGT \FGEN}, and extract the
|
864 |
+
corresponding feature category (say, FM, FS). Once we get
|
865 |
+
the count of each feature category, we compute its percentage
|
866 |
+
out of the total trips as the Percentage of Error.
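Both metrics reduce to a few lines of set arithmetic; a sketch (with hypothetical inputs: per-trip pairs of annotated and generated feature sets, and a category lookup) is:

def dice(f_gt, f_gen):
    # Dice Similarity Coefficient N between annotated and generated feature sets.
    f_gt, f_gen = set(f_gt), set(f_gen)
    return 2 * len(f_gt & f_gen) / (len(f_gt) + len(f_gen))

def percentage_of_error(trips, category_of):
    # trips: list of (FGT, FGEN) pairs; category_of maps a feature to "FM" or "FS".
    missed = {"FM": 0, "FS": 0}
    for f_gt, f_gen in trips:
        for feature in set(f_gt) - set(f_gen):
            missed[category_of(feature)] += 1
    return {cat: 100.0 * n / len(trips) for cat, n in missed.items()}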
|
867 |
+
D. Baseline Implementation
|
868 |
+
As a baseline for extracting FGEN, we implement a super-
|
869 |
+
vised rule-based Random Forest (RF) algorithm with 20 deci-
|
870 |
+
sion trees where each tree is expanded to an unlimited depth
|
871 |
+
over the training data. We optimize the labels R^P_U with the
|
873 |
+
intuition that features will contribute differently to each of the
|
874 |
+
predicted scores. Although the RF-based model has a feature
|
875 |
+
importance score signifying the contribution of each feature
|
876 |
+
in constructing the model, we need to have an explanation of
|
877 |
+
how each feature contributes to predicting the driving scores
|
878 |
+
on a trip instance basis. Therefore, we use LIME [34] in the
|
879 |
+
background of the RF model for generating the explanatory
|
880 |
+
features. As LIME is a model-agnostic method, it tries to map
|
881 |
+
the relationship between the input features and output scores
|
882 |
+
by tweaking the feature values. Thus, it explains the range
|
883 |
+
of values and probability for each feature that contributes
|
884 |
+
to predicting the score. From the generated explanation, we
|
885 |
+
extract the contributing features FGEN along with their values
|
886 |
+
for further generation of textual explanation. This pipeline is
|
887 |
+
executed in a similar manner as described in §VI-A.
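A minimal sketch of this baseline using scikit-learn and the lime package is shown below; the data arrays and feature names are placeholders for the processed trip features of §IV, not the exact implementation.

from sklearn.ensemble import RandomForestRegressor
from lime.lime_tabular import LimeTabularExplainer

def rf_lime_baseline(X, y, feature_names, x_query, k=5):
    # X: (n_windows, 21) feature matrix; y: per-window driving scores R^P_U.
    rf = RandomForestRegressor(n_estimators=20, max_depth=None, random_state=0)
    rf.fit(X, y)
    explainer = LimeTabularExplainer(X, feature_names=feature_names,
                                     mode="regression")
    exp = explainer.explain_instance(x_query, rf.predict, num_features=k)
    # Each entry of exp.as_list() is (condition on a feature, local weight);
    # the features with the largest absolute contribution form the baseline FGEN.
    return [feat for feat, weight in
            sorted(exp.as_list(), key=lambda fw: abs(fw[1]), reverse=True)]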
|
888 |
+
E. Accuracy of Characterized Context
|
889 |
+
We present the accuracy of DriCon using the SOM and
|
890 |
+
RF+LIME model over the in-house dataset using Dice Coeffi-
|
891 |
+
cient Similarity N. We extract the top-k features from FGEN
|
892 |
+
where k ∈ {3, 5} and compute N between the two sets of fea-
|
893 |
+
tures – FGEN and FGT with top-k. Fig. 5(a) shows the result.
|
894 |
+
For top-3, we get 69% & 40% similarity on average with SOM
|
895 |
+
and RF+LIME, respectively. Whereas for top-5, we observe
|
896 |
+
79% & 48% similarity on average with SOM and RF+LIME,
|
897 |
+
respectively. As the in-house dataset has more complex micro-
|
898 |
+
events, the slight performance drop over the in-house dataset
|
899 |
+
using the top-3 features is tolerable. Intuitively, the model can
|
900 |
+
capture more diversity as perceived by the human annotators;
|
901 |
+
therefore, the similarity improves as we move from k = 3
|
902 |
+
to k = 5. However, as the RF+LIME considers each time
|
903 |
+
instance of a trip independently, its performance degrades.
|
904 |
+
It captures the dominant features responsible for the driving
|
905 |
+
behavior change within the current time window, rather than inspecting the impact of past time windows.
|
907 |
+
|
908 |
+
[Legend residue from Fig. 5: DriCon, DriCon-man., DriCon-spat., SOM, RF w/ LIME]
TABLE I: Similarity Measure among Human Annotated vs. Model Generated Output
Instance# | Human Annotated FGT | Model Generated FGEN | Similarity N(%) | ATE
1 | Poor Weather Conditions (Heavy Rainfall, Fog, etc.), Swerving, Congestion, Overtaking, Taking Abrupt Stop | Congestion, Preceding Vehicle Braking, Weaving, Abrupt Stop, Severe Jerkiness | 40% | 1.96
2 | Sideslip, Taking Abrupt Stop, Traffic Lights: Red | Traffic Lights: Red, Congestion, Abrupt Stop | 66.67% | 2.5
3 | Crossing Pedestrian, High Speed Variation among Cars, Weaving | Severe Jerkiness, Crossing Pedestrian, Weaving | 66.67% | 1.35
|
934 |
+
To have a glimpse, we present the explanatory features
|
935 |
+
(FGEN) vs. human-annotated ones (FGT) in Table I for a
|
936 |
+
sample of three test instances where the similarity (Dice coef-
|
937 |
+
ficient) is comparatively lower. Interestingly, when there is a
|
938 |
+
mismatch, we observe that the corresponding features from the
|
939 |
+
model-generated and human-annotated ones are conceptually
|
940 |
+
related for most of the time. Additionally, a positive high
|
941 |
+
mean ATE value for the model-generated mismatched features
|
942 |
+
signifies that the model perceived those features as more causal
|
943 |
+
than normal human perception. It can be noted that an ATE
|
944 |
+
value ≥ 1 indicates high causal relationships between the
|
945 |
+
features and the corresponding effect (changes in the driving
|
946 |
+
behavior). For example, in test instance #2, the mismatched
|
947 |
+
features are Sideslip (for human generated) and Congestion
|
948 |
+
(for model generated), where Congestion was relatively more
|
949 |
+
causal, affecting the change in the driving behavior. By manu-
|
950 |
+
ally analyzing this instance and interviewing the corresponding
|
951 |
+
driver, we found that he indeed made a minor sideslip on a congested road, as he was not very comfortable driving a manually-geared car in congestion.
|
954 |
+
[Fig. 6 graphic: SOM Map with neurons colored by the score values 2-5.]
|
958 |
+
Fig. 6: Generated Map from SOM for a 7×7 Network (Scaled
|
959 |
+
Down)
|
960 |
+
F. Ablation Study
|
961 |
+
Next, we understand the importance of different feature
|
962 |
+
categories corresponding to the driving maneuvers and on-road
|
963 |
+
spatial events, as described in §IV-A, on the overall perfor-
|
964 |
+
mance of DriCon. To study the impact of driving maneuvers
|
965 |
+
and spatial features, we implement SOM, excluding each of
|
966 |
+
the above feature classes one at a time, and evaluate N to
|
967 |
+
inspect the importance of each. The two variants other than
|
968 |
+
DriCon are constructed in the following way. (a) DriCon-
|
969 |
+
man.: Here, we exclude the driving maneuvers FM and keep
|
970 |
+
FS only. (b) DriCon-spat.: Next, we exclude the spatial
|
971 |
+
features FS and keep FM only. We evaluate these two variants
|
972 |
+
over both top-3 and top-5 generated features, along with
|
973 |
+
DriCon containing all the features, as depicted in Fig. 5(b).
|
974 |
+
On excluding the driving maneuvers and spatial features,
|
975 |
+
performance drops to 45% and 31%, respectively, for top-5
|
976 |
+
features. This drastic drop signifies the crucial importance of
|
977 |
+
spatial features, as these are the frequently changing features
|
978 |
+
responsible for fluctuating driving behavior.
|
979 |
+
G. Model Insight
|
980 |
+
To understand how the spatiotemporal dependency among
|
981 |
+
different features corresponding to the driving maneuvers and
|
982 |
+
various on-road spatial micro-events are derived, we use 49
|
983 |
+
neurons spread over a 7 × 7 two-dimensional array (a smaller
|
984 |
+
variant of the SOM network originally used to develop the
|
985 |
+
model, as the original model having 147 neurons is difficult to
|
986 |
+
visualize), fitted over 200 trips. This instance produces a Map
|
987 |
+
as depicted in Fig. 6, where all the given trips are assigned
|
988 |
+
to each of the neurons. The scores R^P_U are used only for
|
990 |
+
visual depiction purpose of how the trips are located on the
|
991 |
+
Map. Each trip captures the change in the driving behavior
|
992 |
+
using the feature variation. The neurons with multi-color are
|
993 |
+
of more importance than the mono-color, as in those, the
|
994 |
+
score fluctuations are most observed. During a stand-alone
|
995 |
+
trip, the features corresponding to each instance of the trip
|
996 |
+
will have a similar value until there is a change in the driving
|
997 |
+
behavior, thus getting assigned to the same neuron (mono-
|
998 |
+
color). However, the difference in the driving behavior induces
|
999 |
+
distinct feature values than the previous instances; thus, it gets
|
1000 |
+
assigned to a different neuron in the Map. The neurons having
|
1001 |
+
multi-color, as depicted in Fig. 6, map the trip instances where
|
1002 |
+
a sudden change of driving behavior has occurred.
|
1003 |
+
H. Dissecting DriCon
|
1004 |
+
We next benchmark the resource consumption behavior of
|
1005 |
+
DriCon, followed by an analysis of the model’s significance
|
1006 |
+
and sensitivity.
|
1007 |
+
1) Edge-device Resource Consumption: We benchmark
|
1008 |
+
the CPU & memory usage, processing time, temperature rise,
|
1009 |
+
and energy consumption over two cases: when (a) the device
|
1010 |
+
is idle, & (b) DriCon is running. From Fig. 7(a), we observe
|
1011 |
+
that in idle mode, on average, 2% of CPU (using “top”
|
1012 |
+
command) is used. On the contrary, running DriCon uses at
|
1013 |
+
most 10% of the processor, which is acceptable. However,
|
1014 |
+
the memory usage is a bit high (≈ 500MB) mainly due to
|
1015 |
+
video processing overhead as depicted in Fig. 7(b). Next,
|
1016 |
+
we show the required processing time starting from data
|
1017 |
+
acquisition to output generation on a number of trip basis.
|
1018 |
+
|
1019 |
+
[Fig. 7 graphic: plots of CPU consumption (in %), memory consumption (in MB), processing time (in mins) vs. #trips, temperature rise (in °C), and energy consumption (in W-Hr) for the Idle, Live, and Nexar cases.]
|
1082 |
+
Fig. 7: Resource Consumption over the Edge-device (a) CPU Usage (b) Memory Usage (c) Histogram of Processing Time
|
1083 |
+
w.r.t., #Trips (d) Temperature Rise, (e) Energy Consumed
|
1084 |
+
[Fig. 8 graphic: (a) annotation scores for Relevance and Well-Structured, (b) %age of error for Spatial and Maneuver features, (c) Dice Coefficient (in %) for Top-3 and Top-5 features.]
|
1107 |
+
Fig. 8: (a) Significance of DriCon (b) Sensitivity Analysis of
|
1108 |
+
DriCon (c) Performance on BDD Dataset
|
1109 |
+
DriCon generates the output within ≈ 3 minutes for the majority of the trips, further validating its short response time
|
1111 |
+
(see Fig. 7(c)). To further delve deeper, we also log the
|
1112 |
+
temperature rise (from the “vcgencmd measure_temp” command)
|
1113 |
+
and total energy consumption using Monsoon High Voltage
|
1114 |
+
Power Monitor [35] while running DriCon. From Fig. 7(d) &
|
1115 |
+
(e), we observe that the temperature rose to at most 59°C,
|
1116 |
+
while on average, 13 Watt-hour energy is consumed, which is
|
1117 |
+
nominal for any live system. To benchmark DriCon, we have
|
1118 |
+
also measured the energy consumption of the Nexar dashcam,
|
1119 |
+
which consumes 22 Watt-hour on an average, while capturing
|
1120 |
+
very few driving maneuvers (say, hard brake) without any
|
1121 |
+
context. This further justifies that DriCon never exhausts the resources on the edge-device and can still detect the micro-events accurately.
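A sketch of the on-device logging used for these measurements is given below (assuming psutil is available on the Raspberry Pi; the Monsoon power readings are taken with external hardware and are not shown).

import subprocess, time
import psutil  # assumed available on the edge-device

def log_resources(duration_s=60, period_s=5):
    # Periodically sample CPU, memory, and SoC temperature while DriCon runs.
    samples = []
    for _ in range(int(duration_s / period_s)):
        cpu = psutil.cpu_percent(interval=None)            # % of the ARMv7 processor
        mem = psutil.virtual_memory().used / 2**20         # resident memory in MB
        temp = subprocess.check_output(
            ["vcgencmd", "measure_temp"]).decode().strip()  # e.g. "temp=58.0'C"
        samples.append((cpu, mem, temp))
        time.sleep(period_s)
    return samples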
|
1124 |
+
2) Significance of Generated Explanation:
|
1125 |
+
Next, we
|
1126 |
+
check how significant our generated explanations are. As
|
1127 |
+
reported in §VI-B, we plot the distribution of annotated
|
1128 |
+
scores (given by the recruited annotators) for the two fields –
|
1129 |
+
“Relevance” and “Well-Structured”. “Relevance” signifies the
|
1130 |
+
generated explanation’s applicability in explaining unexpected
|
1131 |
+
events. In contrast, “Well-structured” indicates how well inter-
|
1132 |
+
pretative the generated sentences are as per human cognition.
|
1133 |
+
Fig. 8(a) depicts a median value of 5 and 4 for “Relevance”
|
1134 |
+
and “Well-Structured”, respectively, which further justifies
|
1135 |
+
the credibility of DriCon. We also compute the similarity
|
1136 |
+
between the human-annotated and model-generated sentences
|
1137 |
+
and obtain a minimum, maximum, and mean similarity value
|
1138 |
+
as 51.33%, 85.5% & 70.57%, respectively, using the TF-IDF
|
1139 |
+
vectorizer. Thus, DriCon auto-generates contextual explanations that are nearly indistinguishable from human-written ones, which further shows its applicability for giving feedback to stakeholders in their decision-making procedures.
|
1144 |
+
3) Sensitivity of DriCon: Finally, we inspect the micro-
|
1145 |
+
events that DriCon fails to capture. Apart from a model's efficiency, we must also look into its deficiencies to analyze how much they might affect the overall performance.
|
1148 |
+
Especially, this is important in the case where stakeholders
|
1149 |
+
are boosting/penalizing the driver’s profile. As depicted in
|
1150 |
+
Fig. 8(b), the failure to capture both the spatial and maneuver features is low. Although this might lead to degraded model performance, as studied in §VI-F, driving maneuvers (FM) do not contribute as strongly to model performance due to their inter-dependency on spatial features (FS). For FS, the Percentage of Error is still ≤ 13%, making the system less prone to generating erroneous contextual explanations.
|
1157 |
+
I. Offline Performance
|
1158 |
+
Finally, we report the accuracy of our system over the BDD
|
1159 |
+
dataset comprising 17 hours of driving data over 1.5k trips
|
1160 |
+
using N. As depicted in Fig. 8(c), DriCon performs quite
|
1161 |
+
well on pre-recorded data, with N = {71%, 84%}, for top-
|
1162 |
+
3 and top-5 features. We observe that SOM can identify the
|
1163 |
+
micro-events in a better way for offline analysis with a public
|
1164 |
+
dataset. However, as running the system live in a realistic driving environment is essential beyond offline analysis, this slight accuracy drop can be tolerated.
|
1167 |
+
VII. CONCLUSION
|
1168 |
+
This paper developed an intelligent system on the edge-
|
1169 |
+
device called DriCon leveraging multi-modalities to detect the
|
1170 |
+
micro-events responsible for unexpected fluctuations in driving
|
1171 |
+
behavior. The human-interpretable explanations generated by
|
1172 |
+
DriCon show their relevance and credibility in identifying
|
1173 |
+
such context. Further, the spatiotemporal dependency among
|
1174 |
+
various features is inspected in an unsupervised manner to
|
1175 |
+
capture a diverse set of driving scenarios. Additionally, the
|
1176 |
+
resource-friendly deployment over a live testbed further vali-
|
1177 |
+
dates DriCon. Although our study captures the context where
|
1178 |
+
each feature’s contribution is taken independently, inter-feature
|
1179 |
+
dependency is not captured explicitly. For instance, say, a
|
1180 |
+
driver suddenly weaves while taking a turn to avoid colliding
|
1181 |
+
with a crossing pedestrian, making the following vehicle’s
|
1182 |
+
driver slam the brake. Here, the first driver’s action is due
|
1183 |
+
to the crossing pedestrian, which in turn impacts the second
|
1184 |
+
driver’s action. The analysis of such complex and collective
|
1185 |
+
interactions among the vehicles needs a more sophisticated
|
1186 |
+
|
1187 |
+
system, possibly a different modality that can connect the
|
1189 |
+
inter-vehicle interactions. However, DriCon provides a simple,
|
1190 |
+
in-the-silo solution that can be independently deployed over
|
1191 |
+
vehicles with a dashboard-mounted edge-device or dashcam.
|
1192 |
+
REFERENCES
|
1193 |
+
[1] "Road traffic injuries," World Health Organization (WHO), https://www.who.int/news-room/fact-sheets/detail/road-traffic-injuries, 2022 (Online Accessed: January 16, 2023).
[2] "Institute of Engineering, Tokyo University of Agriculture and Technology (TUAT). Smart Mobility Research Center - Research," https://web.tuat.ac.jp/~smrc/research.html, 2017 (Online Accessed: January 16, 2023).
[3] National Highway Traffic Safety Administration (NHTSA), https://www.nhtsa.gov/ (Online Accessed: January 16, 2023).
[4] "Lidars for self-driving vehicles: a technological arms race," https://www.automotiveworld.com/articles/lidars-for-self-driving-vehicles-a-technological-arms-race/, 2020 (Online Accessed: January 16, 2023).
[5] Z. Li, C. Wu, S. Wagner, J. C. Sturm, N. Verma, and K. Jamieson, "Reits: Reflective surface for intelligent transportation systems," in 22nd ACM HotMobile, 2021, pp. 78-84.
[6] R. Akikawa, A. Uchiyama, A. Hiromori, H. Yamaguchi, T. Higashino, M. Suzuki, Y. Hiehata, and T. Kitahara, "Smartphone-based risky traffic situation detection and classification," in IEEE PerCom Workshops, 2020, pp. 1-6.
[7] D. A. Ridel, N. Deo, D. Wolf, and M. Trivedi, "Understanding pedestrian-vehicle interactions with vehicle mounted vision: An LSTM model and empirical analysis," in 2019 IEEE Intelligent Vehicles Symposium (IV), pp. 913-918.
[8] F. Yu, W. Xian, Y. Chen, F. Liu, M. Liao, V. Madhavan, and T. Darrell, "BDD100K: A diverse driving video database with scalable annotation tooling," arXiv preprint arXiv:1805.04687, vol. 2, no. 5, p. 6, 2018.
[9] "Vehicle detection and distance estimation," https://towardsdatascience.com/vehicle-detection-and-distance-estimation-7acde48256e1, 2017 (Online Accessed: January 16, 2023).
[10] D. Das, S. Pargal, S. Chakraborty, and B. Mitra, "Dribe: on-road mobile telemetry for locality-neutral driving behavior annotation," in 23rd IEEE MDM, 2022, pp. 159-168.
[11] D. Mohan, G. Tiwari, and K. Bhalla, "Road safety in India: Status report 2019," Transportation Research & Injury Prevention Programme, Indian Institute of Technology Delhi, http://tripp.iitd.ac.in/assets/publication/Road Safety in India2018.pdf, 2019 (Online Accessed: January 16, 2023).
[12] K. Fu, Z. Chen, and C.-T. Lu, "Streetnet: preference learning with convolutional neural network on urban crime perception," in Proceedings of the 26th ACM SIGSPATIAL, 2018, pp. 269-278.
[13] K. Patroumpas, N. Pelekis, and Y. Theodoridis, "On-the-fly mobility event detection over aircraft trajectories," in Proceedings of the 26th ACM SIGSPATIAL, 2018, pp. 259-268.
[14] I. Janveja, A. Nambi, S. Bannur, S. Gupta, and V. Padmanabhan, "Insight: monitoring the state of the driver in low-light using smartphones," Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies, vol. 4, no. 3, pp. 1-29, 2020.
[15] X. Fan, F. Wang, D. Song, Y. Lu, and J. Liu, "Gazmon: eye gazing enabled driving behavior monitoring and prediction," IEEE Transactions on Mobile Computing, 2019.
[16] M. Walch, M. Woide, K. Mühl, M. Baumann, and M. Weber, "Cooperative overtaking: Overcoming automated vehicles' obstructed sensor range via driver help," in 11th ACM AutomotiveUI, 2019, pp. 144-155.
[17] H. T. Lam, "A concise summary of spatial anomalies and its application in efficient real-time driving behaviour monitoring," in Proceedings of the 24th ACM SIGSPATIAL, 2016, pp. 1-9.
[18] S. Moosavi, B. Omidvar-Tehrani, R. B. Craig, A. Nandi, and R. Ramnath, "Characterizing driving context from driver behavior," in Proceedings of the 25th ACM SIGSPATIAL, 2017, pp. 1-4.
[19] Y. Shi, R. Biswas, M. Noori, M. Kilberry, J. Oram, J. Mays, S. Kharude, D. Rao, and X. Chen, "Predicting road accident risk using geospatial data and machine learning (demo paper)," in Proceedings of the 29th ACM SIGSPATIAL, 2021, pp. 512-515.
[20] M. R. Samsami, M. Bahari, S. Salehkaleybar, and A. Alahi, "Causal imitative model for autonomous driving," arXiv preprint arXiv:2112.03908, 2021.
[21] V. Ramanishka, Y.-T. Chen, T. Misu, and K. Saenko, "Toward driving scene understanding: A dataset for learning driver behavior and causal reasoning," in IEEE CVPR, 2018, pp. 7699-7707.
[22] F. Codevilla, E. Santana, A. M. López, and A. Gaidon, "Exploring the limitations of behavior cloning for autonomous driving," in IEEE/CVF ICCV, 2019, pp. 9329-9338.
[23] J. Redmon and A. Farhadi, "Yolov3: An incremental improvement," arXiv, 2018.
[24] J. Yu, Z. Chen, Y. Zhu, Y. Chen, L. Kong, and M. Li, "Fine-grained abnormal driving behaviors detection and identification with smartphones," IEEE Transactions on Mobile Computing, vol. 16, no. 8, pp. 2198-2212, 2016.
[25] T.-Y. Lin, M. Maire, S. Belongie, J. Hays, P. Perona, D. Ramanan, P. Dollár, and C. L. Zitnick, "Microsoft COCO: Common objects in context," in ECCV. Springer, 2014, pp. 740-755.
[26] D. Das, S. Pargal, S. Chakraborty, and B. Mitra, "Why slammed the brakes on? auto-annotating driving behaviors from adaptive causal modeling," in IEEE PerCom Workshops, pp. 587-592.
[27] T. Kohonen, "The self-organizing map," Proceedings of the IEEE, vol. 78, no. 9, pp. 1464-1480, 1990.
[28] C. Rudin, "Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead," Nature Machine Intelligence, vol. 1, no. 5, pp. 206-215, 2019.
[29] "Neighborhood function," https://users.ics.aalto.fi/jhollmen/dippa/node21.html (Online Accessed: January 16, 2023).
[30] "Guidelines for pedestrian facilities," http://www.irc.nic.in/admnis/admin/showimg.aspx?ID=345 (Online Accessed: January 16, 2023).
[31] J. Pennington, R. Socher, and C. D. Manning, "Glove: Global vectors for word representation," in EMNLP, 2014, pp. 1532-1543.
[32] A. Carass, S. Roy, A. Gherman, J. C. Reinhold, A. Jesson, T. Arbel, O. Maier, H. Handels, M. Ghafoorian, B. Platel et al., "Evaluating white matter lesion segmentations with refined Sørensen-Dice analysis," Scientific Reports, vol. 10, no. 1, pp. 1-19, 2020.
[33] D. B. Rubin, "Estimating causal effects of treatments in randomized and nonrandomized studies," Journal of Educational Psychology, vol. 66, no. 5, p. 688, 1974.
[34] M. T. Ribeiro, S. Singh, and C. Guestrin, "'Why should I trust you?' Explaining the predictions of any classifier," in Proceedings of the 22nd ACM SIGKDD, 2016, pp. 1135-1144.
[35] "Monsoon high voltage power monitor," https://www.msoon.com/online-store/High-Voltage-Power-Monitor-p90002590 (Online Accessed: January 16, 2023).
|
1361 |
+
|
2dE4T4oBgHgl3EQfzw3P/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4dFAT4oBgHgl3EQfExzK/content/tmp_files/2301.08424v1.pdf.txt
ADDED
@@ -0,0 +1,1016 @@
1 |
+
Possible new phase transition in the 3D Ising Model
|
2 |
+
associated with boundary percolation
|
3 |
+
Michael Grady
|
4 |
+
Department of Physics
|
5 |
+
State University of New York at Fredonia
|
6 |
+
Fredonia NY 14063 USA
|
7 | |
8 |
+
January 23, 2023
|
9 |
+
Abstract
|
10 |
+
In the ordered phase of the 3D Ising model, minority spin clusters are surrounded by
|
11 |
+
a boundary of dual plaquettes. As the temperature is raised, these spin clusters become
|
12 |
+
more numerous, and it is found that eventually their boundaries undergo a percolation
|
13 |
+
transition when about 13% of spins are minority. Boundary percolation differs from the
|
14 |
+
more commonly studied site and link percolation, although it is related to an unusual
|
15 |
+
type of site percolation that includes next to nearest neighbor relationships. Because
|
16 |
+
the Ising model can be reformulated in terms of the domain boundaries alone, there is
|
17 |
+
reason to believe boundary percolation should be relevant here. A symmetry-breaking
|
18 |
+
order parameter is found in the dual theory, the 3D gauge Ising model. It is seen to
|
19 |
+
undergo a phase transition at a coupling close to that predicted by duality from the
|
20 |
+
boundary percolation. This transition lies in the disordered phase of the gauge theory
|
21 |
+
and has the nature of a spin-glass transition.
|
22 |
+
Its critical exponent ν ∼ 1.3 is seen
|
23 |
+
to match the finite-size shift exponent of the percolation transition further cementing
|
24 |
+
their connection. This predicts a very weak specific heat singularity with exponent
|
25 |
+
α ∼ −1.9. The third energy cumulant fits well to the expected non-infinite critical
|
26 |
+
behavior in a manner consistent with both the predicted exponent and critical point,
|
27 |
+
indicating a true thermal phase transition. Unlike random boundary percolation, the
|
28 |
+
Ising boundary percolation has two different ν exponents, one associated with largest-
|
29 |
+
cluster scaling and the other with finite-size transition-point shift. This suggests there
|
30 |
+
are two different correlation lengths present.
|
31 |
+
PACS: 05.50+q, 05.70.Jk, 64.60.ah, 64.60.F
|
32 |
+
Keywords: Ising model, Gauge Ising model, spin glass, percolation, phase transition
|
33 |
+
arXiv:2301.08424v1 [cond-mat.stat-mech] 20 Jan 2023
|
34 |
+
|
35 |
+
1 Introduction
|
37 |
+
The Ising models in two and three dimensions are the most basic spin models which undergo
|
38 |
+
order-disorder transitions. These have been extremely well studied and, of course, an exact
|
39 |
+
solution exists in the two-dimensional case. The 3D model has always been a bit more of a
|
40 |
+
mystery, and in this paper we explore the possibility of a weak secondary phase transition
|
41 |
+
within the ordered phase.
|
42 |
+
Presumably this is associated with some geometrical change
|
43 |
+
in spin-clustering, but the exact nature of this reordering is unknown. The situation is
|
44 |
+
clearer in the dual theory, the 3D gauge Ising model. Here the suspected transition is in
|
45 |
+
the disordered phase, and clearly has the nature of a spin-glass transition. In other words
|
46 |
+
we have identified a symmetry-breaking order parameter in the dual theory, but not in
|
47 |
+
the Ising model itself. However, the Ising model does exhibit an interesting percolation
|
48 |
+
phenomenon near the critical point predicted by duality from the spin-glass transition.
|
49 |
+
This is a percolation of the domain boundaries between + and - spin clusters. As shown
|
50 |
+
below, boundary percolation can be considered a third type of percolation, beyond site and
|
51 |
+
link percolation, although it has a close relationship to an unusual type of site percolation.
|
52 |
+
Of course percolation is not always related to a phase transition, but sometimes it is.
|
53 |
+
Its linkage in this case to a symmetry-breaking transition in the dual theory provides strong evidence that it is associated with a phase transition here. The argument is
|
55 |
+
further strengthened by independent fits of the third energy cumulant to consistent critical
|
56 |
+
behavior about the suspected critical point. Each investigation, the order-parameter in the
|
57 |
+
dual theory, the boundary percolation finite-size shift, and the third energy moment, yields
|
58 |
+
an independent determination of the critical exponent ν, all of which agree to a fairly close
|
59 |
+
tolerance.
|
60 |
+
In the following, first the boundary percolation concept is fleshed out and studied in
|
61 |
+
both the 2D and 3D Ising models, as well as for 3D random percolation. It is found that
|
62 |
+
the latter has the same critical exponents as for site percolation, and in fact is equivalent to
|
63 |
+
site percolation if next to nearest neighbors are included in the cluster definition. The 3D
|
64 |
+
Ising case is particularly interesting in that an analysis of the finite-size shift in percolation
|
65 |
+
threshold gives a critical exponent very different from the percolation value, even though
|
66 |
+
the cluster scaling still obeys the percolation exponents. This suggests it is linked to a phase
|
67 |
+
transition with its own dynamical scaling and correlation length. Then we move on to the
|
68 |
+
dual theory and introduce the spin-glass order parameter. A Monte Carlo study here shows
|
69 |
+
clear crossings in the Binder cumulant and second-moment correlation length divided by
|
70 |
+
lattice size. Correlation-length finite-size scaling is exhibited around the suspected critical
|
71 |
+
point using scaling collapse plots, which also yield critical exponents. The critical exponent
|
72 |
+
ν is found to match well with that found from the finite-size shift in percolation threshold.
|
73 |
+
Finally, we study energy moments, such as specific heat and higher moments. Unlike an
|
74 |
+
order parameter, these have both critical and non-critical pieces, so fitting can be difficult.
|
75 |
+
This leads to the selection of the third energy cumulant as the best prospect for finding
|
76 |
+
critical behavior as it can be fit without a non-critical part other than a constant. A Monte
|
77 |
+
Carlo study with several times 10^9 sweeps per point on 30^3, 40^3, and 50^3 lattices yields a
|
78 |
+
precise determination of this quantity. An independent critical behavior fit in the region of
|
79 |
+
the suspected critical point gives values for both ν and κc which agree well with the two
|
80 |
+
|
81 |
+
|
82 |
+
other predictions. A substantial jump in the coefficient of the critical scaling fit across the
|
83 |
+
transition further cements evidence for a thermal singularity here.
|
84 |
+
The rather high value of ν ∼ 1.3 gives a highly negative value for the specific heat
|
85 |
+
exponent α = 2 − dν ∼ −1.9. This means that both the specific heat and third cumulant
|
86 |
+
have finite singularities. A very weak infinite singularity is expected in the fourth cumulant
|
87 |
+
and stronger ones in fifth and higher.
|
88 |
+
In the Ehrenfest classification this transition is
|
89 |
+
fourth order.
|
90 |
+
We attempted to measure fourth and fifth cumulants to find evidence of
|
91 |
+
peaks growing with lattice size as expected from infinite singularities, but even with the
|
92 |
+
rather large sample size here, these were still largely obscured by random error. However,
|
93 |
+
finite singularities are just as singular as infinite ones, so perhaps one lesson is that one
|
94 |
+
should not necessarily obsess over trying to find infinite singularities in transitions of such
|
95 |
+
high order.
|
96 |
+
Figure 1: Example of a boundary cluster. Sites marked with dots have opposite orientation to all
|
97 |
+
surrounding sites.
|
98 |
+
|
99 |
+
|
100 |
+
2 Boundary percolation
|
102 |
+
The standard partition function for the Ising model is
|
103 |
+
Z = Σ_{σ} exp(κ Σ_{n.n.} σ_i σ_j),    (1)
|
111 |
+
where the σ’s are classical spins taking values ±1 and the coupling is between nearest
|
112 |
+
neighbors only. There is a well-known reformulation of the Ising models in terms of the
|
113 |
+
boundaries themselves[1]. This reformulation even leads to an alternate exact solution in
|
114 |
+
the 2D case[2]. The partition function can be written
|
115 |
+
Z = Σ_A N(A) exp(−κA)    (2)
|
120 |
+
where A is the total area of dual boundary plaquettes (or dual boundary links in 2D) in
|
121 |
+
a configuration and N(A) are the number of distinct non-intersecting boundary configura-
|
122 |
+
tions with that area. In this formulation there are no spins or domains. Only the boundary
|
123 |
+
surfaces need exist, and the entropy associated with these surfaces controls the phase tran-
|
124 |
+
sition. This is one reason why percolation of the domain boundary might be important for
|
125 |
+
this model, as opposed to, say, site percolation. For instance, the density of states could
|
126 |
+
change abruptly when an infinite boundary cluster forms, because for a finite cluster the
|
127 |
+
area is usually an increasing function of the volume, whereas an infinite cluster can easily
|
128 |
+
grow in volume without adding much to the area. If this were the case then the free energy
|
129 |
+
would form a singularity at the boundary percolation point.
|
130 |
+
We define boundary percolation as follows. Consider the set of all boundary links that
|
131 |
+
connect + and - sites.
|
132 |
+
These are each associated with a plaquette on the dual lattice.
|
133 |
+
These plaquettes form closed surfaces separating clusters of + and - spins. These surfaces
|
134 |
+
can form clusters themselves, if we define boundary clusters to be made up of boundary
|
135 |
+
surfaces that share dual-lattice links. For instance, Fig. 1 shows a single boundary cluster.
|
136 |
+
This same configuration would, however, count as two separate site-clusters, since sites are
|
137 |
+
clustered only along lattice directions. If the site cluster concept is extended to include
|
138 |
+
sites connected by face diagonals, i.e. next nearest neighbors (NNN) in addition to near-
|
139 |
+
est neighbors (NN), then these redefined site clusters would appear to coincide with the
|
140 |
+
boundary cluster concept. Indeed we have verified for thousands of configurations that
|
141 |
+
those with percolating boundaries also have percolating NN+NNN site-clusters, and vice
|
142 |
+
versa, so they do appear to measure the same thing.
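This equivalence also makes boundary clusters easy to construct in practice: an 18-neighbour (NN+NNN) connected-component labeling of the minority sites reproduces them. A minimal sketch using scipy.ndimage is given below; it assumes free boundary conditions, so handling the periodic wrap-around would require an extra cluster-merging step not shown here.

import numpy as np
from scipy.ndimage import label, generate_binary_structure

def boundary_percolates(spins):
    # spins: (L, L, L) array of +/-1 values.
    minority = -1 if (spins == 1).sum() >= spins.size / 2 else 1
    mask = spins == minority
    structure = generate_binary_structure(3, 2)   # NN + face-diagonal NNN (18 neighbours)
    labels, n = label(mask, structure=structure)
    for c in range(1, n + 1):
        x, y, z = np.nonzero(labels == c)
        # A cluster "percolates" here if it spans the lattice in every direction.
        if all(axis.max() - axis.min() == s - 1
               for axis, s in zip((x, y, z), spins.shape)):
            return True
    return False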
|
143 |
+
It seems boundary percolation, which in three dimensions could also be called plaquette
|
144 |
+
percolation, has only been studied before in the form of the equivalent extended NN+NNN
|
145 |
+
site percolation problem[3], as a part of surveys of various extended percolation models, but
|
146 |
+
never applied to the Ising model. The main result of these studies is establishing a threshold
|
147 |
+
for random NN+NNN percolation at a minority site probability of 0.1372(1). Because the
|
148 |
+
Ising model is interacting, correlations would be expected to modify this result, but still
|
149 |
+
it should be kept in mind.
|
150 |
+
Fig. 2ab shows the evolution of the Ising model boundary
|
151 |
+
percolation threshold κ*_L with lattice size L for both two and three dimensions. These both
|
153 |
+
|
154 |
+
|
155 |
+
scale well with the finite-size scaling relation
|
156 |
+
κ*_L = κ_c − c L^(−1/ν)    (3)
|
159 |
+
where κc is the infinite lattice threshold. The percolation threshold is defined here as the
|
160 |
+
point where 50% of lattices have a cluster which percolates in all directions. For two dimen-
|
161 |
+
sions, boundary percolation exists in the random phase, and ceases in the ferromagnetic
|
162 |
+
phase. The above fit gives κc = 0.4405(5) which agrees well with the known ferromagnetic
|
163 |
+
transition point (1/2) ln(√2 + 1) ≃ 0.44069. This is just the opposite of majority-site perco-
|
167 |
+
lation, which happens only in the magnetized phase. Thus in two dimensions boundary-
|
168 |
+
percolation and site-percolation seem equally relevant.
|
169 |
+
The exponent derived from the
|
170 |
+
finite-size scaling fit to Fig. 2a is ν = 1.261(18). This seems slightly different from the
|
171 |
+
standard 2D site-percolation exponent ν = 4/3 but of course that is for a non-interacting
|
172 |
+
system. There could also be a small correction from next to leading order scaling effects.
|
173 |
+
As far as we know, the critical exponents for random NN+NNN site percolation or ran-
|
174 |
+
dom boundary-percolation have not been previously measured.
|
175 |
+
In principle they could
|
176 |
+
differ from site percolation, however in three dimensions we find below that the critical
|
177 |
+
exponents for random boundary percolation appear to be the same as for ordinary site
|
178 |
+
percolation. Probably the same is true in two dimensions, but we did not perform that
|
179 |
+
measurement.
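For reference, the finite-size fits quoted here are ordinary nonlinear least-squares fits of Eq. (3); a sketch with scipy is shown below, where the data points are placeholders rather than the measured thresholds.

import numpy as np
from scipy.optimize import curve_fit

# Illustrative only: lattice sizes and 50%-percolation couplings kappa*_L
# (stand-in numbers, not the measured values).
L = np.array([40.0, 64.0, 100.0, 128.0])
kappa_star = np.array([0.2465, 0.2470, 0.2474, 0.2476])

def shift(Lval, kappa_c, c, nu):
    return kappa_c - c * Lval**(-1.0 / nu)

popt, pcov = curve_fit(shift, L, kappa_star, p0=(0.248, 0.02, 1.2))
kappa_c, c, nu = popt
err = np.sqrt(np.diag(pcov))   # one-sigma uncertainties on (kappa_c, c, nu)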
|
180 |
+
[Fig. 2 graphic: κ*_L plotted against 1/L for the 2D (a) and 3D (b) Ising models.]
|
213 |
+
Figure 2: Finite-size shift of the boundary percolation threshold for the 2D (a) and 3D (b) Ising
|
214 |
+
model. Error bar ranges are 1/10 to 1/20 of symbol size.
|
215 |
+
In two dimensions minority sites never percolate. In three dimensions there are a lot
|
216 |
+
more paths. Majority sites always percolate and minority sites percolate in the random
|
217 |
+
phase and about the first 5% of the ferromagnetic phase, measured by temperature. Even-
|
218 |
+
tually minority sites get too few and percolation is lost at κ = 0.2346(13) where the mag-
|
219 |
+
netization is about 0.62[4]. There is no visible effect on other quantities at this point. For
|
220 |
+
comparison, the ferromagnetic transition is at κ = 0.2216595(26) [5]. Because in boundary
|
221 |
+
|
222 |
+
|
223 |
+
percolation the clusters are more liberally defined, it persists even further into the ferro-
|
224 |
+
magnetic phase. From the fit to Fig. 2b we find κc = 0.24781(4) and ν = 1.30(3). Here the
|
225 |
+
magnetization is about 0.7364. This means that 13.18(4)% of the sites are minority, which
|
226 |
+
is about 4% lower than the value mentioned above, pc = 0.1372, for random NN+NNN site
|
227 |
+
percolation (our study of random boundary percolation below also corroborates this value).
|
228 |
+
The value found here for the exponent ν is particularly interesting. It is not at all close to
|
229 |
+
the correlation length exponent for random site percolation ν ≃ 0.88[6, 7] measured from
|
230 |
+
the finite-size scaling of the infinite cluster. For random boundary percolation we find a
|
231 |
+
similar value below. Even in the 3D Ising case where interactions could change the result
|
232 |
+
we still find scaling of the largest cluster gives ν ≃ 0.87 (detailed below). We are led to
|
233 |
+
conclude that a different correlation length is controlling the finite-lattice shift exponent in
|
234 |
+
this case. This makes the case of boundary percolation in the 3D Ising model of consider-
|
235 |
+
able theoretical interest, because a system with two different correlation lengths diverging
|
236 |
+
at the same place is, to say the least, unusual.
|
237 |
+
[Fig. 3 graphic: p*_L plotted against 1/L for the 3D random percolation model.]
|
254 |
+
Figure 3: Finite-size shift of boundary percolation threshold for 3D random percolation model.
|
255 |
+
Error bar range is about 1/8 symbol size.
|
256 |
+
Now we consider the case of random boundary percolation. This is an interaction-free
|
257 |
+
model where positive sites are placed at random in the lattice, with the fraction of positive
|
258 |
+
sites given as p. The remaining sites are, of course, set negative. Fig. 3 shows the finite-size
|
259 |
+
shift of percolation threshold, p*_L. From the scaling relation given above we find the infinite
|
261 |
+
lattice threshold as pc = 0.13730(4), which is fairly close to the percentage of positive sites
|
262 |
+
at the 3D Ising boundary percolation threshold (they differ by 4%). However, the exponent
|
263 |
+
here is quite different from the 3D Ising value of 1.30(3). We find ν = 0.91(5), consistent
|
264 |
+
with well-known measurements of the site-percolation exponent. One can also determine ν
|
265 |
+
from scaling of the largest cluster. If one defines P to be the fraction of plaquettes occupied
|
266 |
+
by the largest cluster, then the same finite-size scaling analysis as is usually applied to the
|
267 |
+
magnetization in a magnetic system undergoing a thermal phase transition can be applied
|
268 |
+
[8] to P, its susceptibility
|
269 |
+
|
270 |
+
|
271 |
+
χ = (⟨P²⟩ − ⟨P⟩²) N_p    (4)
and the corresponding Binder fourth-order cumulant
U = (⟨P⁴⟩ − ⟨P²⟩²) / (3⟨P²⟩²).    (5)
|
276 |
+
Here Np is the number of plaquettes in the lattice. The correlation-length scaling hypoth-
|
277 |
+
esis implies that these should collapse onto universal functions if scaled according to their
|
278 |
+
[Figure 4 data plots, panels (a)-(c): U, χ, and P versus p; axis tick values omitted.]
Figure 4: Boundary percolation study in the 3D random percolation model. Binder cumulant U (a),
|
322 |
+
susceptibility χ (b), and fraction of sites occupied by largest cluster P (c) vs. fraction of positive
|
323 |
+
sites, p. Triangles are 403, boxes 643, ×’s 1003, and open circles 1283. Error bar ranges for U are
|
324 |
+
about 1/30 the size of plotted points, between 1/4 and 1/10 for χ, and 1/15 for P.
|
325 |
+
7
|
326 |
+
|
327 |
+
respective exponents and plotted against the scaling variable
|
328 |
+
$x = (p - p_c)\, L^{1/\nu}$
|
329 |
+
(6)
|
330 |
+
where L is the linear lattice size. Fig. 4abc shows the data for U, χ and P as a function
|
331 |
+
of the concentration of positive links p for lattices of size 403, 643, 1003, and 1283. All
|
332 |
+
datapoints are from samples of 100,000 randomly generated lattices. One sees a crossing
|
333 |
+
in U, similar to the case of a thermal phase transition. Here the crossing point marks
|
334 |
+
the infinite-lattice percolation threshold. Fig. 5ab shows the scaling collapse plots, where
|
335 |
+
ν, β, γ and pc are adjusted to give the best collapse. Here the scaled χ is χL−γ/ν and
|
336 |
+
scaled P is PLβ/ν. Although a good fit can be achieved using all four lattice sizes, a small
|
337 |
+
systematic shift was seen in exponents toward typical percolation values when the 403 data
|
338 |
+
were omitted, suggesting a small correction-to-scaling effect of order the random error. For
|
339 |
+
this fit there are 65 degrees of freedom overall and the fit to the three universal functions (in
|
340 |
+
this case power laws) has χ2/d.f= 0.77. The fit gives ν = 0.872(4), β/ν = 0.472(3), γ/ν =
|
341 |
+
2.056(4) and pc = 0.137317(5). The latter agrees with that determined from finite-size shift
|
342 |
+
above as well as with the threshold previously measured for NN+NNN site percolation,
|
343 |
+
pc = 0.1372(1)[3], which we believe to be equivalent to boundary percolation. As far as
|
344 |
+
we know exponents have not been previously measured for these cases. The quantities γ/ν
|
345 |
+
and β/ν should be related by the hyperscaling relation
|
346 |
+
γ/ν + 2β/ν = d
|
347 |
+
(7)
|
348 |
+
where d is the spatial dimension.
|
349 |
+
Our values give, for the LHS, 3.001(7).
|
350 |
+
For ran-
|
351 |
+
dom site percolation a fairly recent high statistics study gives ν = 0.8764(11) and
|
352 |
+
β/ν = 0.47705(15)[6]. Comparing with our result leads to the conclusion that all of the
|
353 |
+
exponents for random boundary percolation likely match those of ordinary site percolation.
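As an illustration of the analysis just described, the sketch below (not the original analysis code) computes P, χ, and U from samples of the largest-cluster fraction following Eqs. (4)-(5) and rescales them with the scaling variable of Eq. (6); array contents and exponent values are assumed placeholders.

```python
import numpy as np

def cluster_observables(P_samples, N_p):
    """Mean largest-cluster fraction P, susceptibility chi (Eq. 4), and Binder
    cumulant U (Eq. 5) from Monte Carlo samples of P."""
    P_samples = np.asarray(P_samples, dtype=float)
    P1 = P_samples.mean()
    P2 = (P_samples**2).mean()
    P4 = (P_samples**4).mean()
    chi = (P2 - P1**2) * N_p
    U = (P4 - P2**2) / (3.0 * P2**2)
    return P1, chi, U

def collapse(p, P_mean, chi, L, p_c, nu, beta_over_nu, gamma_over_nu):
    """Rescale data for a scaling collapse against x = (p - p_c) * L**(1/nu) (Eq. 6);
    curves for different lattice sizes L should fall on common universal functions."""
    x = (p - p_c) * L**(1.0 / nu)
    return x, P_mean * L**beta_over_nu, chi * L**(-gamma_over_nu)
```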
|
354 |
+
For random boundary percolation, finite-size shift and largest cluster scaling give con-
|
355 |
+
sistent measurements of ν. However that is not the case for boundary percolation in the
|
356 |
+
3D Ising model. Figs. 6abc and 7ab analyze the largest cluster scaling for boundary perco-
|
357 |
+
lation in the 3D Ising model in the same way as above. Of course now the abscissa is the
|
358 |
+
Ising coupling strength κ. The study was similar to the above but with 1,000,000 sweeps
|
359 |
+
per point, sampled every 10, and 200,000 initial equilibration sweeps on 403, 643 and 1003
|
360 |
+
lattices. Again we have a Binder cumulant crossing and excellent scaling collapse plots.
|
361 |
+
These give κc = 0.247925(6), ν = 0.867(5), β/ν = 0.465(5), and γ/ν = 2.068(20). So there
|
362 |
+
are no surprises here as these exponents are consistent with the random percolation values.
|
363 |
+
The fraction of minority sites at κc is 0.1318(4), about 4% lower than for random percola-
|
364 |
+
tion. However, the finite-size-shift exponent, ν, obtained above from the fit to Fig. 2b was
|
365 |
+
1.30(3). This is clearly incompatible with the percolation value just obtained from largest
|
366 |
+
cluster scaling in the same system. This would seem to indicate that some other dynam-
|
367 |
+
ics has taken over the scaling of the finite-size shift, driven by another correlation length
|
368 |
+
which is becoming infinite at a different rate. One possibility for how this could happen
|
369 |
+
is if the percolation is linked to a thermal phase transition which has its own correlation
|
370 |
+
8
|
371 |
+
|
372 |
+
[Figure 5 data plots: Binder cumulant and scaled P, and scaled χ, versus the scaling variable x; axis tick values omitted.]
Figure 5: Scaling collapse plots for boundary percolation in the 3D random percolation model.
|
408 |
+
Binder cumulant (left graph) and scaled largest-cluster fraction (a), and scaled susceptibility (b).
|
409 |
+
length controlled by different dynamics. This is a very curious behavior that invites further
|
410 |
+
investigation because, as previously mentioned, it is quite unusual for a system to have two
|
411 |
+
different correlation lengths.
|
412 |
+
3 Dual order parameter
|
414 |
+
As is well known, percolations are not necessarily coincident with phase transitions, but
|
415 |
+
sometimes are. The situation is clearer if a symmetry-breaking order parameter exists. In
|
416 |
+
that case an energy singularity follows from the hyperscaling relation α = 2 − dν, where
|
417 |
+
α is the specific heat exponent, d is the number of dimensions and ν is the correlation
|
418 |
+
length exponent associated with the order parameter near the symmetry-breaking phase
|
419 |
+
transition. The spontaneous breaking of an exact symmetry is always associated with a
|
420 |
+
mathematical singularity because the order parameter is exactly zero in the unbroken phase,
|
421 |
+
and is non-zero in the broken phase[9]. A function which is zero over a range of values can
|
422 |
+
only become non-zero at a point of non-analyticity.
|
423 |
+
In order to build further evidence of a phase transition at the point of boundary per-
|
424 |
+
colation, one can examine the dual theory, the three-dimensional gauge Ising model. This
|
425 |
+
has action
|
426 |
+
$S = -\beta \sum_p U_p$
|
430 |
+
(8)
|
431 |
+
where Up is the product of four gauge fields Uµijk around an elementary plaquette. The Uµijk
|
432 |
+
exist on links with µ a direction index and ijk the site address. The duality relation maps
|
433 |
+
the coupling of the Ising model κ to β of the dual gauge theory, β = −0.5 ln(tanh(κ))[10].
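A quick numerical check of this duality map, useful for translating the couplings quoted in this paper:

```python
# Worked example of the duality map beta = -0.5*ln(tanh(kappa)).
import math

def dual_beta(kappa):
    return -0.5 * math.log(math.tanh(kappa))

print(dual_beta(0.247925))   # ~0.7074, the dual of the boundary-percolation point found above
print(dual_beta(0.2216595))  # ~0.7614, the dual of the ferromagnetic critical point quoted earlier
```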
|
434 |
+
The ordered phase of the spin theory maps to the disordered (confining) phase of the
|
435 |
+
gauge theory. Generally it is not considered that there is a local symmetry-breaking order-
|
436 |
+
parameter in gauge theories, because Elitzur’s theorem[11] does not allow a local symmetry
|
437 |
+
9
|
438 |
+
|
439 |
+
[Figure 6 data plots, panels (a)-(c): U, χ, and P versus κ; axis tick values omitted.]
Figure 6: Boundary percolation study in the 3D Ising model. Binder cumulant U (a), susceptibility
|
496 |
+
χ (b), and fraction of sites occupied by largest cluster P (c) vs. coupling κ. Error bar ranges for U
|
497 |
+
are about 1/30 the size of plotted points, 1/20 for P, and between 1/5 and 1/20 for χ.
|
498 |
+
to break spontaneously. However, if one transforms configurations to Coulomb gauge then
|
499 |
+
a symmetry-breaking order parameter may be defined, for which the remnant symmetry
|
500 |
+
breaks in the deconfined phase[12]. The Coulomb gauge transformation seeks to maximize
|
501 |
+
the number of positive links in the one and two directions, ignoring the third direction
|
502 |
+
links.
|
503 |
+
This leaves a remnant layered Z2 symmetry.
|
504 |
+
Two-dimensional global symmetry
|
505 |
+
operations applied to single 1-2 layers do not alter the one and two direction links on
|
506 |
+
which Coulomb gauge is defined, but flip all third direction links attached to the layer.
|
507 |
+
For fixed one and two direction links the third direction links have mostly ferromagnetic
|
508 |
+
interactions from plaquettes with two positive one or two direction links, especially at high
|
509 |
+
β. If one takes the third direction links in each separate layer as order parameters, it is
|
510 |
+
found that these magnetize exactly at the dual-reflection of the 3-d Ising critical point[13].
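A greedy local version of this Coulomb gauge fixing can be sketched as follows. It only illustrates the rule described above (accept a Z2 site flip whenever it increases the number of positive 1- and 2-direction links touching the site); it is not the gauge-fixing code used for these results, and in practice repeated sweeps would be applied until no flips remain.

```python
import numpy as np

def coulomb_gauge_sweep(U):
    """One greedy sweep. A Z2 gauge flip at a site multiplies all six links touching
    that site by -1; it is accepted when it raises the number of positive in-plane
    (1- and 2-direction) links there, ignoring third-direction links in the criterion.
    U has shape (3, L, L, L) with entries +/-1 and periodic boundaries."""
    L = U.shape[1]
    for x in range(L):
        for y in range(L):
            for z in range(L):
                # sum of the four in-plane links touching site (x, y, z)
                s = (U[0, x, y, z] + U[0, (x - 1) % L, y, z]
                     + U[1, x, y, z] + U[1, x, (y - 1) % L, z])
                if s < 0:  # flipping the site sends s -> -s
                    U[0, x, y, z] *= -1
                    U[0, (x - 1) % L, y, z] *= -1
                    U[1, x, y, z] *= -1
                    U[1, x, (y - 1) % L, z] *= -1
                    U[2, x, y, z] *= -1
                    U[2, x, y, (z - 1) % L] *= -1
    return U
```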
|
511 |
+
10
|
512 |
+
|
513 |
+
[Figure 7 data plots: Binder cumulant and scaled P, and scaled χ, versus x; axis tick values omitted.]
Figure 7: Scaling collapse graphs for percolation in the 3D Ising model. Binder cumulant (left
|
560 |
+
graph) and scaled largest-cluster fraction (a), and scaled susceptibility (b).
|
561 |
+
The deconfined phase is magnetized and the confined phase is not. The dual reflection of the
|
562 |
+
boundary-percolation point lies in the confined phase, ie. the non-magnetized phase of the
|
563 |
+
gauge theory. If there is a symmetry-breaking phase transition here it must be a spin-glass
|
564 |
+
transition, which is a symmetry-breaking transition within the unmagnetized phase. A spin
|
565 |
+
glass has a hidden pattern of order which does not result in an overall magnetization. To
|
566 |
+
search for such a transition we used a two-real-replica approach[15]. A second set of third-
|
567 |
+
direction pointing links is equilibrated to a fixed pattern of one and two direction links
|
568 |
+
from the main simulation. This is similar to the initial equilibration for any Monte-Carlo
|
569 |
+
simulation. Then the order parameter is defined as
|
570 |
+
$q_k = \sum_{i,j} R_{3ijk}\, U_{3ijk}.$
|
574 |
+
(9)
|
575 |
+
Here R3ijk is the replica third-direction link at site ijk and U3ijk is the original one. Note
|
576 |
+
there is a separate qk for each 2D layer, because the symmetry being broken is only global
|
577 |
+
in two directions but still local in the third direction. As is usual one needs to take the
|
578 |
+
absolute value of the order parameter due to tunneling on the finite lattices. We also choose
|
579 |
+
to take the square root of the order parameter since it is the product of two spins, but this
|
580 |
+
is not absolutely necessary. Thus the average spin-glass magnetization to be analyzed is
|
581 |
+
$M \equiv \langle \textstyle\sum |q_k| \rangle$
|
584 |
+
(10)
|
585 |
+
where the average is both over gauge configurations as well as third direction fixed 2D
|
586 |
+
layers in each gauge configuration. The order parameter M will become non-zero in a phase
|
587 |
+
with either spin-glass order or ferromagnetic order. Spin-glass order is symmetry breaking
|
588 |
+
because the symmetry operation applied only to the original U’s but not the replicas will
|
589 |
+
invert the order parameter. Another way to say this is that tunneling configurations within
|
590 |
+
11
|
591 |
+
|
592 |
+
the replica or original, where half of the lattice is flipped, do not exist in the spin-glass phase
|
593 |
+
in the thermodynamic limit. For systems without a spin-glass phase this order parameter
|
594 |
+
will simply turn on at the normal ferromagnetic transition (for instance, this is the case
|
595 |
+
for Landau-gauge Higgs phase transitions in the combined Ising gauge-Higgs theory[13]).
|
596 |
+
Note also that although its original motivation was from Coulomb gauge, qk itself is gauge
|
597 |
+
invariant, so it is no longer necessary to fix the gauge. As detailed below, we indeed find a
|
598 |
+
phase transition in M away from the ferromagnetic transition indicating the presence of a
|
599 |
+
spin-glass phase. Here we can use all of the finite-size scaling techniques which have been
|
600 |
+
developed for studying symmetry-breaking phase transitions with a local order parameter.
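In code, the layer-wise replica overlap of Eqs. (9)-(10) is a one-line reduction. The sketch below assumes a particular array layout for the third-direction links and a per-layer normalization; it is an illustration, not the simulation code itself.

```python
import numpy as np

def spin_glass_M(U3, R3, take_sqrt=False):
    """Two-replica overlap of Eq. (9) and its layer average, Eq. (10).
    U3, R3: arrays of shape (L, L, L) holding the +/-1 third-direction links of the
    original and replica configurations, indexed as [i, j, k] with k the layer index."""
    L = U3.shape[0]
    q = (R3 * U3).sum(axis=(0, 1)) / float(L * L)   # one normalized overlap q_k per 2D layer
    M = np.abs(q).mean()                             # average of |q_k| over layers
    return np.sqrt(M) if take_sqrt else M            # optional square root, as discussed above
```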
|
601 |
+
[Figure 8 data plots: M and U versus equilibration sweeps; axis tick values omitted.]
Figure 8: Equilibration of spin-glass order parameter and Binder cumulant on a 303 lattice.
|
633 |
+
Before studying the spin-glass order parameter M in Monte Carlo simulations, one
|
634 |
+
must first perform an equilibration study to determine how long the replica must be equili-
|
635 |
+
brated to obtain a truly independent configuration. One simply simulates at many different
|
636 |
+
equilibration sweep values and watches the measured quantities approach constant values
|
637 |
+
exponentially. We then picked equilibration amounts that ensure systematic errors are less
|
638 |
+
than 25% of random errors in the quantities measured. Detailed studies were made at gauge
|
639 |
+
coupling β = 0.705, near the suspected critical point, for both the 303 and 503 lattices. The
|
640 |
+
equilibration value for the 403 lattice was determined from these and the volume scaling
|
641 |
+
suggested by them. Fig. 8ab shows the equilibration of magnetization (order parameter)
|
642 |
+
and its Binder cumulant for the 303 lattice. Other quantities were similar. The exponential
|
643 |
+
fits give an equilibration time constant of 14,000 sweeps. By equilibrating with 105,000
|
644 |
+
sweeps systematic errors are brought to less than 25% of random in the planned simula-
|
645 |
+
tions. For 503 this value was a bit surprisingly high at 700,000 sweeps. We used 190,000
|
646 |
+
sweeps for the intermediate 403 case. These high equilibration values indicate the standard
|
647 |
+
heat bath Monte Carlo algorithm is not working particularly well here, but it still gives
|
648 |
+
good results if one is patient. The high number of sweeps needed to equilibrate is due to the fact
|
649 |
+
that 2/3 of the links, those lying in the 1 and 2 directions, are being held fixed, which erects
|
650 |
+
more barriers than a simulation where all links participate.
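The exponential equilibration fits mentioned above amount to a three-parameter least-squares fit; a minimal sketch with placeholder data follows.

```python
# Sketch of the equilibration analysis: a measured quantity is assumed to approach
# its asymptotic value exponentially, Q(t) = Q_inf + A*exp(-t/tau). Data are placeholders.
import numpy as np
from scipy.optimize import curve_fit

def relax(t, Q_inf, A, tau):
    return Q_inf + A * np.exp(-t / tau)

t = np.array([5e3, 1e4, 2e4, 4e4, 6e4, 8e4, 1e5])                  # equilibration sweeps
Q = np.array([0.760, 0.752, 0.745, 0.740, 0.738, 0.7375, 0.737])    # hypothetical M values

(Q_inf, A, tau), _ = curve_fit(relax, t, Q, p0=[0.737, 0.03, 1.4e4])
print(f"time constant tau ~ {tau:.0f} sweeps")
```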
|
651 |
+
12
|
652 |
+
|
653 |
+
[Figure 9 data plots: U, M, ξ2nd/L, and χ versus β; axis tick values omitted.]
Figure 9: Binder cumulant (left graph) and magnetization (a), ξ2nd/L (b), and susceptibility (c) for
|
706 |
+
the spin-glass order parameter. Error bar spreads for U and M are about 1/15 the size of plotted
|
707 |
+
points, and 1/2 to 1/5 for ξ2nd/L and χ.
|
708 |
+
All simulations had 100 ordinary Monte Carlo sweeps between each measurement of
|
709 |
+
the order parameter to reduce correlations. There were 1000 measurements for each 303
|
710 |
+
and 403 lattices and 500 for 503.
|
711 |
+
Initial equilibration was 200,000 sweeps.
|
712 |
+
Error bars
|
713 |
+
were determined from binned fluctuations. Fig. 9abc shows the Binder cumulant U, order
|
714 |
+
parameter M, susceptibility χ, and second moment correlation length[16] divided by lattice
|
715 |
+
size, ξ2nd/L, for the three lattices. The latter, as well as the Binder cumulant, should cross
|
716 |
+
near the infinite lattice transition point (to determine this precisely one must consider
|
717 |
+
corrections to scaling which we do not do here). One can see a well-defined crossing in both
|
718 |
+
near βc = 0.715. The crossings are well established. For instance the 503 value exceeds
|
719 |
+
the 303 value at β = 0.725 by 15σ for U and 10σ for ξ2nd/L, and points above this have
|
720 |
+
similar significances. The opposite order in the low β region is never in doubt. Indeed, here
|
721 |
+
points are separated by even larger amounts, exceeding 30σ. Scaling collapse plots are
|
722 |
+
13
|
723 |
+
|
724 |
+
shown in Fig. 10abc. The overall fit has 75 degrees of freedom and has a χ2/d.f. = 1.48.
|
725 |
+
This fit gives βc = 0.7174(3), ν = 1.27(3), β/ν = 0.058(2), and γ/ν = 1.86(2). Checking
|
726 |
+
hyperscaling on the latter gives deff = γ/ν + 2β/ν = 1.97(2). Because the order parameter
|
727 |
+
is defined on 2-d layers, the expected value is 2.
|
728 |
+
The dual reflection of the boundary
|
729 |
+
percolation point of the 3D Ising model itself is −0.5 ln tanh(0.247925) = 0.70741(1). This
|
730 |
+
is close to the βc here, but certainly not an exact match, and not within statistical errors.
|
731 |
+
However there could be a systematic error present from corrections to scaling. Looking at
|
732 |
+
the U crossing (Fig. 9a), it is plausible that the crossing on larger lattices could shift to this
|
733 |
+
point. Corrections to scaling can give a slightly shifting crossing with increasing lattice size.
|
734 |
+
There is also the possibility of a residual systematic error from insufficient equilibration.
|
735 |
+
Although we have tried to limit this to 25% of the random error it could still have an
|
736 |
+
effect. The fact that the ν seen here and the ν from the coupling-shift of the percolation
|
737 |
+
transition, 1.30(3) agree within 1σ strongly supports these being dual-manifestations of the
|
738 |
+
same transition.
|
739 |
+
[Figure 10 data plots: Binder cumulant, scaled M, ξ2nd/L, and scaled χ versus the scaling variable x; axis tick values omitted.]
Figure 10: Scaling collapse plots for Binder cumulant (left graph) and scaled magnetization (a),
|
792 |
+
ξ2nd/L (b), and scaled susceptibility (c), for the spin-glass order parameter.
|
793 |
+
14
|
794 |
+
|
795 |
+
4 Energy moments
|
797 |
+
Since the spin-glass transition in the dual theory is symmetry breaking, Landau theory
|
798 |
+
connects this to a thermal phase transition through the hyperscaling relation
|
799 |
+
α = 2 − dν.
|
800 |
+
(11)
|
801 |
+
Here α is the specific heat exponent. At the critical point the expected behavior of the spe-
|
802 |
+
cific heat is |T − Tc|−α. For ν = 1.3, α = −1.9. This means that the specific heat does not
|
803 |
+
have an infinite singularity, however it does have a finite singularity. Unfortunately, when
|
804 |
+
rounded by a finite lattice size, these are difficult to spot using finite-size scaling. Never-
|
805 |
+
theless one can still try to fit to a fractional power, and in some cases more importantly,
|
806 |
+
a different coefficient on each side of the transition. However, the energy moments also
|
807 |
+
have non-singular terms. This makes fitting them more difficult than quantities based on
|
808 |
+
the order parameter which are purely singular. The non-singular part is expected to vary
|
809 |
+
slowly through the critical region. For this reason it affects higher moments less, and there
|
810 |
+
is a good chance these can be fit without a non-singular part other than perhaps a constant.
|
811 |
+
This simplifies fitting to the expected critical behavior. A study of energy moments of the
|
812 |
+
3D Ising model itself was performed. This study had approximately 7 × 10^9 sweeps at each
|
813 |
+
coupling for the 303 lattice and 2 × 10^9 for the 403 and 503, with measurements performed
|
814 |
+
every other sweep. With these statistics, rather precise data can be obtained on the third
|
815 |
+
cumulant (third central moment), defined as $\langle (E - \bar{E})^3 \rangle (3L^3)^2$. It is this combination
|
816 |
+
that corresponds to the derivative of the specific heat. The third cumulant is expected to
|
817 |
+
scale as |κ − κc|−α−1, which is still a non-infinite singularity. This quantity is shown in
|
818 |
+
Fig. 11, along with a fit to the expected critical behavior, but leaving α and κc as free
|
819 |
+
parameters. The fit also allows for a different coefficient on the two sides of the transition.
|
820 |
+
The result is κc = 0.2477(2), and −α−1 = 0.967(12). The coefficient ratio below and above
|
821 |
+
the critical point is 1.287(25). The predicted ν from this α is ν = (−α + 2)/3 = 1.322(4).
|
822 |
+
The critical point agrees well with that extracted from percolation (0.24781(4)), and the
|
823 |
+
exponent ν also agrees with those from both percolation finite-size shift (1.30(3)) and the
|
824 |
+
spin glass transition in the dual gauge theory (1.27(3)). Even though the singularity is non-
|
825 |
+
infinite, it can still be seen from this fit. It is important to remember that these functions
|
826 |
+
are singular in two ways - the fractional power and the jump in coefficient. So even if the
|
827 |
+
power were to end up being exactly unity, that would not erase the singularity due to the
|
828 |
+
fairly large coefficient jump, verified to 11.5σ, which can be seen in the change of slope.
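The singular fit described here can be set up as a piecewise power law with a shared exponent and separate amplitudes on the two sides of κc; the sketch below only illustrates the fit form, with placeholder starting values rather than the actual fit setup.

```python
import numpy as np
from scipy.optimize import curve_fit

def third_cumulant_model(kappa, kappa_c, p, A_below, A_above, c0):
    """|kappa - kappa_c|**p singularity (p = -alpha - 1 > 0 here, so the cumulant stays
    finite at kappa_c) with a different amplitude on each side, plus a constant."""
    amp = np.where(kappa < kappa_c, A_below, A_above)
    return c0 + amp * np.abs(kappa - kappa_c)**p

# Usage with measured data (starting values below are placeholders):
# popt, pcov = curve_fit(third_cumulant_model, kappa_data, c3_data,
#                        p0=[0.2478, 0.97, 250.0, 200.0, -210.0])
```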
|
829 |
+
Higher moments were also measured, but even with these high statistics were somewhat
|
830 |
+
of a disappointment due to fairly large statistical errors. Fig. 12 shows the fourth cumulant,
|
831 |
+
$(\langle (E - \bar{E})^4 \rangle - 3 \langle (E - \bar{E})^2 \rangle^2)(3L^3)^3.$
|
832 |
+
(12)
|
833 |
+
This combination of moments tracks the third derivative of the internal energy with respect
|
834 |
+
to κ. Also shown is a numerical derivative of the fit function to the third cumulant on a
|
835 |
+
parameter spacing 1/4 of that used for the simulations. This was done instead of an exact
|
836 |
+
derivative to simulate finite-lattice rounding, so not an exact prediction of the expected
|
837 |
+
15
|
838 |
+
|
839 |
+
[Figure 11 data plot: third energy cumulant versus κ; axis tick values omitted.]
Figure 11: Third order energy cumulant, with fit to critical behavior. Here open circles are 303,
|
857 |
+
open triangles 403, and × 503.
|
858 |
+
behavior, but one which should be good away from the critical point. The main effect of
|
859 |
+
the shift in slope in the third cumulant which translates to a shift in level here can be seen.
|
860 |
+
In principle some finite-size effect could be seen in this quantity since it diverges with a
|
861 |
+
very small exponent, but the expected ratio in peak heights between 303 and 403 is only
|
862 |
+
(4/3)^{(α+2)/ν} = 1.02, much smaller than our statistical errors. A larger effect is predicted
|
863 |
+
for the fifth cumulant (1.28), but the errors there are magnified even more. This figure
|
864 |
+
is shown primarily to illustrate how much further one would have to go in statistics to
|
865 |
+
see an infinite singularity in a high moment. Our program, which was run for about 24
|
866 |
+
processor-years on PC’s, does not employ multi-spin coding. Perhaps a study that did or
|
867 |
+
used specialized hardware could see these effects.
|
868 |
+
5 Conclusion
|
870 |
+
In this paper evidence has been given for a new high-order phase transition within the or-
|
871 |
+
dered phase of the 3D Ising model. This transition appears to be associated with boundary
|
872 |
+
percolation. This is the percolation of dual-plaquettes that lie on the domain boundary
|
873 |
+
between + and - spins, a type of percolation that has not been much studied. Percolation
|
874 |
+
of domain boundaries occurs when minority sites occupy 13% or more of the lattice. It is,
|
875 |
+
incidentally, not coincident with the roughening transition which occurs much deeper into
|
876 |
+
the ordered phase, around κ = 0.408[17]. Because the Ising model has a formulation in
|
877 |
+
terms of the domain boundary itself, the percolation of the boundary could be important,
|
878 |
+
16
|
879 |
+
|
880 |
+
[Figure 12 data plot: fourth energy cumulant versus κ; axis tick values omitted.]
Figure 12: Fourth order energy cumulant for 303 and 403 lattices. Line is a plausible rounded critical
|
895 |
+
behavior based on third cumulant fit (see text).
|
896 |
+
possibly producing a sudden change in the entropy function expressed in terms of boundary
|
897 |
+
area.
|
898 |
+
Random boundary percolation appears to have the same critical exponents as ordinary
|
899 |
+
site percolation. Boundary percolation in the Ising model seems to model random boundary
|
900 |
+
percolation as far as the scaling of cluster sizes is concerned, however it differs in the finite-
|
901 |
+
size shift exponent, which determines how the percolation threshold depends on lattice
|
902 |
+
size. Whereas random percolation has a shift exponent agreeing with typical values of the
|
903 |
+
correlation-length exponent from cluster size scaling (ν ∼ 0.88), the shift exponent from
|
904 |
+
the 3D Ising model boundary percolation is vastly different, ν ∼ 1.3. This surprising result
|
905 |
+
means that the system has two different correlation lengths, both diverging at the infinite-
|
906 |
+
lattice percolation threshold. This also suggests that there is more than just percolation
|
907 |
+
going on here. If percolation is linked to a thermal phase transition, that could explain
|
908 |
+
the odd shift exponent, since the order parameter of the phase transition may have its own
|
909 |
+
correlation length.
|
910 |
+
To find such an order parameter we examined the dual system, the 3D gauge Ising
|
911 |
+
model. The dual point of the boundary percolation threshold occurs in the random (con-
|
912 |
+
fining) phase of the gauge theory. An order parameter for the confinement-deconfinement
|
913 |
+
transition can be obtained in Coulomb gauge, where as many one and two direction links
|
914 |
+
as possible are made to be positive by gauge transformations. The third direction links on
|
915 |
+
each lattice layer can be taken to be a spin-like order parameter, which shows spontaneous
|
916 |
+
magnetization in the ordered phase and is unmagnetized in the random phase. If there is
|
917 |
+
a phase transition corresponding to boundary percolation in the Ising model itself, it must
|
918 |
+
occur within the random phase of the gauge theory. This suggests looking for a spin-glass
|
919 |
+
transition here, a shift from a completely disordered phase to one with a hidden pattern
|
920 |
+
of order, but still showing no net magnetization. To this end we utilized a two-real-replica
|
921 |
+
17
|
922 |
+
|
923 |
+
order parameter, which indeed does show a phase transition near the dual reflection of
|
924 |
+
boundary percolation, and with the same critical exponent ∼ 1.30. This is significant be-
|
925 |
+
cause it is a true symmetry-breaking phase transition. The symmetry being broken is the
|
926 |
+
layered remnant (Z2)L symmetry left over after Coulomb gauge fixing, which is global in
|
927 |
+
two dimensions but still local in the third. This is “global enough” to avoid Elitzur’s theo-
|
928 |
+
rem and has sufficient dimensions (2) for a discrete symmetry to break spontaneously at a
|
929 |
+
finite coupling. Spontaneous symmetry-breaking always results in a phase transition, i.e. a
|
930 |
+
mathematical singularity in the order parameter, which also results in an energy singularity
|
931 |
+
except in a few unusual cases [14].
|
932 |
+
Finally we examined energy moments in search of this singularity.
|
933 |
+
Because of the
|
934 |
+
high value of ν the specific heat exponent is negative, implying a finite singularity, so the
|
935 |
+
usual finite-size scaling applied to peak heights cannot be used here. We concentrated on
|
936 |
+
the third energy cumulant, since it could be fit without the addition of an obfuscating
|
937 |
+
non-singular part, other than a constant. An open fit to the singular form expected for
|
938 |
+
this quantity based on the hyperscaling relationship, gives κc and ν values consistent with
|
939 |
+
those determined by boundary percolation and the dual order parameter. There is also
|
940 |
+
a noticeable jump in coefficient here, another expectation of this sort of singularity. Our
|
941 |
+
study did not have enough statistics to see the small expected peak scaling in the fourth
|
942 |
+
cumulant or somewhat larger effect in the fifth, which should have infinite singularities
|
943 |
+
on the infinite lattice.
|
944 |
+
Although observing these would be satisfying, still the singular
|
945 |
+
fit to the third cumulant does match well with the prediction from the order parameter.
|
946 |
+
This demonstrates that phase transitions as weak as these can be studied by numerical
|
947 |
+
methods. The existence of an order parameter and associated symmetry breaking is key
|
948 |
+
in establishing this as a true phase transition. The coincidence of boundary percolation is
|
949 |
+
also interesting and gives another measure of ν, but cannot by itself be used to imply the
|
950 |
+
presence of a phase transition. However, it has the advantage of being very easy to measure.
|
951 |
+
It appears to have the same cluster-size scaling exponents as ordinary site percolation, but a
|
952 |
+
different threshold. It may be interesting to explore boundary percolation in other systems.
|
953 |
+
Since percolation has so many practical applications, it’s possible boundary percolation is a
|
954 |
+
better fit than site or link percolation in some cases. Finally, we note that a previous study
|
955 |
+
of the Z2 gauge-Higgs system showed a total of four phase transition lines further into the
|
956 |
+
diagram[13]. The current paper shows there are two phase transitions on each axis, gauge
|
957 |
+
and Higgs, so also a total of four. It will be interesting to follow these new phase transitions
|
958 |
+
into the phase diagram to see how they connect with the lines previously found.
|
959 |
+
The
|
960 |
+
previous paper showed that the Z2 gauge-Higgs system appears to be more complicated
|
961 |
+
than previously thought.
|
962 |
+
The present paper shows that these additional complications
|
963 |
+
extend to the 3D Ising model itself, and its dual, the 3D gauge Ising model.
|
964 |
+
It seems
|
965 |
+
possible that similar weak phase transitions may also be lurking in other well-known spin
|
966 |
+
and gauge systems.
|
967 |
+
18
|
968 |
+
|
969 |
+
References
|
970 |
+
[1] R.P. Feynman, Statistical mechanics - a set of lectures, Addison-Wesley, Reading MA,
|
971 |
+
1998, ch.5.
|
972 |
+
[2] M. Kac and J.C. Ward, A combinatorial solution of the two dimensional Ising model,
|
973 |
+
Phys. Rev. 88, 1332-1337 (1952).
|
974 |
+
[3] L. Kurzawski and K. Malarz, Simple cubic random-site percolation thresholds for com-
|
975 |
+
plex neighborhoods, Rep. Math. Phys. 70, 163-169 (2012); C. Domb and N.W. Walton,
|
976 |
+
Crystal statistics with long-range forces I. The equivalent neighbor model, Proc. Phys.
|
977 |
+
Soc. 89, 859-871 (1966).
|
978 |
+
[4] H. Müller-Krumbhaar, Percolation in a lattice system with particle interaction, Phys.
|
979 |
+
Lett. A 50, 27-28 (1974).
|
980 |
+
[5] A.M. Ferrenberg and D.P. Landau, Critical behavior of the three-dimensional Ising
|
981 |
+
model: A high-resolution Monte Carlo study, Phys. Rev. B 44, 5081-5091 (1991).
|
982 |
+
[6] J. Wang, Z. Zhou, W. Zhang, T.M. Garoni, and Y. Deng, Bond and site percolation
|
983 |
+
in three dimensions, Phys. Rev. E 87, 052107 (2013); Erratum, 89, 069907 (2014).
|
984 |
+
[7] D. Stauffer and A. Aharony, Introduction to Percolation Theory, Revised 2nd edition,
|
985 |
+
Taylor and Francis, London, 1994.
|
986 |
+
[8] K. Binder and D.W.Heermann, Monte Carlo simulation in statistical physics - an
|
987 |
+
introduction, 6th ed., Springer Nature, Cham Switzerland, 2019.
|
988 |
+
[9] L.D. Landau and E.M. Lifshitz, Statistical Physics - Vol. 5 of the Course of Theoretical
|
989 |
+
Physics, Pergamon Press, London, 1958, p452.
|
990 |
+
[10] H.A. Kramers and G.H. Wannier, Statistics of the two-dimensional ferromagnet Part 1,
|
991 |
+
Phys. Rev. 60, 252-262 (1941); R. Savit, Duality in field theory and statistical systems,
|
992 |
+
Rev. Mod. Phys. 52, 453-487 (1980).
|
993 |
+
[11] S. Elitzur, Impossibility of spontaneously breaking local symmetries, Phys. Rev. D12,
|
994 |
+
3978-3982 (1975) .
|
995 |
+
[12] J. Greensite, S. Olejník, and D. Zwanziger, Coulomb energy, remnant symmetry, and
|
996 |
+
phases of non-Abelian gauge theories, Phys. Rev. D 69, 074506 (2004); D. Zwanziger,
|
997 |
+
No confinement without Coulomb confinement, Phys. Rev. Lett. 90, 102001 (2003).
|
998 |
+
[13] M. Grady, Exploring the 3D Ising gauge-Higgs theory in exact Coulomb gauge and
|
999 |
+
with a gauge-invariant substitute for Landau gauge, arXiv:2109.04560 (2021).
|
1000 |
+
[14] ibid Appendix A.
|
1001 |
+
[15] K. Binder and W. Kob, Glassy Materials and Disordered Solids, World Scientific, New
|
1002 |
+
Jersey, 2005, pp. 248, 261.
|
1003 |
+
19
|
1004 |
+
|
1005 |
+
[16] F. Cooper, B. Freedman, and D. Preston, Solving φ^4_{1,2} field theory with Monte Carlo,
|
1007 |
+
Nucl. Phys. B 210, 210-228 (1982); D.J. Amit and V. Martín-Mayor, Field Theory,
|
1008 |
+
the Renormalization Group, and Critical Phenomena: Graphs to Computers, 3rd ed.,
|
1009 |
+
World Scientific, Singapore, 2005.
|
1010 |
+
[17] K.K. Mon, S. Wansleben, D.P. Landau and K. Binder, Anisotropic surface tension,
|
1011 |
+
step free energy, and interface roughening in the three-dimensional Ising model, Phys.
|
1012 |
+
Rev. Lett. 60, 708-711 (1988); Erratum, 61, 902 (1988); K.K Mon, D.P. Landau, and
|
1013 |
+
D. Stauffer, Interface roughening in the three-dimensional Ising model, Phys. Rev. B
|
1014 |
+
42, 545-547 (1990).
|
1015 |
+
20
|
1016 |
+
|
4dFAT4oBgHgl3EQfExzK/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4tFKT4oBgHgl3EQfRy39/content/2301.11773v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:03aa441c55a0b6aac2dd2bf488e85d43c67d4c85aee6267dbf9508b6908655ff
|
3 |
+
size 4944672
|
4tFKT4oBgHgl3EQfRy39/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3d32002281639333b4b6521356454658767c704d6108bbe13b6974cc712f0328
|
3 |
+
size 12058669
|
4tFKT4oBgHgl3EQfRy39/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fed0c14e37ce8c1af5b58f6dfb4303701015c555433e5ade5e00f2c1b2a3eefc
|
3 |
+
size 347660
|
59E0T4oBgHgl3EQfvwFr/content/2301.02622v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e62e6883d9cdcde90c5ed707f23e9272b00a20b6892dba6deb00da9e5caa0adb
|
3 |
+
size 5058457
|
59E0T4oBgHgl3EQfvwFr/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d6c3b435e6117b20572c3c103d2b15092700491bbaceb43635e86a53544ba977
|
3 |
+
size 277131
|
5tFIT4oBgHgl3EQf8CtK/content/2301.11400v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:482273e2f21e300ef8f681cdc350a945d6faf22c3cb51c47e8a028df885283f9
|
3 |
+
size 4456755
|
5tFIT4oBgHgl3EQf8CtK/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:43522b2af2f12d6829e7355bd8cb9d5c96e10c4f2ac3d257e7ee3221a19d5425
|
3 |
+
size 1699971
|
69E1T4oBgHgl3EQfBgJT/content/tmp_files/2301.02852v1.pdf.txt
ADDED
@@ -0,0 +1,1274 @@
1 |
+
Coherent control of wave beams via unidirectional evanescent modes excitation
|
2 |
+
Shuomin Zhong1*,∗ Xuchen Wang2*, and Sergei A. Tretyakov3
|
3 |
+
1. School of Information Science and Engineering, Ningbo University, Ningbo 315211, China
|
4 |
+
2. Institute of Nanotechnology, Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany
|
5 |
+
3. Department of Electronics and Nanoengineering, Aalto University, Finland
|
6 |
+
Conventional coherent absorption occurs only when two incident beams exhibit mirror symmetry
|
7 |
+
with respect to the absorbing surface, i.e., the two beams have the same incident angles, phases,
|
8 |
+
and amplitudes. In this work, we propose a more general metasurface paradigm for coherent perfect
|
9 |
+
absorption, with impinging waves from arbitrary asymmetric directions. By exploiting excitation of
|
10 |
+
unidirectional evanescent waves, the output can be fixed at one reflection direction for any amplitude
|
11 |
+
and phase of the control wave. We show theoretically and confirm experimentally that the relative
|
12 |
+
amplitude of the reflected wave can be tuned continuously from zero to unity by changing the phase
|
13 |
+
difference between the two beams, i.e. switching from coherent perfect absorption to full reflection.
|
14 |
+
We hope that this work will open up promising possibilities for wave manipulation via evanescent
|
15 |
+
waves engineering with applications in optical switches, one-side sensing, and radar cross section
|
16 |
+
control.
|
17 |
+
I.
|
18 |
+
INTRODUCTION
|
19 |
+
Coherent control of propagation of a wave beam by
|
20 |
+
tuning the amplitude and phase of another beam is a very
|
21 |
+
promising approach to realize ultra fast optical devices
|
22 |
+
for optical computing, sensing, and other applications [1–
|
23 |
+
11]. One of the most important effects in coherent control
|
24 |
+
of light is coherent perfect absorption [12–22]. In these
|
25 |
+
devices, the level of absorption of one beam illuminating
|
26 |
+
a thin sheet is controlled by another coherent beam that
|
27 |
+
illuminates the same sheet.
|
28 |
+
In earlier works, coherent perfect absorption (CPA)
|
29 |
+
was achieved only with illumination from different
|
30 |
+
sides of a homogeneous lossy layer and for two incident
|
31 |
+
waves at the same angle [12, 13, 15, 22].
|
32 |
+
The mecha-
|
33 |
+
nism of coherent perfect absorption is destructive cancel-
|
34 |
+
lation of all scattered beams. For homogeneous coher-
|
35 |
+
ent perfect absorbers, there are only specular reflection
|
36 |
+
and non-diffractive transmission, allowing coherent ab-
|
37 |
+
sorption only with illumination of both sides and at the
|
38 |
+
same incidence angle. From the theoretical point of view
|
39 |
+
and for many applications, it is important to achieve co-
|
40 |
+
herent control of output for illuminations from the same
|
41 |
+
side of the metasurface sheet at two or more arbitrary
|
42 |
+
incidence angles. In Refs. [17, 18, 23], coherent perfect
|
43 |
+
absorption and scattering for two angularly asymmetric
|
44 |
+
beams are realized by using surface plasmon-polariton
|
45 |
+
(SPP) excitation at silver-based diffraction groove grat-
|
46 |
+
ings. However, such plasmonic grating designs have limi-
|
47 |
+
tations. In particular, the structures are non-planar and
|
48 |
+
operate only for TM modes at optical frequencies, where
|
49 |
+
SPP are supported. Moreover, there are always two out-
|
50 |
+
put beams for different values of the phase of the control
|
51 |
+
waves, one of which may cause undesired noise to the
|
52 |
+
useful output signal due to parasitic scattering. This is-
|
53 |
+
sue is critical in applications such as optical computing
|
54 |
+
[24].
|
55 |
+
∗ Email: [email protected], [email protected]
|
56 |
+
In this decade, the emergence of gradient metasurfaces
|
57 |
+
[25–28] and metagratings [29–35] has opened a new av-
|
58 |
+
enue for manipulation of light for arbitrary incidence an-
|
59 |
+
gles and versatile functionalities. For periodical metasur-
|
60 |
+
faces or metagratings with the period larger than half of
|
61 |
+
the wavelength, the incident plane wave from one direc-
|
62 |
+
tion will be scattered into multiple directions, and the
|
63 |
+
power carried by the incident wave can be redistributed
|
64 |
+
among a number of diffraction modes.
|
65 |
+
Based on this
|
66 |
+
concept, several metasurface devices with perfect anoma-
|
67 |
+
lous reflection working at microwaves [36, 37] and optical
|
68 |
+
bands [38] have been developed. However, in these previ-
|
69 |
+
ous works, the functionality of metasurfaces is designed
|
70 |
+
only for one incident angle and the response for other illu-
|
71 |
+
minations is actually not considered. To design metasur-
|
72 |
+
faces with coherent control functions for multiple simul-
|
73 |
+
taneously incident coherent beams from different direc-
|
74 |
+
tions, the matching conditions of amplitude, phase, and
|
75 |
+
wavevector(direction) of the scattering modes between all
|
76 |
+
incidences are required [35, 39, 40], which is almost an
|
77 |
+
impossible task using traditional gradient phase methods
|
78 |
+
[25, 36] and brute-force numerical optimizations [37, 41].
|
79 |
+
In this work, we perform inverse designs of CPA meta-
|
80 |
+
surfaces by solving the surface impedance satisfying the
|
81 |
+
boundary condition determined by two coherent incident
|
82 |
+
waves from two arbitrary angles and the desired total
|
83 |
+
scattered waves.
|
84 |
+
The engineering of evanescent waves
|
85 |
+
in the scattered fields without altering the desired far-
|
86 |
+
field outputs provides significant freedom in the CPA
|
87 |
+
metasurface design, making another functionality of co-
|
88 |
+
herent control of reflection with a single direction possi-
|
89 |
+
ble. It is demonstrated that excitation of unidirectional
|
90 |
+
evanescent waves propagating along the surface in the
|
91 |
+
direction of the incident-wave wavevector can be used to
|
92 |
+
achieve single-direction output in coherently controlled
|
93 |
+
optical devices. Furthermore, a mathematical optimiza-
|
94 |
+
tion method based on scattered harmonics analysis [42]
|
95 |
+
is utilized to find the surface-impedance profile that si-
|
96 |
+
multaneously ensures the CPA and coherent maximum
|
97 |
+
reflection (CMR) in a single direction. Thereafter, the
|
98 |
+
arXiv:2301.02852v1 [physics.app-ph] 8 Jan 2023
|
99 |
+
|
100 |
+
2
|
101 |
+
substrate parameters are invoked as additional degrees of
|
102 |
+
freedom in the optimization model, realizing reflection ef-
|
103 |
+
ficiency of 100%. As an example, we experimentally vali-
|
104 |
+
date the CPA gradient metasurface design in microwaves
|
105 |
+
for TE-polarized waves by engineering the Indium Tin
|
106 |
+
Oxide (ITO) film mounted on a grounded dielectric sub-
|
107 |
+
strate. It is shown that the normalized output power
|
108 |
+
can be continuously controlled between 0 and 1 by tun-
|
109 |
+
ing the phase of the control wave.
|
110 |
+
II.
|
111 |
+
DESIGN CONCEPT
|
112 |
+
[Figure 1 schematic: modulated impedance surface Zs(x) with period Dx, illuminated by beams I1 and I2 at angles θ1 and θ2.]
FIG. 1. General scattering scenario for a periodically modu-
|
121 |
+
lated impenetrable impedance surface. Two coherent beams
|
122 |
+
I1 and I2 are simultaneously incident from two angles.
|
123 |
+
Let us consider an impenetrable reciprocal metasur-
|
124 |
+
face whose surface is periodically modulated along the
|
125 |
+
x-direction, with the period Dx. The surface is in the
|
126 |
+
xy-plane of a Cartesian coordinate system (see Fig. 1).
|
127 |
+
The metasurface is simultaneously illuminated by two
|
128 |
+
TE(s)-polarized plane waves I1 and I2 at the incidence
|
129 |
+
angles θ1 and θ2 (θ1 > θ2). The electric field amplitudes
|
130 |
+
of the two beams I1 and I2 are E1 = E0 and E2 = αE0,
|
131 |
+
respectively (α is the amplitude ratio). The phase differ-
|
132 |
+
ence between them is ∆φ=0, defined at the origin point
|
133 |
+
(x = 0, z = 0). The electromagnetic properties of the
|
134 |
+
metasurface can be characterized by the locally-defined
|
135 |
+
surface impedance that stands for the ratio of the tangen-
|
136 |
+
tial electric and magnetic field amplitudes at the surface
|
137 |
+
plane Zs(x) = Et(x)/Ht(x).
|
138 |
+
The field reflected by a periodically modulated meta-
|
139 |
+
surface can be interpreted as a sum of Floquet harmonics.
|
140 |
+
The tangential wavenumber of the n-th harmonic is re-
|
141 |
+
lated to the period and the incident wavenumber k0 as
|
142 |
+
$k_{rxn} = k_0 \sin\theta_i + 2\pi n_i/D_x$, where i = 1, 2. The corre-
|
143 |
+
sponding normal component of the reflected wavenumber
|
144 |
+
equals $k_{rzn} = \sqrt{k_0^2 - k_{rxn}^2}$. If $|k_{rxn}|$ is greater than the
|
148 |
+
incident wave number, the wave is evanescent and it does
|
149 |
+
not contribute to the far field. For the harmonic wave sat-
|
150 |
+
isfying |krxn| < k0, krzn is real, and this wave is propagat-
|
151 |
+
ing. The evanescent harmonics will be dissipated by the
|
152 |
+
lossy surface and the propagating harmonics will propa-
|
153 |
+
gate into the far-zone at the angles θrn = arcsin(krxn/k0).
|
154 |
+
In order to achieve coherent perfect absorption, it is nec-
|
155 |
+
essary (but not sufficient) to ensure that all the diffracted
|
156 |
+
propagating modes of two beams have the same set of
|
157 |
+
angles θrn, that allows mutual cancellation, defining the
|
158 |
+
period Dx = λ0/(sin θ1 −sin θ2) [43], where λ0 stands for
|
159 |
+
the wavelength.
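For the example angles used below, (θ1, θ2) = (45°, 0°), this mode-matching condition is easy to verify numerically; the snippet assumes the usual grating relation k_rxn = k0 sin θ_i + 2πn/Dx and lists the shared propagating directions.

```python
import numpy as np

lam = 1.0                                    # work in units of the wavelength
th1, th2 = np.deg2rad(45.0), np.deg2rad(0.0)
Dx = lam / (np.sin(th1) - np.sin(th2))       # ~1.414 lambda_0 for (45, 0) degrees

k0 = 2 * np.pi / lam
for n in range(-3, 4):
    krx = k0 * np.sin(th1) + 2 * np.pi * n / Dx
    if abs(krx) <= k0:                       # propagating harmonic
        print(n, np.rad2deg(np.arcsin(krx / k0)))   # shared angles: 45, 0, -45 degrees
```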
|
160 |
+
Our aim is to achieve coherent perfect absorption for
|
161 |
+
two coherent in-phase waves simultaneously incident on
|
162 |
+
the metasurface at two different angles θ1 and θ2. First,
|
163 |
+
let us assume that no evanescent waves are excited for
|
164 |
+
these two illuminations. In the CPA case, there should
|
165 |
+
be no reflected field at the surface. Thus, the tangential
|
166 |
+
components of the total electric field at the plane z = 0
|
167 |
+
can be written as Et(x) = E0(e−jk0 sin θ1x+αe−jk0 sin θ2x),
|
168 |
+
where the time-harmonic dependency in the form ejωt
|
169 |
+
is assumed and suppressed.
|
170 |
+
The corresponding total
|
171 |
+
magnetic field reads $H_t(x) = E_0(\cos\theta_1 e^{-jk_0 \sin\theta_1 x} + \alpha\cos\theta_2 e^{-jk_0 \sin\theta_2 x})/Z_0$, with $Z_0 = \sqrt{\mu_0/\epsilon_0}$ being the
|
175 |
+
free-space wave impedance. The ratio of these electric
|
176 |
+
and magnetic fields gives the required surface impedance
|
177 |
+
$\Re(Z_s) = Z_0 \dfrac{\cos\theta_1 + \alpha^2\cos\theta_2 + \alpha\cos\Phi\,(\cos\theta_1 + \cos\theta_2)}{\cos^2\theta_1 + \alpha^2\cos^2\theta_2 + 2\alpha\cos\theta_1\cos\theta_2\cos\Phi},$
$\Im(Z_s) = Z_0 \dfrac{\alpha(\cos\theta_1 - \cos\theta_2)\sin\Phi}{\cos^2\theta_1 + \alpha^2\cos^2\theta_2 + 2\alpha\cos\theta_1\cos\theta_2\cos\Phi},$
|
183 |
+
(1)
|
184 |
+
where Φ = k0(sin θ1 − sin θ2)x is the linearly varying
|
185 |
+
phase.
|
186 |
+
The real and imaginary parts of the surface
|
187 |
+
impedance are even and odd functions of x, respectively.
|
188 |
+
As is seen from Eqs. (1), the periodicity of the surface
|
189 |
+
impedance is D = λ0/(sin θ1 − sin θ2), in accord with the
|
190 |
+
above analysis. For passive metasurfaces, the real part
|
191 |
+
of the surface impedance must be non-negative.
|
192 |
+
Con-
|
193 |
+
sequently, the amplitude ratio should satisfy α ≥ 1 or
|
194 |
+
α ≤ cos θ1/ cos θ2 to ensure passive solution for CPA by
|
195 |
+
the surface.
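Equation (1) can be evaluated directly to reproduce impedance profiles such as Fig. 2(a); the short sketch below uses the example parameters (θ1, θ2) = (45°, 0°) and α = 1 and checks passivity of the real part. It is an illustration of the published formula, not the design code.

```python
import numpy as np

Z0 = np.sqrt(4e-7 * np.pi / 8.8541878128e-12)   # free-space impedance, ~376.73 ohm
th1, th2, alpha = np.deg2rad(45.0), np.deg2rad(0.0), 1.0
lam = 1.0
D = lam / (np.sin(th1) - np.sin(th2))

x = np.linspace(0.0, D, 501)
Phi = 2 * np.pi / lam * (np.sin(th1) - np.sin(th2)) * x
den = (np.cos(th1)**2 + alpha**2 * np.cos(th2)**2
       + 2 * alpha * np.cos(th1) * np.cos(th2) * np.cos(Phi))
Rs = Z0 * (np.cos(th1) + alpha**2 * np.cos(th2)
           + alpha * np.cos(Phi) * (np.cos(th1) + np.cos(th2))) / den
Xs = Z0 * (alpha * (np.cos(th1) - np.cos(th2)) * np.sin(Phi)) / den
assert Rs.min() >= -1e-9   # passivity of the real part, as discussed above
```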
|
196 |
+
As an example, we consider two incident waves with
|
197 |
+
incidence angles of (θ1, θ2) = (45◦, 0◦) and the same am-
|
198 |
+
plitude, assuming α = 1 for simplicity. (Other scenarios
|
199 |
+
with (θ1, θ2) = (60◦, −30◦), (75◦, 15◦) are illustrated in
|
200 |
+
the Supplemental Materials[43], corresponding to differ-
|
201 |
+
ent surface impedance profiles.) As is shown in Fig. 2(a),
|
202 |
+
everywhere on the surface its resistance is non-negative,
|
203 |
+
demonstrating that passive gradient periodic surfaces can
|
204 |
+
realize CPA for two asymmetric incident beams.
|
205 |
+
To analyze the mechanism of CPA by the periodic
|
206 |
+
impedance surface further, we can determine the ampli-
|
207 |
+
tudes of all the Floquet scattered harmonics for general
|
208 |
+
plane-wave illumination, using the method reported in
|
209 |
+
[42]. The total reflected field can be represented as an
|
210 |
+
infinite sum of Floquet harmonic modes:
|
211 |
+
$E_r = \sum_{n=-\infty}^{\infty} A_n e^{-jk_{rzn}z} e^{-jk_{rxn}x},$
|
216 |
+
(2)
|
217 |
+
where An is the complex amplitude of the n-th Floquet
|
218 |
+
harmonic. Because the surface modulation is periodical,
|
219 |
+
the surface admittance Ys(x) = 1/Zs(x) can be expanded
|
220 |
+
|
221 |
+
3
|
222 |
+
[Figure 2 data plots, panels (a)-(d); axis tick values omitted.]
FIG. 2. (a) Analytical surface impedance over one period to realize CPA for two incidence beams with (θ1, θ2) = (45◦, 0◦).
|
276 |
+
(b) Magnitudes of the complex amplitudes of different Floquet scattered harmonics (normalized by the amplitude of the
|
277 |
+
incident electric field E0) when the gradient surface is illuminated by single-beam incidences at 45◦ and 0◦, and for two-
|
278 |
+
beam incidences in phase and out of phase, respectively. (c) Optimized surface impedance profile over one period to realize
|
279 |
+
CPA for in-phase incidences and single-direction reflection for out-of-phase incidences.
|
280 |
+
The optimized Fourier coefficients
|
281 |
+
of Ys(x) read g0 = 2.654 × 10−3 + j1.724 × 10−11, g1 = −7.770 × 10−4 − j1.045 × 10−10, g2 = −(6.565 + j4.581) × 10−5,
|
282 |
+
g3 = −9.143×10−8 +j5.720×10−6, g4 = (−1.644+j1.992)×10−5. (d) Amplitudes of scattered harmonics when the optimized
|
283 |
+
gradient surface in (c) is illuminated by single-beam incidences at 45◦ and 0◦, and for two-beam incidences in phase and out
|
284 |
+
of phase, respectively.
|
285 |
+
into Fourier series:
|
286 |
+
Ys(x) = Σ_{n=−∞}^{+∞} gn e−j2nπx/D.        (3)
|
292 |
+
A Toeplitz matrix Ys which we call the admittance ma-
|
293 |
+
trix is determined only by the Fourier coefficients of the
|
294 |
+
modulation function and filled with Ys(r, c) = gr−c at
|
295 |
+
the r-th row and c-th column. The reflection matrix is
|
296 |
+
found as [44]
|
297 |
+
Γ = (Y0 + Ys)−1 (Y0 − Ys),
|
298 |
+
(4)
|
299 |
+
where Y0 = Z0^−1 is a diagonal matrix with its main
|
302 |
+
entry representing the admittance of each space har-
|
303 |
+
monic, which is Y0(n, n) = krzn/(ω0µ0). The amplitudes
|
304 |
+
An of reflected harmonics for a given m-th order Flo-
|
305 |
+
quet harmonic of the incident wave can be calculated
|
306 |
+
as An = Γ(n, m). Note that Γ is a (2N + 1) × (2N + 1)
|
307 |
+
square matrix and the columns and rows of Γ are indexed
|
308 |
+
from −N to +N. When the surface is illuminated by two
|
309 |
+
waves simultaneously, the amplitudes of all the Floquet
|
310 |
+
harmonics are linear superpositions of the harmonics produced by each beam separately.
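The bookkeeping of this mode-matching step can be summarized in a short Python sketch (our own illustration under assumed truncation and placeholder coefficients; it is not the authors' code):

import numpy as np

# Truncated Floquet expansion with 2N + 1 harmonics, indexed n = -N..N (N is an assumption).
N = 8
c0, mu0 = 299792458.0, 4e-7 * np.pi
f0 = 15.22e9                                    # frequency used for the microwave design later in the paper
w0, k0 = 2 * np.pi * f0, 2 * np.pi * f0 / c0
theta1, theta2 = np.deg2rad(45.0), np.deg2rad(0.0)
D = (2 * np.pi / k0) / (np.sin(theta1) - np.sin(theta2))

def free_space_admittance_matrix():
    n = np.arange(-N, N + 1)
    krxn = k0 * np.sin(theta1) + 2 * np.pi * n / D          # tangential wavenumbers of the harmonics
    krzn = np.sqrt(k0**2 - krxn**2 + 0j)                    # normal wavenumbers (imaginary for evanescent modes)
    return np.diag(krzn / (w0 * mu0))                       # Y0(n, n) = krzn / (omega0 * mu0)

def reflection_matrix(g):
    # g: dict of Fourier coefficients; the Toeplitz admittance matrix is Ys(r, c) = g[r - c].
    idx = np.arange(-N, N + 1)
    Ys = np.array([[g.get(r - c, 0.0) for c in idx] for r in idx], dtype=complex)
    Y0 = free_space_admittance_matrix()
    return np.linalg.solve(Y0 + Ys, Y0 - Ys)                # Gamma = (Y0 + Ys)^(-1) (Y0 - Ys)

# Example with placeholder coefficients; column N carries the 45-deg beam, column N - 1 the 0-deg beam.
g = {0: 2.654e-3, 1: -7.770e-4}
Gamma = reflection_matrix(g)
A_in_phase = Gamma[:, N] + Gamma[:, N - 1]                  # linear superposition for two in-phase beams

For the CPA profile of Eq. (1), the propagating entries of A_in_phase should vanish, which is exactly the behavior plotted in Fig. 2(b).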
|
311 |
+
As is seen from Fig. 2(b), when the two incident waves
|
312 |
+
are in phase, all the harmonics have zero amplitude,
|
313 |
+
meaning that CPA with no reflected fields occurs. How-
|
314 |
+
ever, when the two incident waves are out of phase, the
|
315 |
+
reflected harmonics come out, including both propagat-
|
316 |
+
ing modes and evanescent ones, proving that the perfect
|
317 |
+
absorption effect is phase-coherent, different from perfect
|
318 |
+
absorption for two angles [45]. To understand the mech-
|
319 |
+
anism of CPA in the metasurface better, the harmonics
|
320 |
+
of the reflected field when single beams illuminate the
|
323 |
+
surface separately are calculated. As shown in Fig. 2(b),
|
324 |
+
the complex amplitudes of every scattered harmonic are
|
325 |
+
equal and 180◦ out of phase (the phases are not shown
|
326 |
+
here) for 45◦ and 0◦ incidences, resulting in destructive
|
327 |
+
cancellation when the two beams illuminate simultane-
|
328 |
+
ously in phase. Here, the propagating harmonic of the
|
329 |
+
order n = 0 is defined at the specular direction of θ1 for
|
330 |
+
both incidences. By properly designing the metasurface
|
331 |
+
with the periodicity of D = λ0/(sin θ1 − sin θ2), three
|
332 |
+
propagating modes corresponding to n = 0, −1, −2 are
|
333 |
+
created, and all the diffracted modes for both incidences
|
334 |
+
have the same wave vectors, ensuring coherent interfer-
|
335 |
+
ence for all corresponding harmonics. In the out-of-phase
|
336 |
+
incidence case, the amplitudes of all the scattered har-
|
337 |
+
monics double as compared to the single-beam case, as
|
338 |
+
shown in Fig. 2(b).
|
339 |
+
The analytical method to solve the surface impedance
|
340 |
+
boundaries used above is based on the objective to real-
|
341 |
+
ize CPA with the amplitudes of both scattered propagat-
|
342 |
+
ing and evanescent harmonics being zero when two co-
|
343 |
+
herent beams illuminate the metasurface simultaneously.
|
344 |
+
Indeed, the amplitudes of evanescent surface modes can
|
345 |
+
be nonzero without breaking the CPA condition, because
|
346 |
+
they do not radiate into the far zone and their power
|
347 |
+
will be dissipated at the lossy surface.
|
348 |
+
Thus, the so-
|
349 |
+
lution of the surface impedance to achieve CPA is not
|
350 |
+
unique if a certain set of evanescent waves with unknown
|
351 |
+
complex amplitudes is excited. In addition to CPA, we
|
352 |
+
invoke another functionality of coherent control of reflec-
|
353 |
+
tion with single direction, i.e. eliminating the unwanted
|
354 |
+
outgoing beams at n = −1, −2 orders and keeping the
|
355 |
+
n = 0 order with the maximal amplitude, when the two
|
356 |
+
coherent incident beams are out-of-phase. In this case,
|
357 |
+
finding the complex amplitudes of infinite numbers of
|
358 |
+
evanescent modes for each incidence scenario is difficult
|
359 |
+
or even impossible. Thus, instead of using the analyti-
|
360 |
+
cal method of calculating the surface impedance profile
|
361 |
+
according to the total fields on the boundary, we ap-
|
362 |
+
ply a mathematical optimization algorithm described in
|
363 |
+
Ref. [42] and based on the scattering matrix calculation
|
364 |
+
to find a surface impedance profile that simultaneously
|
365 |
+
ensures the coherent control capability for absorption and
|
366 |
+
reflection of the surface. First, the metasurface is mod-
|
367 |
+
elled as in Eq. (3). To suppress propagating modes at
|
368 |
+
the negative orders (n = −1, −2) and ensure that only
|
369 |
+
the reflection channel at 45◦ is open, the Fourier series of
|
370 |
+
the surface admittance Ys(x) are set to be unilateral as
|
371 |
+
Ys(x) = Σ_{n=0}^{4} gn e−j2nπx/D with non-negative-order se-
|
373 |
+
ries coefficients being nonzero (only five coefficients from
|
374 |
+
g0 to g4 are used for improving optimization efficiency).
|
375 |
+
This setting is reasonable because the unilateral surface
|
376 |
+
admittance, making the admittance matrix Ys a lower
|
377 |
+
triangular matrix, can lead to the reflection matrix Γ
|
378 |
+
also being a lower triangular matrix, as is seen from
|
379 |
+
Eq. (4). Consequently, the scattered modes contain only
|
380 |
+
components of non-negative orders (n ≥ 0). This effect
|
381 |
+
highlights the role of unidirectional evanescent fields as
|
382 |
+
a mechanism of suppressing propagating modes at the
|
383 |
+
negative orders (n = −1, −2). Moreover, to ensure that
|
384 |
+
the grid is a passive metasurface, we need to impose con-
|
385 |
+
straints ℜ(Ys) ≥ 0, i.e., ℜ(g0) ≥ |g1| + |g2| + |g3| + |g4|.
|
386 |
+
Secondly, the optimization goal is formulated as 6 ob-
|
387 |
+
jectives, including (|A0|, |A−1|, |A−2|) = (0, 0, 0) for the
|
388 |
+
in-phase scenario, and (|A0|, |A−1|, |A−2|) = (A0max, 0, 0)
|
389 |
+
for the out-of-phase scenario, where A0max is the maxi-
|
390 |
+
mum magnitude of reflection in the out-of-phase case.
|
391 |
+
In each trial of the optimization, an array of gn is as-
|
392 |
+
sumed, and the value of all the objectives are calcu-
|
393 |
+
lated using Eq.(4). The sum of errors calculated for all
|
394 |
+
the objectives is defined as a cost function C. By em-
|
395 |
+
ploying MultiStart and fmincon optimization algorithms,
|
396 |
+
the maximum magnitude of the out-of-phase reflection
|
397 |
+
A0max = 0.34 is searched out, and the minimum value
|
398 |
+
of C close to zero is achieved, meaning that the solu-
|
399 |
+
tions of the impedance profile to realize the desired EM
|
400 |
+
responses including CPA and single-direction-reflection
|
401 |
+
are obtained.
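A schematic version of this optimization loop, written in Python as a stand-in for the MATLAB MultiStart/fmincon workflow actually used, could look as follows. The packing of the unknowns, the penalty weights, the crude multi-start, and the optimizer choice are our own assumptions; reflection_matrix() is the routine sketched after Eq. (4), and the target value 0.34 simply reuses the A0max reported above.

import numpy as np
from scipy.optimize import minimize

N = 8                                   # truncation order, as in the sketch after Eq. (4)

def cost(p, A0max_target=0.34):
    # p packs Re and Im of the unilateral Fourier coefficients g0..g4 (10 real unknowns).
    g = {n: p[2 * n] + 1j * p[2 * n + 1] for n in range(5)}
    if g[0].real < sum(abs(g[n]) for n in range(1, 5)):     # passivity constraint Re(Ys) >= 0
        return 1e3
    Gamma = reflection_matrix(g)
    A_in = Gamma[:, N] + Gamma[:, N - 1]                    # two beams in phase
    A_out = Gamma[:, N] - Gamma[:, N - 1]                   # two beams out of phase
    err = sum(abs(A_in[N - k])**2 for k in range(3))        # CPA: orders n = 0, -1, -2 must vanish
    err += (abs(A_out[N]) - A0max_target)**2                # out of phase: keep only the n = 0 beam
    err += abs(A_out[N - 1])**2 + abs(A_out[N - 2])**2
    return err

best = min((minimize(cost, 1e-3 * np.random.randn(10), method="Nelder-Mead")
            for _ in range(20)), key=lambda r: r.fun)       # crude multi-start over random seeds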
|
402 |
+
Figure 2(c) shows a typical optimized solution of the
|
403 |
+
surface impedance, which exhibits positive resistance ev-
|
404 |
+
erywhere along the metasurface. The calculated ampli-
|
405 |
+
tudes of scattered harmonics for single-beam incidences
|
406 |
+
at 45◦ and 0◦, and for two-beam incidences in phase and
|
407 |
+
out of phase, for the impedance profile in Fig. 2(c), are
|
408 |
+
given in Fig. 2(d), revealing the unilateral characteristic
|
409 |
+
of scattering. We can see that the propagating compo-
|
410 |
+
nents at n = −1, −2 orders are suppressed successfully
|
411 |
+
by exciting the unidirectional evanescent wave. The only
|
412 |
+
remaining propagating reflected channel is n = 0 order
|
413 |
+
at the outgoing angle of 45◦. When two incoming beams
|
414 |
+
are in phase, the reflected propagating harmonic (n = 0)
|
415 |
+
of each beam cancel each other because they have the
|
416 |
+
same amplitude and π-reflection-phase difference. Dis-
|
417 |
+
tinct from the zero-amplitude of all the harmonics for the
|
418 |
+
in-phase CPA scenario in Fig. 2(b), the CPA in Fig. 2(d)
|
419 |
+
occurs with non-zero-amplitude evanescent modes in the
|
420 |
+
n ≥ 1 orders. The amplitude of reflected electric field
|
421 |
+
at 45◦ (n = 0) is doubled into A0max = 0.34 when two
|
422 |
+
incoming beams are out of phase (∆φ = π).
|
423 |
+
We can
|
424 |
+
conclude that the reflected power at 45◦ can be contin-
|
425 |
+
uously controlled by phase tuning of the control beam.
|
426 |
+
When the two beams are out of phase, the reflected power
|
427 |
+
normalized by the incident beam power at 45◦ has the
|
428 |
+
maximum reflection efficiency of 11.56 %.
|
429 |
+
III.
|
430 |
+
OPTIMIZATION AND PRACTICAL
|
431 |
+
DESIGN
|
432 |
+
Low efficiency of the above design based on the im-
|
433 |
+
penetrable impedance model calls for optimization with
|
434 |
+
the help of additional degrees of freedom. One possibility
|
435 |
+
can be the use of one or more parameters of the actual
|
436 |
+
implementation of the metasurface.
|
437 |
+
In general, the impedance surface in the impenetra-
|
438 |
+
ble model used above can be realized as a periodic metal
|
439 |
+
451 |
+
FIG. 3.
|
452 |
+
Schematics of reflection amplitude modulation for
|
453 |
+
two coherent waves with the phase difference ∆φ incident on a
|
454 |
+
periodic sheet over a grounded dielectric slab. The amplitude
|
455 |
+
of the output beam is modulated continuously by varying ∆φ,
|
456 |
+
and switched between 0 (coherent perfect absorption) and 1
|
457 |
+
(coherent maximum reflection) when ∆φ is switched between
|
458 |
+
even and odd multiples of π.
|
459 |
+
pattern on a thin grounded dielectric slab, as shown in
|
460 |
+
Fig. 3. The structure can be considered as a grid admit-
|
461 |
+
tance of the top pattern with a shunt admittance of the
|
462 |
+
grounded substrate. The characteristic admittance ma-
|
463 |
+
trix Yd of the grounded substrate contains only diagonal
|
464 |
+
terms Yd(n, n), where Yd(n, n) is the admittance of the
|
465 |
+
n-th harmonic, and it is expressed as
|
466 |
+
Yd(n, n) = k^d_rzn/[jµ0ω0 tan(k^d_rzn h)],        (5)
where k^d_rzn = √(ω0² ϵ0 ϵd µ0 − k²_rxn) is the normal compo-
|
475 |
+
nent of the wavevector in the substrate (see Eq.S23 of
|
476 |
+
the Supplemental Material of [42]), ϵd and h are the
|
477 |
+
permittivity and thickness of the substrate, respectively.
|
478 |
+
The reflection matrix is calculated as Γ = (Y0 + Yg +
|
479 |
+
Yd)−1(Y0−Yg−Yd). When the thickness h is ultra-thin
|
480 |
+
compared with the wavelength, for low-order harmonics
|
481 |
+
we have tan(k^d_rzn h) ≈ k^d_rzn h. As is seen from Eq. (5),
|
484 |
+
the admittance for low-order harmonics equals approxi-
|
485 |
+
mately to 1/(jµ0ω0h), unrelated to the harmonic num-
|
486 |
+
ber. Thus, we can approximately design the top surface
|
487 |
+
with the grid admittance Yg(x) = 1/Zs(x) − Yd(0, 0) us-
|
488 |
+
ing the optimized surface impedance Zs(x) in Fig. 2(c),
|
489 |
+
similar to Ref. [41]. Due to the lack of freedom in the sub-
|
490 |
+
strate design, the evanescent fields engineering is quite
|
491 |
+
limited in the impenetrable model, resulting in a low
|
492 |
+
reflection efficiency (11.56 %) in the out-of-phase sce-
|
493 |
+
nario. In order to implement CPA with a high reflec-
|
494 |
+
tion efficiency, we need to use the substrate parameters
|
495 |
+
as additional degrees of freedom in the design. Since the
|
496 |
+
admittance of the grounded substrate with a moderate
|
497 |
+
thickness strongly depends on the harmonic number, the
|
498 |
+
need of complicated matrix operations makes it impos-
|
499 |
+
sible to analytically solve the grid impedance and sub-
|
500 |
+
strate parameters. Thus, the optimization algorithm is
|
501 |
+
extended by introducing the admittance matrix Yd of the
|
502 |
+
grounded substrate, as described in Ref. [42], to search
|
503 |
+
for an optimum solution for the grid impedance profile
|
504 |
+
and substrate thickness.
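To make the role of the substrate explicit, the harmonic-dependent admittance of Eq. (5) and the extended reflection matrix can be assembled as below. This is again an illustrative Python sketch with assumed parameters, reusing the quantities and helper functions defined in the earlier sketches.

import numpy as np

eps0 = 8.8541878128e-12
eps_d = 5.8 * (1 - 0.002j)             # substrate permittivity quoted later in the text
h = 0.2525 * (2 * np.pi / k0)          # substrate thickness found by the optimization

def grounded_substrate_admittance():
    n = np.arange(-N, N + 1)
    krxn = k0 * np.sin(theta1) + 2 * np.pi * n / D
    kdrzn = np.sqrt(w0**2 * eps0 * eps_d * mu0 - krxn**2 + 0j)      # normal wavenumber in the slab
    return np.diag(kdrzn / (1j * mu0 * w0 * np.tan(kdrzn * h)))     # Eq. (5)

def reflection_matrix_with_substrate(g):
    idx = np.arange(-N, N + 1)
    Yg = np.array([[g.get(r - c, 0.0) for c in idx] for r in idx], dtype=complex)
    Y0, Yd = free_space_admittance_matrix(), grounded_substrate_admittance()
    return np.linalg.solve(Y0 + Yg + Yd, Y0 - Yg - Yd)              # Gamma for the penetrable model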
|
505 |
+
According to the results of the impenetrable model,
|
506 |
+
the period of the impedance sheet modulation is set
|
507 |
+
to D = λ0/ sin 45◦, with three propagating channels at
|
508 |
+
−45◦, 0◦, and 45◦. The Fourier series of the grid admit-
|
509 |
+
tance is set to be unilateral as Yg(x) = g0 + g1e−j2πx/D,
|
510 |
+
ensuring that only the reflection channel at 45◦ is open.
|
511 |
+
In the optimization process, two Fourier terms g0 and
|
512 |
+
g1 with four unknowns (the real and imaginary parts)
|
513 |
+
are considered here to reduce complexity. The substrate
|
514 |
+
thickness h is another unknown, and an available sub-
|
515 |
+
strate with the permittivity ϵd = 5.8(1 − j0.002) is used.
|
516 |
+
The optimization goal is formulated as 6 objectives, the
|
517 |
+
same as the objectives in the impenetrable model above.
|
518 |
+
The constraints ℜ(Yg) ≥ 0, i.e., ℜ(g0) ≥ |g1| are imposed
|
519 |
+
to ensure the grid to be a passive metasurface. Addi-
|
520 |
+
tionally, to make the reactance easier to implement by
|
521 |
+
patterning a thin conductive surface, another constraint
|
522 |
+
ℑ(g0) ≥ |g1| is set to ensure that the surface reactance is
|
523 |
+
always capacitive at all points of the metasurface.
|
524 |
+
The maximum magnitude of reflection A0max in the
|
525 |
+
out-of-phase scenario is searched out to be about 1 in
|
526 |
+
the optimization, meaning that a reflection beam at
|
527 |
+
45◦ with amplitude equal to the incident beam I1 is
|
528 |
+
obtained [46].
|
529 |
+
It reveals that the invocation of sub-
|
530 |
+
strate design provides an important additional degree
|
531 |
+
of freedom in engineering auxiliary evanescent modes to
|
532 |
+
find a surface impedance that can realize the desired
|
533 |
+
optimum scattering properties for all incidence scenar-
|
534 |
+
ios.
|
535 |
+
The optimized Fourier coefficients of the grid ad-
|
536 |
+
mittance Yg(x) read g0 = (2.599 + 7.054j) × 10−3 and
|
537 |
+
g1 = (−0.807 + 2.463j) × 10−3. The optimal substrate
|
538 |
+
thickness is h = 0.2525λ0. The required grid impedance
|
539 |
+
which is passive and capacitive along the metasurface is
|
540 |
+
shown in Fig. 4(a).
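A short check of the optimized coefficients confirms the passive and capacitive character of the grid; the values are copied from the text, while the evaluation itself is only our own illustration.

import numpy as np

g0, g1 = 2.599e-3 + 7.054e-3j, -0.807e-3 + 2.463e-3j
x = np.linspace(0.0, 1.0, 400)                       # position in units of the period D
Yg = g0 + g1 * np.exp(-2j * np.pi * x)               # unilateral grid admittance Yg(x)
Zg = 1.0 / Yg                                        # corresponding grid impedance
print(abs(g1) <= g0.real, abs(g1) <= g0.imag)        # passivity and capacitive-reactance conditions
print(Zg.real.min() > 0.0, Zg.imag.max() < 0.0)      # resistance positive, reactance capacitive everywhere

All four printed conditions evaluate to True, consistent with the profile plotted in Fig. 4(a).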
|
541 |
+
Next, we analyse the scattered harmonics for the de-
|
542 |
+
signed impedance sheet on the metal-backed dielectric
|
543 |
+
substrate [see Fig. 4(b)]. The reflection coefficient of the
|
544 |
+
metasurface has the same magnitude of 0.5 at n = 0
|
545 |
+
order for 45◦ and 0◦ single-beam incidences, resulting
|
546 |
+
from destructive interference when these two beams are
|
547 |
+
in phase. For the out-of-phase scenario, the normalized
|
548 |
+
magnitude of the reflected field at n = 0 order (45◦) is
|
549 |
+
about unity, which means that the reflected power effi-
|
550 |
+
ciency reaches 100% (normalized by the incoming power
|
551 |
+
of the 45◦ beam). Parasitic reflections into other direc-
|
552 |
+
tions (n = −1, −2) are seen to be negligible, due to the
|
553 |
+
unilateral property of the admittance of the surface. The
|
554 |
+
evanescent harmonics are also unidirectional, but quite
|
555 |
+
weak with the magnitude of 0.008 at n = 1 order, and
|
556 |
+
they are absorbed by the lossy structure, ensuring a CPA
|
557 |
+
state. Figure 4(c) illustrates the phase-controlled modu-
|
558 |
+
lation of reflections at three propagating orders. The re-
|
559 |
+
flection coefficient at 45◦ can be continuously controlled
|
560 |
+
from 0 to 1 by phase tuning, with the other two par-
|
561 |
+
asitic reflections maintained very close to zero.
|
562 |
+
This
|
563 |
+
phase-sensitive modulation between CPA and coherent
|
564 |
+
616 |
+
FIG. 4.
|
617 |
+
(a) The optimized and discretized grid impedance distribution over one period. (b) Amplitudes of the scattered
|
618 |
+
harmonics when the optimized gradient metasurface is illuminated by a single beam at 45◦ and 0◦, and for two-beam in-phase
|
619 |
+
and out-of-phase illuminations, respectively. (c) The normalized amplitudes of three propagating harmonics (n = 0, −1, −2)
|
620 |
+
with a varying phase difference ∆φ between incidences at 45◦ and 0◦. (d) The scattered electric fields and power density flow
|
621 |
+
distributions for the metasurface modeled by the discretized grid impedance (step-wise approximation, 6 subcells per period)
|
622 |
+
on top of a grounded dielectric substrate. Two plane-wave incidences are in phase (left) and out of phase (right).
|
623 |
+
maximum reflection (CMR) without parasitic reflections
|
624 |
+
is important in light switching applications where a low-
|
625 |
+
return-loss characteristic is required. See the Supplemen-
|
626 |
+
tal Animation [43] for the switch of reflected beam by an
|
627 |
+
incident phase-controlled wave.
|
628 |
+
In implementations, the influence of discretization on
|
629 |
+
the metasurface performance is an important factor (see
|
630 |
+
detailed analysis of scattered harmonics versus the num-
|
631 |
+
ber of subcells in Ref. [43]).
|
632 |
+
We use six subcells over
|
633 |
+
a period and each discretized impedance value is set at
|
634 |
+
the central point of each subcell, as shown in Fig. 4(a).
|
635 |
+
The scattered fields from the ideal impedance sheet on
|
636 |
+
the metal-backed dielectric slab for both in-phase and
|
637 |
+
out-of-phase incidences are presented in Fig. 4(d), using
|
638 |
+
full-wave simulations in Comsol. The reflected field dis-
|
639 |
+
tribution confirms that the metasurface with six subcells
|
640 |
+
per period possesses the desired response: nearly per-
|
641 |
+
fect absorption with reflection amplitude of only 0.023
|
642 |
+
for two in-phase illuminations and nearly total reflection
|
643 |
+
at 45◦ for two out-of-phase illuminations, relative to the
|
644 |
+
intensity of the 45◦ incidence.
|
645 |
+
It is seen that the top
|
646 |
+
lossy sheet and reflective ground separated by the slab
|
647 |
+
act as a leaky-wave cavity with enhanced fields. For the
|
648 |
+
in-phase scenario, the direct reflections of the top surface
|
649 |
+
and leaky wave components of the cavity destructively
|
650 |
+
cancel out, and all the power is absorbed by the lossy
|
651 |
+
surface, causing CPA. By changing the initial phase dif-
|
652 |
+
ference between the two coherent incidences into π, con-
|
653 |
+
structive interference occurs among these components,
|
654 |
+
which results in nearly total reflection. Note that in the
|
655 |
+
out-of-phase case a half of the total incoming power (two
|
656 |
+
incident beams) is still absorbed by the lossy surface.
|
657 |
+
IV.
|
658 |
+
PHYSICAL IMPLEMENTATION AND
|
659 |
+
EXPERIMENTAL VALIDATION
|
660 |
+
The theory above is general and applies to any fre-
|
661 |
+
quency, and we choose the microwave band for a proof
|
662 |
+
of concept demonstration. The required impedance pro-
|
663 |
+
file at 15.22 GHz is realized using an ITO film with the
|
664 |
+
surface resistance of 5.5 Ω/sq supported by a grounded
|
665 |
+
732 |
+
FIG. 5.
|
733 |
+
(a) Simulated and (d) measured reflection efficiency spectrum for different diffracted modes of each single beam at
|
734 |
+
0◦ (solid lines) and 45◦ (dashed lines). (b) Schematic of the experimental setup (top) and photograph of the fabricated sample
|
735 |
+
(bottom). (c) Signals at 15.22 GHz measured by the receiving antenna at different orientation angles with the transmitting
|
736 |
+
antenna at 0◦ and 45◦.
|
737 |
+
dielectric slab with the thickness h = 4.95 mm, as
|
738 |
+
shown in Fig. 3.
|
739 |
+
The detailed parameters and struc-
|
740 |
+
tures of each unit cell are presented in the Supplementary
|
741 |
+
Material[43]. Due to the resolution limitation of picosec-
|
742 |
+
ond laser micro-processing, the complex grid impedance
|
743 |
+
is implemented as six subcells, and each subcell is divided
|
744 |
+
into four equal sub-subcells in order to make the local
|
745 |
+
design of the gradient impedance more robust. By struc-
|
746 |
+
turing the homogeneous resistive ITO film into I-shaped
|
747 |
+
cells, the required grid resistance and reactance on a sur-
|
748 |
+
face in Fig. 4(a) can be created. For y-polarization inci-
|
749 |
+
dent waves, such I-shaped resonators can be modeled as
|
750 |
+
RLC series circuits. The required resistance is realized by
|
751 |
+
tailoring the width and length of the ITO strips. Smaller
|
752 |
+
width and longer length result in higher grid resistance.
|
753 |
+
The required reactance can be tailored by adjusting ca-
|
754 |
+
pacitance of the gap, which can be increased by narrow-
|
755 |
+
ing the gap or increasing the length or width of the bar,
|
756 |
+
with a small influence on the resistive part. The 5th and
|
757 |
+
6th subcells degenerate into strips, to implement resistive
|
758 |
+
parts as close to the theoretical value as possible. How-
|
759 |
+
ever, there are still deviations of 3.6 Ω and 1.1 Ω from
|
760 |
+
the theoretical resistances of the 5th and 6th subcells,
|
761 |
+
respectively. The deviation can be eliminated if an ITO
|
762 |
+
film with a lower surface resistance is utilized. To sim-
|
763 |
+
plify the fabrication process, we neglect this deviation.
|
764 |
+
The impact is analyzed theoretically, showing that the
|
765 |
+
reflection amplitude in the in-phase scenario increases
|
766 |
+
from 0.023 to 0.065, which is tolerable in experiments.
|
767 |
+
Since the two beams with 0◦ and 45◦ incidence angles
|
768 |
+
illuminate the surface simultaneously, all the elements
|
769 |
+
should have angle-independent surface impedances. The
|
770 |
+
|
771 |
+
I-shaped resonators have angle-insensitive impedance un-
|
780 |
+
der TE incidences, satisfying this requirement [47]. In the
|
781 |
+
strips of the 5th and 6th subcells, narrow slits are cut out
|
782 |
+
to reduce the angular sensitivity of the impedance. All
|
783 |
+
the subcells have been optimized with the geometrical
|
784 |
+
dimensions specified in Ref. [43].
|
785 |
+
Figure 5(a) shows the simulated frequency response of
|
786 |
+
the metasurface for the normal and 45◦ incidences. For
|
787 |
+
the normal illumination, strong reflections occur at n =
|
788 |
+
−1 and n = 0 harmonics (denoted as ξ−1 and ξ0), and the
|
789 |
+
amplitude of the n = −2 scattered propagating mode is
|
790 |
+
nearly zero in the whole frequency band. The reflection
|
791 |
+
at the n = −1 mode (specular reflection at 0◦) also has a
|
792 |
+
near-zero dip at the design frequency of 15.22 GHz, and
|
793 |
+
the reflection efficiency at the n = 0 mode(anomalous re-
|
794 |
+
flection at 0◦) is about 13.9% (the relative amplitude is
|
795 |
+
0.44). Note that for anomalous reflection, the efficiency
|
796 |
+
is calculated as ξ = (Er/Ei)2cos θr/cos θi [37]. For the
|
797 |
+
45◦ illumination, the reflections at both n = −1 and
|
798 |
+
n = −2 modes (ξ′−1 and ξ′−2) are close to zero, and
the efficiency at the n = 0 mode (ξ′0) is about 21% at
|
803 |
+
15.22 GHz (the relative amplitude is 0.46). Therefore, at
|
804 |
+
the operating frequency 15.22 GHz, the reflected modes
|
805 |
+
for both incidences at the outgoing angle of 45◦ are al-
|
806 |
+
most equal-amplitude, satisfying the condition of CPA.
|
807 |
+
The scattered electric field distributions of the designed
|
808 |
+
metasurface illuminated by two beams in the in-phase
|
809 |
+
and out-of-phase scenarios obtained from full-wave sim-
|
810 |
+
ulations are presented in Ref. [43]. It can be seen that
|
811 |
+
when the two illuminations are in phase, the total scat-
|
812 |
+
tered fields are quite small (0.02), indicating nearly per-
|
813 |
+
fect coherent absorption. However, when the two illumi-
|
814 |
+
nations are switched into the out-of-phase state, the rel-
|
815 |
+
ative amplitude of the scattered fields is about 0.91, and
|
816 |
+
the coherent maximum reflection is mainly along the 45◦
|
817 |
+
direction.
|
818 |
+
We have fabricated a sample (see Methods) and car-
|
819 |
+
ried out several experiments to validate the theoretical
|
820 |
+
results (see Fig. 5(b)). First, the transmitting antenna
|
821 |
+
is fixed at 0◦, whereas the receiving antenna is moved
|
822 |
+
along the scanning track with a step of 2.5◦. The signal
|
823 |
+
reflected from the metasurface is measured by the receiv-
|
824 |
+
ing antenna at different angles θr. Then, the transmitting
|
825 |
+
antenna is fixed at 45◦ and the receiving antenna is scan-
|
826 |
+
ning its position to measure the reflected signal in the
|
827 |
+
other half space. As shown in Fig. 5(c), the main peaks
|
828 |
+
of reflections for both two incidences occur at θr = 45◦,
|
829 |
+
which is an expected result according to the theory and
|
830 |
+
simulations. There is another reflection peak at θr = 0◦
|
831 |
+
for the normal incidence case, which is about −10 dB
|
832 |
+
lower than the main peak, corresponding to a low spec-
|
833 |
+
ular reflection at 15.22 GHz.
|
834 |
+
To estimate the amplitude efficiency of the metasurface
|
835 |
+
at all three reflection channels, we replaced the metasur-
|
836 |
+
face by a copper plate of the identical size and measured
|
837 |
+
the specular reflection signal amplitudes from the refer-
|
838 |
+
ence uniform metal mirror for θi = 2.5◦ (approximately
|
839 |
+
normal incidence), 22.5◦, and 45◦ incidence angles. The
|
840 |
+
specular reflection efficiency of the metasurface for 0◦ and
|
841 |
+
45◦ illuminations are calculated by normalizing the signal
|
842 |
+
amplitude by the amplitude of the signal reflected from
|
843 |
+
the reference plate, illuminated at 2.5◦ and 45◦ angles, re-
|
844 |
+
spectively. As shown in Fig. 5(d), at the design frequency
|
845 |
+
of 15.22 GHz, the specular reflection efficiencies at 0◦ and
|
846 |
+
45◦ (ξ−1 and ξ′0) equal 0.8% and 18.6% (the relative am-
|
848 |
+
plitude is 0.431), respectively. For the anomalous reflec-
|
849 |
+
tion at the n = 0 mode for the normal incidence, the re-
|
850 |
+
flection angle is θr = arcsin(15.22/(√2 f)), which equals
|
853 |
+
45◦ at 15.22 GHz and varies from 63.7◦ to 36.7◦ as the
|
854 |
+
frequency changes from 12 GHz to 18 GHz. Therefore,
|
855 |
+
we choose the signal data of a different receiving angle θr
|
856 |
+
calculated according to different frequency band and nor-
|
857 |
+
malize its signal amplitude by the signal amplitude from
|
858 |
+
the reference mirror for different θr/2 incidence angles.
|
859 |
+
Additionally, we divide the obtained value by an esti-
|
860 |
+
mated correction factor [37] √(cos(θr)/cos(θr/2)), which
|
863 |
+
gives the ratio between the theoretically calculated sig-
|
864 |
+
nal amplitudes from an ideal metasurface (of the same
|
865 |
+
size and made of lossless materials) and a perfectly con-
|
866 |
+
ducting plate.
|
867 |
+
At the design frequency of 15.22 GHz,
|
868 |
+
the correction factor is equal to 0.91, thus the reflection
|
869 |
+
efficiency is calculated as 12%(the relative amplitude is
|
870 |
+
0.412), as shown in Fig. 5(d). The measured efficiency is
|
871 |
+
in good agreement with the results obtained using numer-
|
872 |
+
ical simulations (see Fig. 5(a)), except for some ripples in
|
873 |
+
the ξ0 curve caused by the discrete angular scanning step
|
874 |
+
in the measurement. The relative amplitudes of reflec-
|
875 |
+
tions for both incidences at the n = 0 mode are almost
|
876 |
+
equal in the measurements, verifying the capability for
|
877 |
+
CPA.
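The frequency dependence of the anomalous-reflection angle quoted above is easy to verify numerically; the following small stand-alone check uses only the design numbers already given in the text.

import numpy as np

f = np.array([12e9, 15.22e9, 18e9])                              # frequency in Hz
theta_r = np.degrees(np.arcsin(15.22e9 / (np.sqrt(2.0) * f)))    # reflection angle of the n = 0 mode
print(theta_r)                                                   # approximately [63.7, 45.0, 36.7] degrees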
|
878 |
+
To experimentally verify the phase-controlled reflec-
|
879 |
+
tion by the metasurface, in the last measurement shown
|
880 |
+
in Fig. 6(a), two transmitting antennas fed via a power
|
881 |
+
divider illuminate the metasurface normally and at 45◦.
|
882 |
+
A receiving antenna is placed at the 45◦ angle to mea-
|
883 |
+
sure the total power reflected by the metasurface under
|
884 |
+
two simultaneous illuminations. To avoid severe insertion
|
885 |
+
loss caused by the use of a phase shifter in one branch,
|
886 |
+
which may increase the amplitude inequality between two
|
887 |
+
beams, we mimic the phase-difference-tuning process by
|
888 |
+
moving the metasurface along the x direction. As seen in
|
889 |
+
Fig. 6(b), the phase difference between the two beams is
|
890 |
+
linearly varying when we change the horizontal position
|
891 |
+
of the metasurface. Therefore, this shift is equivalent to
|
892 |
+
a phase change between the two beams. To ensure the
|
893 |
+
effectively-illuminated area of the metasurface to remain
|
894 |
+
stable during the moving process, we put two pieces of ab-
|
895 |
+
sorbing foam on top of both sides of the sample. The to-
|
896 |
+
tal received power, normalized by the maximum power of
|
897 |
+
reflected wave is changing with varying the distance ∆x.
|
898 |
+
As is seen in Fig. 6(c), the modulation depths reach 0.15
|
899 |
+
and 0.04 at 15.22 GHz and 15.47 GHz, respectively. This
|
900 |
+
result indicates that coherent enhancement and cancella-
|
901 |
+
tion near the design frequency can be achieved by tuning
|
902 |
+
the phase difference of the two incident beams. The pe-
|
903 |
+
riod of the modulation is about 29 mm, almost equal to
|
904 |
+
943 |
+
FIG. 6.
|
944 |
+
(a) Experimental setup. Two transmitting antennas fed via a power divider illuminate the metasurface normally and
|
945 |
+
at 45◦. A receiving antenna is placed at 45◦ to measure the total reflected power. Due to the periodicity of the metasurface,
|
946 |
+
continuously-changing phase difference between the two beams can be emulated by moving the metasurface horizontally along
|
947 |
+
the impedance variation direction. Two pieces of absorbing foam are put on both sides, ensuring that the effective exposure
|
948 |
+
area of the metasurface remains fixed when the surface is shifted. (b) The reference point O is the intersection point of the 0◦
|
949 |
+
and 45◦ beams on the metasurface when the phase difference is 0. The phase difference at a distance ∆x from the reference
|
950 |
+
point O is ∆φ = 2π∆x/D, which is linearly varying as a function of the horizontal distance ∆x. (c) The normalized received
|
951 |
+
power for different metasurface positions at 13, 15.22, 15.47, and 17 GHz.
|
952 |
+
the period of the metasurface, which validates the theo-
|
953 |
+
retical analysis. However, at the frequency far from the
|
954 |
+
designed one, for instance at 13 GHz and 17 GHz, the
|
955 |
+
coherent phenomenon becomes much weaker, as is seen
|
956 |
+
in Fig. 6(c), due to a mismatch of the main reflection
|
957 |
+
angles and the reflection amplitudes of the normally and
|
958 |
+
obliquely incident waves.
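As a quick consistency check of the measured modulation period, the metasurface period at the design frequency can be computed directly; this is a small stand-alone snippet of our own.

import numpy as np

c0, f0 = 299792458.0, 15.22e9
D = (c0 / f0) / np.sin(np.deg2rad(45.0))   # metasurface period D = lambda0 / sin(45 deg)
print(D * 1e3)                             # about 27.9 mm, close to the measured 29 mm modulation period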
|
959 |
+
V.
|
960 |
+
DISCUSSION
|
961 |
+
We have demonstrated coherent perfect absorption of
|
962 |
+
two beams incident at arbitrary angles. It has been found
|
963 |
+
that this effect is possible for relative beam amplitudes
|
964 |
+
within a certain range using a gradient passive planar
|
965 |
+
structure. When these two incidences are switched into the out-
|
966 |
+
of-phase state, reflections at all three propagating chan-
|
967 |
+
nels come out. To realize coherent control of reflection
|
968 |
+
with single direction, the other parasitic reflections can
|
969 |
+
be suppressed by introducing unidirectional evanescent
|
970 |
+
modes excitation. To realize a larger reflection for out-
|
971 |
+
of-phase scenario, we use an optimization algorithm to
|
972 |
+
search for an optimum solution of grid impedance profile
|
973 |
+
and substrate thickness, which is powerful when many
|
974 |
+
degrees of freedom are required in multi-channel meta-
|
975 |
+
surface design. In the other design methodologies such as
|
976 |
+
non-local metasurface [37] and plasmonic grating [23, 48],
|
977 |
+
where the interference between all the elements of a unit
|
978 |
+
cell are important for the device performance, a brute-
|
979 |
+
force optimization process in full-wave simulations is re-
|
980 |
+
quired, which is time-consuming and may even fail to work
|
981 |
+
when multiple input beams and multi-functionalities for
|
982 |
+
multiple channels are involved.
|
983 |
+
Compared with them,
|
984 |
+
our approach is much more robust and efficient due to
|
985 |
+
a rigorous theoretical analysis, particularly by introduc-
|
986 |
+
ing unidirectional evanescent mode in the scattered field
|
987 |
+
to eliminate parasitic reflections. Moreover, the angle-
|
988 |
+
dependence of the impedance of substrate is also con-
|
989 |
+
sidered in our algorithm, which is vital in metasurface
|
990 |
+
design for multiple-angle incidence scenarios [49, 50].
|
991 |
+
We have realized a gradient metasurface with angular-
|
992 |
+
asymmetric coherent perfect absorption and reflection
|
993 |
+
functionalities. The concept of wave control via evanes-
|
994 |
+
cent harmonics engineering and independent control of
|
995 |
+
the electromagnetic response for multiple illuminations
|
996 |
+
can be applied for engineering multi-functional wave pro-
|
997 |
+
cesses. Metasurface-based designs are attractive in prac-
|
998 |
+
tical applications.
|
999 |
+
For example, by placing a planar
|
1000 |
+
structure on a metal-grounded dielectric layer, the veloc-
|
1001 |
+
ity or position of the object can be detected by monitor-
|
1002 |
+
ing the total reflection of such an object under two coher-
|
1005 |
+
ent illuminations. Additionally, we hope that this work
|
1006 |
+
can find promising applications in phased-array anten-
|
1007 |
+
nas, one-side detection and sensing, and optical switches
|
1008 |
+
with low insertion loss.
|
1009 |
+
VI.
|
1010 |
+
METHODS
|
1011 |
+
Design and modeling of the metasurface
|
1012 |
+
The prototype presented in this work was designed
|
1013 |
+
for operation at 15.22 GHz. The grid impedance is dis-
|
1014 |
+
cretized into 6 sub-cells, and each sub-cell is divided into
|
1015 |
+
4 equal sub-sub-cells.
|
1016 |
+
The effective grid impedance of
|
1017 |
+
each sub-sub-cell is retrieved from simulated reflection
|
1018 |
+
coefficient (S11) through the transmission-line method
|
1019 |
+
approach (see the Supplementary Material[43]). Numeri-
|
1020 |
+
cal simulations are carried out using a frequency-domain
|
1021 |
+
solver, implemented by CST MWS. Excitations propa-
|
1022 |
+
gating along the z-direction from port 1 with the electric
|
1023 |
+
field along the y-direction and the magnetic field along
|
1024 |
+
the x-direction are used in the simulations to obtain the
|
1025 |
+
S11 parameter. The dimensions of all the elements in the
|
1026 |
+
unit cells are designed and optimized one by one to fit
|
1027 |
+
the theoretically found required surface impedance.
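The retrieval step can be illustrated with a standard transmission-line de-embedding. The snippet below is a plausible reconstruction on our part (the authors' exact procedure is described in the Supplementary Material), and grid_impedance_from_S11 is a hypothetical helper name.

import numpy as np

eps0, mu0 = 8.8541878128e-12, 4e-7 * np.pi
f0 = 15.22e9
w0 = 2 * np.pi * f0
Z0 = np.sqrt(mu0 / eps0)
eps_d, h = 5.8 * (1 - 0.002j), 4.95e-3                     # substrate parameters used in the design

def grid_impedance_from_S11(S11):
    Yin = (1.0 / Z0) * (1.0 - S11) / (1.0 + S11)           # input admittance seen at the sheet plane
    kd = w0 * np.sqrt(eps0 * eps_d * mu0)                  # wavenumber in the substrate (normal incidence)
    Yd = kd / (1j * mu0 * w0 * np.tan(kd * h))             # grounded-slab admittance, Eq. (5) with n = 0
    return 1.0 / (Yin - Yd)                                # sheet (grid) impedance of the patterned ITO cell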
|
1028 |
+
Once the dimensions of all the elements in the unit
|
1029 |
+
cells are found, we perform numerical simulations of the
|
1030 |
+
unit cell in CST MWS for the normal and 45◦ incidences.
|
1031 |
+
The simulation domain of the complete unit cell was D×
|
1032 |
+
Dy × D (along the x, y, and z directions), the unit cell
|
1033 |
+
boundary condition and the Floquet port were set. The
|
1034 |
+
scattered fields for the normal and 45◦ incidences were
|
1035 |
+
calculated by subtracting the incident waves from the
|
1036 |
+
total fields. Finally, the total scattered fields when the
|
1037 |
+
metasurface is illuminated by two waves simultaneously
|
1038 |
+
were obtained by adding the scattered field of each single
|
1039 |
+
beam with different phase differences.
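The coherent superposition itself amounts to one line per phase difference; in the illustrative snippet below, E_sc_45 and E_sc_0 are placeholders standing for the complex scattered-field maps exported from the two single-beam simulations.

import numpy as np

E_sc_45 = np.zeros((100, 100), dtype=complex)  # placeholder field map for the 45-deg incidence
E_sc_0 = np.zeros((100, 100), dtype=complex)   # placeholder field map for the 0-deg incidence

def total_scattered_field(dphi):
    # Coherent sum of the two single-beam responses for a phase difference dphi between the beams.
    return E_sc_45 + np.exp(1j * dphi) * E_sc_0

E_in_phase = total_scattered_field(0.0)        # CPA scenario
E_out_of_phase = total_scattered_field(np.pi)  # coherent maximum reflection scenario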
|
1040 |
+
Realization and measurement
|
1041 |
+
The ITO pattern of the metasurface was manufactured
|
1042 |
+
using the picosecond laser micromachining technology on
|
1043 |
+
a 0.175-mm-thick ITO/PET film. The sample comprises
|
1044 |
+
10 unit cells along the x axis and 66 unit cells along the
|
1045 |
+
y axis [Fig. 5(b)] and has the size of 14.15λ × 10.04λ =
|
1046 |
+
278.9 mm × 198 mm. The ITO/PET film was adhered
|
1047 |
+
to a 4.95-mm-thick F4BTM substrate with ϵ = 5.8(1 −
|
1048 |
+
j0.01) backed by a copper ground plane.
|
1049 |
+
The operation of the designed metasurface was tested
|
1050 |
+
using a NRL-arc setup [Fig.
|
1051 |
+
5(b)]. In the experiment,
|
1052 |
+
two double-ridged horn antennas with 17 dBi gain at
|
1053 |
+
15.22 GHz are connected to a vector network analyzer
|
1054 |
+
as the transmitter and receiver. The metasurface was lo-
|
1055 |
+
cated at a distance of 2 m (about 101λ) from both the
|
1056 |
+
transmitting and receiving antennas where the radiation
|
1057 |
+
from the antenna can be approximated as a plane wave.
|
1058 |
+
The antennas are moved along the scanning track to mea-
|
1059 |
+
sure the reflection towards different angles. Time gating
|
1060 |
+
is employed to filter out all the multiple scattering noise
|
1061 |
+
signals received by the antenna [43].
|
1062 |
+
VII.
|
1063 |
+
DATA AVAILABILITY
|
1064 |
+
The data that support the findings of this study are
|
1065 |
+
available from the corresponding authors upon reason-
|
1066 |
+
able request.
|
1067 |
+
[1] Fu, Y. et al. All-optical logic gates based on nanoscale
|
1068 |
+
plasmonic slot waveguides.
|
1069 |
+
Nano Lett. 12, 5784–5790
|
1070 |
+
(2012).
|
1071 |
+
[2] Fang, X. et al. Ultrafast all-optical switching via coher-
|
1072 |
+
ent modulation of metamaterial absorption. Appl. Phys.
|
1073 |
+
Lett. 104, 141102 (2014).
|
1074 |
+
[3] Shi, J. et al. Coherent control of snell’s law at metasur-
|
1075 |
+
faces. Opt. Express 22, 21051–21060 (2014).
|
1076 |
+
[4] Papaioannou, M., Plum, E., Valente, J., Rogers, E. T.
|
1077 |
+
& Zheludev, N. I. Two-dimensional control of light with
|
1078 |
+
light on metasurfaces. Light: Sci. Appl. 5, e16070 (2016).
|
1079 |
+
[5] Papaioannou, M., Plum, E., Valente, J., Rogers, E. T. &
|
1080 |
+
Zheludev, N. I. All-optical multichannel logic based on
|
1081 |
+
coherent perfect absorption in a plasmonic metamaterial.
|
1082 |
+
APL Photonics 1, 090801 (2016).
|
1083 |
+
[6] Fang, X., MacDonald, K. F. & Zheludev, N. I.
|
1084 |
+
Con-
|
1085 |
+
trolling light with light using coherent metadevices: all-
|
1086 |
+
optical transistor, summator and invertor.
|
1087 |
+
Light: Sci.
|
1088 |
+
Appl. 4, e292–e292 (2015).
|
1089 |
+
[7] Silva, A. et al. Performing mathematical operations with
|
1090 |
+
metamaterials. Science 343, 160–163 (2014).
|
1091 |
+
[8] Achouri, K., Lavigne, G., Salem, M. A. & Caloz, C.
|
1092 |
+
Metasurface spatial processor for electromagnetic remote
|
1093 |
+
control. IEEE Trans. Antennas Propag. 64, 1759–1767
|
1094 |
+
(2016).
|
1095 |
+
[9] Zhu, Z., Yuan, J. & Jiang, L. Multifunctional and mul-
|
1096 |
+
tichannel all-optical logic gates based on the in-plane co-
|
1097 |
+
herent control of localized surface plasmons. Opt. Lett.
|
1098 |
+
45, 6362–6365 (2020).
|
1099 |
+
[10] Kang, M. et al. Coherent full polarization control based
|
1100 |
+
on bound states in the continuum. Nat. Commun. 13,
|
1101 |
+
1–9 (2022).
|
1102 |
+
[11] Peng, P. et al.
|
1103 |
+
Coherent control of ultrafast extreme
|
1104 |
+
ultraviolet transient absorption. Nat. Photonics 16, 45–
|
1105 |
+
51 (2022).
|
1106 |
+
[12] Chong, Y. D., Ge, L., Cao, H. & Stone, A. D. Coherent
|
1107 |
+
perfect absorbers: Time-reversed lasers. Phys. Rev. Lett.
|
1108 |
+
105, 053901 (2010).
|
1109 |
+
[13] Wan, W., Chong, Y., Li Ge, H. N., Stone, A. D. & Cao,
|
1110 |
+
H. Time-reversed lasing and interferometric control of
|
1111 |
+
absorption. Science 331, 889–892 (2011).
|
1112 |
+
[14] Dutta-Gupta, S., Deshmukh, R., Gopal, A. V., Martin,
|
1113 |
+
O. J. F. & Gupta, S. D.
|
1114 |
+
Coherent perfect absorption
|
1115 |
+
mediated anomalous reflection and refraction. Opt. Lett.
|
1116 |
+
37, 4452–4454 (2012).
|
1117 |
+
[15] Baranov, D. G., Krasnok, A., Shegai, T., Al`u, A. &
|
1118 |
+
Chong, Y.
|
1119 |
+
Coherent perfect absorbers: linear control
|
1120 |
+
of light with light. Nat. Rev. Mater. 2, 17064 (2017).
|
1121 |
+
|
1122 |
+
11
|
1123 |
+
[16] Pirruccio, G., Ramezani, M., Rodriguez, S. R.-K. & Ri-
|
1124 |
+
vas, J. G. Coherent control of the optical absorption in
|
1125 |
+
a plasmonic lattice coupled to a luminescent layer. Phys.
|
1126 |
+
Rev. Lett. 116, 103002 (2016).
|
1127 |
+
[17] Jung, M. J., Han, C., Yoon, J. W. & Song, S. H. Tem-
|
1128 |
+
perature and gain tuning of plasmonic coherent perfect
|
1129 |
+
absorbers. Opt. Express 23, 19837–19845 (2015).
|
1130 |
+
[18] Yoon, J. W., Jung, M. J. & Song, S. H. Gain-assisted
|
1131 |
+
critical coupling for high-performance coherent perfect
|
1132 |
+
absorbers. Opt. Lett. 40, 2309–2312 (2015).
|
1133 |
+
[19] Kita, S. et al. Coherent control of high efficiency metasur-
|
1134 |
+
face beam deflectors with a back partial reflector. APL
|
1135 |
+
Photonics 2, 046104 (2017).
|
1136 |
+
[20] Xomalis, A. et al. Fibre-optic metadevice for all-optical
|
1137 |
+
signal modulation based on coherent absorption.
|
1138 |
+
Nat.
|
1139 |
+
Commun. 9, 182 (2018).
|
1140 |
+
[21] Wang, C., Sweeney, W. R., Stone, A. D. & Yang, L. Co-
|
1141 |
+
herent perfect absorption at an exceptional point. Science
|
1142 |
+
373, 1261–1265 (2021).
|
1143 |
+
[22] Li, S. et al. Broadband perfect absorption of ultrathin
|
1144 |
+
conductive films with coherent illumination: Superab-
|
1145 |
+
sorption of microwave radiation. Phys. Rev. B 91, 220301
|
1146 |
+
(2015).
|
1147 |
+
[23] Yoon, J. W., Koh, G. M., Song, S. H. & Magnusson,
|
1148 |
+
R.
|
1149 |
+
Measurement and modeling of a complete optical
|
1150 |
+
absorption and scattering by coherent surface plasmon-
|
1151 |
+
polariton excitation using a silver thin-film grating. Phys.
|
1152 |
+
Rev. Lett. 109, 257402 (2012).
|
1153 |
+
[24] Zhang, W. & Zhang, X. Backscattering-immune comput-
|
1154 |
+
ing of spatial differentiation by nonreciprocal plasmonics.
|
1155 |
+
Phys. Rev. Applied 11, 054033 (2019).
|
1156 |
+
[25] Yu, N. et al. Light propagation with phase discontinu-
|
1157 |
+
ities: generalized laws of reflection and refraction. science
|
1158 |
+
334, 333–337 (2011).
|
1159 |
+
[26] Sun, S. et al. Gradient-index meta-surfaces as a bridge
|
1160 |
+
linking propagating waves and surface waves. Nat. Mater.
|
1161 |
+
11, 426–431 (2012).
|
1162 |
+
[27] Kildishev, A. V., Boltasseva, A. & Shalaev, V. M. Pla-
|
1163 |
+
nar photonics with metasurfaces. Science 339, 1232009
|
1164 |
+
(2013).
|
1165 |
+
[28] Epstein, A. & Eleftheriades, G. V. Synthesis of passive
|
1166 |
+
lossless metasurfaces using auxiliary fields for reflection-
|
1167 |
+
less beam splitting and perfect reflection. Physical review
|
1168 |
+
letters 117, 256103 (2016).
|
1169 |
+
[29] Ra’di, Y., Sounas, D. L. & Al`u, A. Metagratings: Beyond
|
1170 |
+
the limits of graded metasurfaces for wave front control.
|
1171 |
+
Phys. Rev. Lett. 119, 067404 (2017).
|
1172 |
+
[30] Epstein, A. & Rabinovich, O. Unveiling the properties of
|
1173 |
+
metagratings via a detailed analytical model for synthesis
|
1174 |
+
and analysis. Physical Review Applied 8, 054037 (2017).
|
1175 |
+
[31] Popov, V., Boust, F. & Burokur, S. N.
|
1176 |
+
Controlling
|
1177 |
+
diffraction patterns with metagratings. Physical Review
|
1178 |
+
Applied 10, 011002 (2018).
|
1179 |
+
[32] Wong, A. M. & Eleftheriades, G. V. Perfect anomalous
|
1180 |
+
reflection with a bipartite huygens’ metasurface. Physical
|
1181 |
+
Review X 8, 011036 (2018).
|
1182 |
+
[33] Cao, Y. et al.
|
1183 |
+
Mechanism behind angularly asymmet-
|
1184 |
+
ric diffraction in phase-gradient metasurfaces. Physical
|
1185 |
+
Review Applied 12, 024006 (2019).
|
1186 |
+
[34] Fu, Y. et al. Reversal of transmission and reflection based
|
1187 |
+
on acoustic metagratings with integer parity design. Na-
|
1188 |
+
ture Commun. 10, 1–8 (2019).
|
1189 |
+
[35] Zhang, Z. et al. Coherent perfect diffraction in metagrat-
|
1190 |
+
ings. Adv. Mater. 32, 2002341 (2020).
|
1191 |
+
[36] Sun, S. et al. High-efficiency broadband anomalous reflec-
|
1192 |
+
tion by gradient meta-surfaces. Nano Lett. 12, 6223–6229
|
1193 |
+
(2012).
|
1194 |
+
[37] D´ıaz-Rubio, A., Asadchy, V. S., Elsakka, A. & Tretyakov,
|
1195 |
+
S. A.
|
1196 |
+
From the generalized reflection law to the re-
|
1197 |
+
alization of perfect anomalous reflectors.
|
1198 |
+
Sci. Adv. 3,
|
1199 |
+
e1602714 (2017).
|
1200 |
+
[38] He, T. et al. Perfect anomalous reflectors at optical fre-
|
1201 |
+
quencies. Science advances 8, eabk3381 (2022).
|
1202 |
+
[39] Cuesta, F., Ptitcyn, G., Mirmoosa, M. & Tretyakov, S.
|
1203 |
+
Coherent retroreflective metasurfaces.
|
1204 |
+
Phys. Rev. Re-
|
1205 |
+
search 3, L032025 (2021).
|
1206 |
+
[40] Cuesta, F., Kuznetsov, A., Ptitcyn, G., Wang, X. &
|
1207 |
+
Tretyakov, S.
|
1208 |
+
Coherent asymmetric absorbers.
|
1209 |
+
Phys.
|
1210 |
+
Rev. Applied 17, 024066 (2022).
|
1211 |
+
[41] Wang, X. et al. Extreme asymmetry in metasurfaces via
|
1212 |
+
evanescent fields engineering: Angular-asymmetric ab-
|
1213 |
+
sorption. Phys. Rev. Lett. 121, 256802 (2018).
|
1214 |
+
[42] Wang, X., D´ıaz-Rubio, A. & Tretyakov, S. A. Indepen-
|
1215 |
+
dent control of multiple channels in metasurface devices.
|
1216 |
+
Phys. Rev. Applied 14, 024089 (2020).
|
1217 |
+
[43] See Supplemental Material for additional information.
|
1218 |
+
[44] Hwang, R.-B.
|
1219 |
+
Periodic structures: mode-matching ap-
|
1220 |
+
proach and applications in electromagnetic engineering
|
1221 |
+
(John Wiley & Sons, 2012).
|
1222 |
+
[45] Zhirihin, D., Simovski, C., Belov, P. & Glybovski, S.
|
1223 |
+
Mushroom high-impedance metasurfaces for perfect ab-
|
1224 |
+
sorption at two angles of incidence.
|
1225 |
+
IEEE Antennas
|
1226 |
+
Wireless Propag. Lett. 16, 2626–2629 (2017).
|
1227 |
+
[46] Wang, X., Asadchy, V. S., Fan, S. & Tretyakov, S. A.
|
1228 |
+
Space–time metasurfaces for power combining of waves.
|
1229 |
+
ACS Photonics 8, 3034–3041 (2021).
|
1230 |
+
[47] Luukkonen, O. et al.
|
1231 |
+
Simple and accurate analytical
|
1232 |
+
model of planar grids and high-impedance surfaces com-
|
1233 |
+
prising metal strips or patches. IEEE Trans. Antennas
|
1234 |
+
Propag. 56, 1624–1632 (2008).
|
1235 |
+
[48] Chen, X. et al. Broadband janus scattering from tilted
|
1236 |
+
dipolar metagratings. Laser Photonics Rev. 16, 2100369
|
1237 |
+
(2022).
|
1238 |
+
[49] Zhang, X. et al. Controlling angular dispersions in optical
|
1239 |
+
metasurfaces. Light Sci. Appl. 9, 1–12 (2020).
|
1240 |
+
[50] Yuan, Y., Cheng, J., Fan, F., Wang, X. & Chang, S. Con-
|
1241 |
+
trol of angular dispersion in dielectric gratings for mul-
|
1242 |
+
tifunctional wavefront shaping and dynamic polarization
|
1243 |
+
conversion. Photonics Res. 9, 2190–2195 (2021).
|
1244 |
+
VIII.
|
1245 |
+
ACKNOWLEDGEMENTS
|
1246 |
+
The authors are grateful to Dr. Viktar S. Asadchy for
|
1247 |
+
useful discussions.
|
1248 |
+
S.M.Z. acknowledges support from
|
1249 |
+
China Scholarship Council. This research was also sup-
|
1250 |
+
ported by the Natural Science Foundation of Zhejiang
|
1251 |
+
Province(LY22F010001), the Natural Science Founda-
|
1252 |
+
tion of China (61701268), and the Fundamental Research
|
1253 |
+
Funds for the Provincial Universities of Zhejiang.
|
1254 |
+
IX.
|
1255 |
+
AUTHOR CONTRIBUTIONS
|
1256 |
+
S.M.Z. and X.C.W. conceived the study. S.M.Z. per-
|
1257 |
+
formed the numerical calculations, and designed the sam-
|
1258 |
+
|
1259 |
+
12
|
1260 |
+
ples. S.M.Z. conducted the experiment. S.M.Z., X.C.W.,
|
1261 |
+
and S.A.T. wrote the paper.
|
1262 |
+
S.A.T. supervised the
|
1263 |
+
project. All authors contributed to scientific discussions
|
1264 |
+
and editing the manuscript.
|
1265 |
+
X.
|
1266 |
+
COMPETING INTERESTS
|
1267 |
+
The authors declare no competing interests.
|
1268 |
+
XI.
|
1269 |
+
ADDITIONAL INFORMATION
|
1270 |
+
Supplementary information The online version
|
1271 |
+
contains supplementary material available at https:xxxx.
|
1272 |
+
Correspondence and requests for materials should
|
1273 |
+
be addressed to Shuomin Zhong or Xuchen Wang.
|
1274 |
+
|
69E1T4oBgHgl3EQfBgJT/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
6NAyT4oBgHgl3EQfpfik/content/tmp_files/2301.00527v1.pdf.txt
ADDED
@@ -0,0 +1,775 @@
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Diffusion Probabilistic Models for Scene-Scale 3D Categorical Data
|
2 |
+
Jumin Lee
|
3 |
+
Woobin Im
|
4 |
+
Sebin Lee
|
5 |
+
Sung-Eui Yoon
|
6 |
+
Korea Advanced Institute of Science and Technology (KAIST)
|
7 |
+
{jmlee,iwbn,seb.lee,sungeui}@kaist.ac.kr
|
8 |
+
Abstract
|
9 |
+
In this paper, we learn a diffusion model to generate
|
10 |
+
3D data on a scene-scale. Specifically, our model crafts a
|
11 |
+
3D scene consisting of multiple objects, while recent diffu-
|
12 |
+
sion research has focused on a single object. To realize our
|
13 |
+
goal, we represent a scene with discrete class labels, i.e.,
|
14 |
+
categorical distribution, to assign multiple objects into se-
|
15 |
+
mantic categories. Thus, we extend discrete diffusion mod-
|
16 |
+
els to learn scene-scale categorical distributions. In addi-
|
17 |
+
tion, we validate that a latent diffusion model can reduce
|
18 |
+
computation costs for training and deploying. To the best
|
19 |
+
of our knowledge, our work is the first to apply discrete
|
20 |
+
and latent diffusion for 3D categorical data on a scene-
|
21 |
+
scale. We further propose to perform semantic scene com-
|
22 |
+
pletion (SSC) by learning a conditional distribution using
|
23 |
+
our diffusion model, where the condition is a partial ob-
|
24 |
+
servation in a sparse point cloud. In experiments, we em-
|
25 |
+
pirically show that our diffusion models not only generate
|
26 |
+
reasonable scenes, but also perform the scene completion
|
27 |
+
task better than a discriminative model. Our code and mod-
|
28 |
+
els are available at https://github.com/zoomin-
|
29 |
+
lee/scene-scale-diffusion.
|
30 |
+
1. Introduction
|
31 |
+
Learning to generate 3D data has received much atten-
|
32 |
+
tion thanks to its high performance and promising down-
|
33 |
+
stream tasks. For instance, a 3D generative model with a
|
34 |
+
diffusion probabilistic model [2] has shown its effectiveness
|
35 |
+
in 3D completion [2] and text-to-3D generation [1,3].
|
36 |
+
While recent models have focused on 3D object gener-
|
37 |
+
ation, we aim beyond a single object by generating a 3D
|
38 |
+
scene with multiple objects. In Fig. 1b, we show a sam-
|
39 |
+
ple scene from our generative model, where we observe the
|
40 |
+
plausible placement of the objects, as well as their correct
|
41 |
+
shapes. Compared to the existing object-scale model [1]
|
42 |
+
(Fig. 1a), our scene-scale model can be used in a broader
|
43 |
+
application, such as semantic scene completion (Sec. 4.3),
|
44 |
+
where we complete a scene given a sparse LiDAR point
|
45 |
+
[Figure 1 graphic: panel (a) Object-scale generation, panel (b) Scene-scale generation (ours); class legend: Pedestrian, Building, Vegetation, Vehicle; "Diffusion Model" blocks.]
|
58 |
+
Figure 1. Comparison of object-scale and scene-scale generation
|
59 |
+
(ours). Our result includes multiple objects in a generated scene,
|
60 |
+
while the object-scale generation crafts one object at a time. (a) is
|
61 |
+
obtained by Point-E [1].
|
62 |
+
cloud.
|
63 |
+
We base our scene-scale 3D generation method on a dif-
|
64 |
+
fusion model, which has shown remarkable performance in
|
65 |
+
modeling complex real-world data, such as realistic 2D im-
|
66 |
+
ages [4–6] and 3D objects [1–3]. We develop and evaluate
|
67 |
+
diffusion models learning a scene-scale 3D categorical dis-
|
68 |
+
tribution.
|
69 |
+
First, we utilize categorical data for a voxel entity since
|
70 |
+
we have multiple objects in contrast to the existing work [1–
|
71 |
+
3], so each category tells each voxel belongs to which cat-
|
72 |
+
egory. Thus, we extend discrete diffusion models for 2D
|
73 |
+
categorical data [7, 8] into 3D categorical data (Sec. 3.1).
|
74 |
+
Second, we validate the latent diffusion model for the 3D
|
75 |
+
scene-scale generation, which can reduce training and test-
|
76 |
+
ing computational cost (Sec. 3.2). Third, we propose to per-
|
77 |
+
form semantic scene completion (SSC) by learning a con-
|
78 |
+
ditional distribution using our generative models, where the
|
79 |
+
condition is a partial observation of the scene (Sec. 3.1).
|
80 |
+
That is, we demonstrate that our model can complete a reasonable scene in a realistic scenario with a sparse and partial observation.
|
81 |
+
arXiv:2301.00527v1 [cs.CV] 2 Jan 2023
|
82 |
+
[Figure color legend: Building, Barrier, Other, Pedestrian, Pole, Road, Ground, Sidewalk, Vegetation, Vehicles.]
|
94 |
+
Lastly, we show the effectiveness of our method in terms
|
95 |
+
of the unconditional and conditional (SSC) generation tasks
|
96 |
+
on the CarlaSC dataset [9] (Sec. 4). Especially, we show
|
97 |
+
that our generative model can outperform a discriminative
|
98 |
+
model in the SSC task.
|
99 |
+
2. Related Work
|
100 |
+
2.1. Semantic Scene Completion
|
101 |
+
Leveraging 3D data for semantic segmentation has been
|
102 |
+
studied from different perspectives. Vision sensors (e.g.,
|
103 |
+
RGB-D camera and LiDAR) provide depth information
|
104 |
+
from a single viewpoint, giving more information about the
|
105 |
+
world. One of the early approaches is using an RGB-D (i.e.,
|
106 |
+
color and depth) image with a 2D segmentation map [10].
|
107 |
+
In addition, using data in a 3D coordinate system has been
|
108 |
+
extensively studied. 3D semantic segmentation is the exten-
|
109 |
+
sion of 2D segmentation, where a classifier is applied to
|
110 |
+
point clouds or voxel data in 3D coordinates [11,12].
|
111 |
+
One of the recent advances in 3D semantic segmentation
|
112 |
+
is semantic scene completion (SSC), where a partially ob-
|
113 |
+
servable space – observed via RGB-D image or point clouds
|
114 |
+
– should be densely filled with class labels [13–16]. In SSC,
|
115 |
+
a model gets the point cloud obtained in one viewpoint;
|
116 |
+
thus, it contains multiple partial objects (e.g., one side of a
|
117 |
+
car). Then, the model not only reconstructs the unobserved
|
118 |
+
shape of the car but also labels it as a car. Here, the predic-
|
119 |
+
tion about the occupancy and the semantic labels can mutu-
|
120 |
+
ally benefit [17].
|
121 |
+
Due to the partial observation, filling in occluded and
|
122 |
+
sparse areas is the biggest hurdle. Thus, a generative model
|
123 |
+
is effective for 3D scene completion as 2D completion
|
124 |
+
tasks [18, 19]. Chen et al. [20] demonstrate that generative
|
125 |
+
adversarial networks (GANs) can be used to improve the
|
126 |
+
plausibility of a completion result. However, a diffusion-
|
127 |
+
based generative model has yet to be explored in terms of
|
128 |
+
a 3D semantic segmentation map. We speculate that us-
|
129 |
+
ing a diffusion model has good prospects, thanks to the
|
130 |
+
larger size of the latent and the capability to deal with high-
|
131 |
+
dimensional data.
|
132 |
+
In this work, we explore a diffusion model in the context
|
133 |
+
of 3D semantic scene completion. Diffusion models have
|
134 |
+
been rapidly growing and they perform remarkably well on
|
135 |
+
real-world 2D images [21]. Thus, we would like to delve
|
136 |
+
into the diffusion to generate 3D semantic segmentation
|
137 |
+
maps; thus, we hope to provide the research community a
|
138 |
+
useful road map towards generating the 3D semantic scene
|
139 |
+
maps.
|
140 |
+
2.2. Diffusion Models
|
141 |
+
Recent advances in diffusion models have shown that a
|
142 |
+
deep model can learn more diverse data distribution by a
|
143 |
+
diffusion process [5]. A diffusion process is introduced to
|
144 |
+
adopt a simple distribution (e.g., Gaussian) to learn a com-
|
145 |
+
plex distribution [4]. Especially, diffusion models show im-
|
146 |
+
pressive results for image generation [6] and conditional
|
147 |
+
generation [22, 23] on high resolution compared to GANs.
|
148 |
+
GANs are known to suffer from the mode collapse prob-
|
149 |
+
lem and struggle to capture complex scenes with multiple
|
150 |
+
objects [24]. On the other hand, diffusion models have a ca-
|
151 |
+
pacity to escape mode collapse [6] and generate complex
|
152 |
+
scenes [23,25] since likelihood-based methods achieve bet-
|
153 |
+
ter coverage of full data distribution.
|
154 |
+
Diffusion models have been studied to a large extent in
|
155 |
+
high-dimensional continuous data. However, they often lack
|
156 |
+
the capacity to deal with discrete data (e.g., text and seg-
|
157 |
+
mentation maps) since the discreteness of data is not fully
|
158 |
+
covered by continuous representations. To tackle such dis-
|
159 |
+
creteness, discrete diffusion models have been studied for
|
160 |
+
various applications, such as text generation [7,8] and low-
|
161 |
+
dimensional segmentation maps generation [7].
|
162 |
+
Since both continuous and discrete diffusion models es-
|
163 |
+
timate the density of image pixels, a higher image res-
|
164 |
+
olution means higher computation. To address this issue,
|
165 |
+
latent diffusion models [23, 26] operate a diffusion pro-
|
166 |
+
cess on the latent space of a lower dimension. To work
|
167 |
+
on the compressed latent space, Vector-Quantized Varia-
|
168 |
+
tional Auto-Encoder (VQ-VAE) [27] is employed. Latent
|
169 |
+
diffusion models consist of two stages: VQ-VAE and dif-
|
170 |
+
fusion. VQ-VAE trains an encoder to compress the image
|
171 |
+
into a latent space. Equipped with VQ-VAE, autoregressive
|
172 |
+
models [28, 29] have shown impressive performance. Re-
|
173 |
+
cent advances in latent diffusion models further improve
|
174 |
+
the generative performance by ameliorating the unidirec-
|
175 |
+
tional bias and accumulated prediction error in existing
|
176 |
+
models [23,26].
|
177 |
+
Our work introduces an extension of discrete diffu-
|
178 |
+
sion models for high-resolution 3D categorical voxel data.
|
179 |
+
Specifically, we show the effectiveness of a diffusion model
|
180 |
+
in terms of unconditional and conditional generation tasks,
|
181 |
+
where the condition is a partial observation of a scene (i.e.,
|
182 |
+
SSC). Further, we propose a latent diffusion models for 3D
|
183 |
+
categorical data to reduce the computation load caused by
|
184 |
+
high-resolution segmentation maps.
|
185 |
+
2.3. Diffusion Models for 3D Data
|
186 |
+
Diffusion models have been used for 3D data. Until re-
|
187 |
+
cently, research has been mainly conducted for 3D point
|
188 |
+
clouds with xyz-coordinates. PVD [2] applies continuous
|
189 |
+
diffusion on point-voxel representations for object shape
|
190 |
+
generation and completion without additional shape en-
|
191 |
+
coders. LION [3] uses latent diffusion for object shape com-
|
192 |
+
|
193 |
+
Forward Process
|
194 |
+
Reverse Process
|
195 |
+
(a) Discrete Diffusion Models
|
196 |
+
Segmentation Map
|
197 |
+
Segmentation Map
|
198 |
+
Reverse Process
|
199 |
+
Codebook
|
200 |
+
Stage1:VQ-VAE
|
201 |
+
Stage2: Latent Diffusion
|
202 |
+
Forward Process
|
203 |
+
(b) Latent Diffusion Models
|
204 |
+
Figure 2. Overview of (a) Discrete Diffusion Models and (b) La-
|
205 |
+
tent Diffusion Models. Discrete diffusion models conduct diffu-
|
206 |
+
sion process on voxel space, whereas latent diffusion models op-
|
207 |
+
erate diffusion process on latent space.
|
208 |
+
pletion (i.e., conditional generation) with additional shape
|
209 |
+
encoders.
|
210 |
+
In this paper, we aim to learn 3D categorical data (i.e.,
|
211 |
+
3D semantic segmentation maps) with a diffusion model.
|
212 |
+
The study of object generation has shown promising re-
|
213 |
+
sults, but as far as we know, our work is the first to generate
|
214 |
+
a 3D scene with multiple objects using a diffusion model.
|
215 |
+
Concretely, our work explores discrete and latent diffusion
|
216 |
+
models to learn a distribution of volumetric semantic scene
|
217 |
+
segmentation maps. We develop the models in an uncon-
|
218 |
+
ditional and conditional generation; the latter can be used
|
219 |
+
directly for the SSC task.
|
220 |
+
3. Method
|
221 |
+
Our goal is to learn a data distribution p(x) using dif-
|
222 |
+
fusion models, where each data x ∼ p(x) represents a
|
223 |
+
3D segmentation map described with the one-hot repre-
|
224 |
+
sentation. 3D segmentation maps are samples from the
|
225 |
+
data distribution p(x), which is the categorical distribution
|
226 |
+
Cat(k0, k1, · · · , kM) with M +1 probabilities of the free la-
|
227 |
+
bel k0 and M main categories. The discrete diffusion mod-
|
228 |
+
els could learn data distribution by recovering the noised
|
229 |
+
data, which is destroyed through the successive transition
|
230 |
+
of the label [8].
|
231 |
+
Our method aims to learn a distribution of voxelized
|
232 |
+
3D segmentation maps with discrete diffusion (Sec. 3.1).
|
233 |
+
Specifically, it includes unconditional and conditional gen-
|
234 |
+
eration, where the latter corresponds to the SSC task. In ad-
|
235 |
+
dition, we explore a latent diffusion model for 3D segmen-
|
236 |
+
tation maps (Sec. 3.2).
|
237 |
+
3.1. Discrete Diffusion Models
|
238 |
+
Fig. 2a summarizes the overall process of discrete diffu-
|
239 |
+
sion, consisting of a forward process and a reverse process;
|
240 |
+
the former gradually adds noise to the data and the latter
|
241 |
+
learns to denoise the noised data.
|
242 |
+
In the forward process in the discrete diffusion, an origi-
|
243 |
+
nal segmentation map x0 is gradually corrupted into a t-step
|
244 |
+
noised segmentation map xt with 1 ≤ t ≤ T. Each forward
|
245 |
+
step can be defined by a Markov uniform transition matrix
|
246 |
+
Qt [8] as xt = xt−1Qt. Based on the Markov property, we
|
247 |
+
can derive the t-step noised segmentation map xt straight
|
248 |
+
from the original segmentation map x0, q(xt|x0), with a
|
249 |
+
cumulative transition matrix ¯Qt = Q1Q2 · · · Qt:
|
250 |
+
q(xt|x0) = Cat(xt; p = x0 ¯Qt).
|
251 |
+
(1)
|
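As a minimal sketch of the forward corruption in Eq. 1 (our own illustration, not the authors' code: the uniform transition is parameterized by a cumulative keep-probability alpha_bar_t, and all names are assumptions):

import numpy as np

def sample_xt_given_x0(x0_onehot, alpha_bar_t, rng=np.random.default_rng()):
    """Sample x_t ~ Cat(x_t; p = x0 * Q_bar_t) for a uniform transition matrix.

    x0_onehot: array of shape (..., K) with one-hot class labels per voxel.
    alpha_bar_t: probability that the original label survives after t steps.
    """
    K = x0_onehot.shape[-1]
    # Keep the original label with prob. alpha_bar_t, otherwise resample
    # uniformly over the K classes (free label + M semantic categories).
    probs = alpha_bar_t * x0_onehot + (1.0 - alpha_bar_t) / K
    flat = probs.reshape(-1, K)
    samples = np.array([rng.choice(K, p=p) for p in flat])
    return samples.reshape(x0_onehot.shape[:-1])  # integer labels of x_t

Because the cumulative transition is available in closed form, a training step can corrupt x0 to any timestep t directly, without simulating the intermediate steps.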
252 |
+
In the reverse process parametrized by θ, a learn-
|
253 |
+
able model is used to reverse a noised segmentation map
|
254 |
+
by pθ(xt−1|xt). Specifically, we use a reparametrization
|
255 |
+
trick [5] to make the model predict a denoised map ˜x0 and
|
256 |
+
subsequently get the reverse process pθ(xt−1|xt):
|
257 |
+
pθ(xt−1|xt) = q(xt−1|xt, ˜x0)pθ(˜x0|xt),
|
258 |
+
(2)
|
259 |
+
q(xt−1|xt, ˜x0) = q(xt|xt−1, ˜x0) q(xt−1|˜x0) / q(xt|˜x0).
|
260 |
+
(3)
|
263 |
+
We optimize a joint loss that consists of the KL di-
|
264 |
+
vergence of the forward process q(xt−1|xt, x0) from the
|
265 |
+
reverse process pθ(xt−1|xt); of the original segmentation
|
266 |
+
map q(x0) from the reconstructed one pθ(˜x0|xt) for an
|
267 |
+
auxiliary loss:
|
268 |
+
L = DKL( q(xt−1|xt, x0) ∥ pθ(xt−1|xt) )
|
269 |
+
+ w0DKL( q(x0) ∥ pθ(˜x0|xt) ),
|
270 |
+
(4)
|
271 |
+
where w0 is an auxiliary loss weight.
|
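To make the reparametrized reverse step and the loss of Eq. 4 concrete, the following minimal numpy sketch (ours, written in the spirit of the uniform-transition posterior of multinomial diffusion [7, 8]; function names and the default w0 are assumptions) evaluates q(xt−1|xt, x0) with either the true x0 or the predicted ˜x0 and combines the two terms:

import numpy as np

def posterior_probs(xt_onehot, x0_probs, beta_t, alpha_bar_prev):
    """q(x_{t-1} | x_t, x0) for a uniform transition, with x0 given as probabilities."""
    K = xt_onehot.shape[-1]
    fact1 = (1.0 - beta_t) * xt_onehot + beta_t / K                 # from q(x_t | x_{t-1})
    fact2 = alpha_bar_prev * x0_probs + (1.0 - alpha_bar_prev) / K  # from q(x_{t-1} | x0)
    unnorm = fact1 * fact2
    return unnorm / unnorm.sum(axis=-1, keepdims=True)

def loss_eq4(xt_onehot, x0_onehot, x0_pred_probs, beta_t, alpha_bar_prev, w0=0.1):
    """KL(q(x_{t-1}|x_t,x0) || p_theta(x_{t-1}|x_t)) plus an auxiliary term on x0."""
    eps = 1e-12
    q_true = posterior_probs(xt_onehot, x0_onehot, beta_t, alpha_bar_prev)
    p_model = posterior_probs(xt_onehot, x0_pred_probs, beta_t, alpha_bar_prev)
    kl = np.sum(q_true * (np.log(q_true + eps) - np.log(p_model + eps)), axis=-1)
    aux = -np.sum(x0_onehot * np.log(x0_pred_probs + eps), axis=-1)
    return float(np.mean(kl + w0 * aux))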
272 |
+
Unlike existing discrete diffusion models [7,8], our goal
|
273 |
+
is to learn the distribution of 3D data. Thus, to better handle
|
274 |
+
3D data, we use a point cloud segmentation network [30]
|
275 |
+
with modifications for discrete data and time embedding.
|
276 |
+
Conditional generation.
|
277 |
+
We propose discrete diffusion
|
278 |
+
for Semantic Scene Completion (SSC) with conditional
|
279 |
+
generation. SSC jointly estimates a scene’s complete geom-
|
280 |
+
etry and semantics, given a sparse occupancy map s. Thus,
|
281 |
+
it introduces a condition into Eq. 2, resulting in:
|
282 |
+
pθ(xt−1|xt, s) = q(xt−1|xt, ˜x0)pθ(˜x0|xt, s),
|
283 |
+
(5)
|
284 |
+
|
285 |
+
where s is a sparse occupancy map. We give the condition
|
286 |
+
by concatenating a sparse occupancy map s with a corrupted
|
287 |
+
input xt.
|
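The conditioning mechanism is straightforward to express in code. The fragment below is a hypothetical PyTorch sketch (tensor layout and the denoiser interface are our assumptions) of concatenating the sparse occupancy condition s with the corrupted map xt along the channel axis before predicting ˜x0:

import torch

def predict_x0_conditional(denoiser, xt_onehot, s_occupancy, t):
    # xt_onehot: (B, K, X, Y, Z) one-hot noised segmentation map
    # s_occupancy: (B, 1, X, Y, Z) sparse occupancy condition from the point cloud
    inp = torch.cat([xt_onehot, s_occupancy], dim=1)   # channel-wise concatenation
    logits = denoiser(inp, t)                          # network predicts the denoised map
    return torch.softmax(logits, dim=1)                # p_theta(x~0 | x_t, s)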
288 |
+
3.2. Latent Diffusion Models
|
289 |
+
Fig. 2b provides an overview of latent diffusion on 3D
|
290 |
+
segmentation maps. Latent diffusion models project the 3D
|
291 |
+
segmentation maps into a smaller latent space and operate
|
292 |
+
a diffusion process on the latent space instead of the high-
|
293 |
+
dimensional input space. A latent diffusion takes advantage
|
294 |
+
of a lower training computational cost and a faster inference
|
295 |
+
by processing diffusion on a lower dimensional space.
|
296 |
+
To encode a 3D segmentation map into a latent rep-
|
297 |
+
resentation, we use Vector Quantized Variational AutoEn-
|
298 |
+
coder (VQ-VAE) [27]. VQ-VAE extends the VAE by adding
|
299 |
+
a discrete learnable codebook E = {e1, …, eN} ∈ RN×d,
|
301 |
+
where N is the size of the codebook and d is the dimension
|
302 |
+
of the codes. The encoder E encodes 3D segmentation maps
|
303 |
+
x into a latent z = E(x), and the quantizer V Q(·) maps
|
304 |
+
the latent z into a quantized latent zq, which is the closest
|
305 |
+
codebook entry en. Note that the latent z ∈ Rh×w×z×d has
|
306 |
+
a smaller spatial resolution than the segmentation map x.
|
307 |
+
Then the decoder D reconstructs the 3D segmentation maps
|
308 |
+
from the quantized latent, ˜x = D(V Q(E(x))). The encoder
|
309 |
+
E, the decoder D, and the codebook E can be trained end-
|
310 |
+
to-end using the following loss function:
|
311 |
+
LVQVAE = − Σk wk xk log(˜xk) + ∥sg(z) − zq∥²₂ + ∥z − sg(zq)∥²₂,
|
312 |
+
(6)
|
319 |
+
where wk is a class weight and sg(·) is the stop-gradient
|
320 |
+
operation. Training the latent diffusion model is similar to
|
321 |
+
that of discrete diffusion. Discrete diffusion models diffuse
|
322 |
+
between labels, but latent diffusion models diffuse between
|
323 |
+
codebook indexes using Markov Uniform transition matrix
|
324 |
+
Qt [8].
|
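For concreteness, a minimal PyTorch sketch of the two ingredients described above is given here; it is our own illustration (shapes, names, and the absence of a separate commitment weight are assumptions), not the authors' implementation. It shows nearest-code quantization over the codebook and the three terms of Eq. 6:

import torch
import torch.nn.functional as F

def vector_quantize(z, codebook):
    # z: (B, d, X, Y, Z) encoder output; codebook: (N, d) learnable entries.
    B, d = z.shape[0], z.shape[1]
    flat = z.permute(0, 2, 3, 4, 1).reshape(-1, d)      # (B*X*Y*Z, d)
    dist = torch.cdist(flat, codebook)                  # distances to all codes
    idx = dist.argmin(dim=1)                            # nearest codebook index per voxel
    zq = codebook[idx].reshape(B, *z.shape[2:], d).permute(0, 4, 1, 2, 3)
    return zq, idx                                      # straight-through trick omitted here

def vqvae_loss(x_onehot, x_recon_logits, z, zq, class_weights):
    # Weighted reconstruction + codebook and commitment terms (cf. Eq. 6).
    log_probs = F.log_softmax(x_recon_logits, dim=1)
    recon = -(class_weights.view(1, -1, 1, 1, 1) * x_onehot * log_probs).sum(dim=1).mean()
    codebook_term = F.mse_loss(zq, z.detach())          # || sg(z) - z_q ||^2
    commit_term = F.mse_loss(z, zq.detach())            # || z - sg(z_q) ||^2
    return recon + codebook_term + commit_term

The discrete diffusion of the second stage then operates on the integer codebook indexes idx rather than on voxel labels.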
325 |
+
4. Experiments
|
326 |
+
In this section, we empirically study the effectiveness of
|
327 |
+
the diffusion models on 3D voxel segmentation maps. We
|
328 |
+
divide the following sub-sections into the learning of the
|
329 |
+
unconditional data distribution p(x) (Sec. 4.2) and the con-
|
330 |
+
ditional data distribution p(x|s) given a sparse occupancy
|
331 |
+
map s (Sec. 4.3); note that the latter corresponds to seman-
|
332 |
+
tic scene completion (SSC).
|
333 |
+
4.1. Implementation Details
|
334 |
+
Dataset. Following prior work [9], we employ the CarlaSC
|
335 |
+
dataset – a synthetic outdoor driving dataset – for training
|
336 |
+
and evaluation. The dataset consists of 24 scenes in 8 dy-
|
337 |
+
namic maps under low, medium, and high traffic conditions.
|
338 |
+
Model         Resolution    Training (time/epoch)   Sampling (time/img)
D-Diffusion   128×128×8     19m 48s                 0.883s
L-Diffusion   32×32×2       7m 37s                  0.499s
L-Diffusion   16×16×2       4m 41s                  0.230s
L-Diffusion   8×8×2         4m 40s                  0.202s
|
358 |
+
Table 1. Computation time comparison between discrete diffu-
|
359 |
+
sion models and latent diffusion models for 3D segmentation maps
|
360 |
+
generation. ‘D-Diffusion’ and ‘L-Diffusion’ denote discrete diffu-
|
361 |
+
sion models and latent diffusion models, respectively. ‘Resolution’
|
362 |
+
means the resolution of the space in which diffusion process op-
|
363 |
+
erates. A latent diffusion model performs the diffusion process in a lower-
|
364 |
+
dimensional latent space; as a result, it has the advantage of faster
|
365 |
+
training and sampling times.
|
366 |
+
The splits of the dataset contain 18 training, 3 validation,
|
367 |
+
and 3 test scenes, which are annotated with 10 semantic
|
368 |
+
classes and a free label. Each scene with a resolution of
|
369 |
+
128 × 128 × 8 covers a range of 25.6 m ahead and behind
|
370 |
+
the car, 25.6 m to each side, and 3 m in height.
|
371 |
+
Metrics. Since SSC requires predicting the semantic label
|
372 |
+
of a voxel and an occupancy state together, we use mIoU
|
373 |
+
and IoU as SSC and VQ-VAE metrics. The mIoU measures
|
374 |
+
the intersection over union averaged over all classes, and
|
375 |
+
the IoU evaluates scene completion quality, regardless of
|
376 |
+
the predicted semantic labels.
|
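As an illustration of these two metrics (a minimal numpy sketch; the exact CarlaSC evaluation protocol, e.g. the handling of unobserved voxels, may differ), completion IoU and mIoU can be computed from predicted and ground-truth label grids as follows:

import numpy as np

def ssc_metrics(pred, gt, num_classes, free_label=0):
    # pred, gt: integer voxel grids of class labels; free_label marks empty space.
    pred_occ, gt_occ = pred != free_label, gt != free_label
    # Completion IoU: occupancy agreement, regardless of the semantic class.
    iou = (pred_occ & gt_occ).sum() / max((pred_occ | gt_occ).sum(), 1)
    # mIoU: intersection-over-union averaged over the semantic classes.
    ious = []
    for c in range(num_classes):
        if c == free_label:
            continue
        inter = ((pred == c) & (gt == c)).sum()
        union = ((pred == c) | (gt == c)).sum()
        if union > 0:
            ious.append(inter / union)
    return float(iou), float(np.mean(ious)) if ious else 0.0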
377 |
+
Experimental settings. Experiments are deployed on two
|
378 |
+
NVIDIA GTX 3090 GPUs with a batch size of 8 for dif-
|
379 |
+
fusion models and 4 for VQ-VAE. Our models follow the
|
380 |
+
same training strategy as multinomial diffusion [7]. We set
|
381 |
+
the hyper-parameters of the diffusion models with the num-
|
382 |
+
ber of time steps T = 100 timesteps. And for VQ-VAE,
|
383 |
+
we set the codebook E = {e1, …, eN} ∈ RN×d where the
|
385 |
+
codebook size N = 1100, dimension of codes d = 11 and
|
386 |
+
en ∈ R32×32×2×d. For diffusion architecture, we slightly
|
387 |
+
modify the encoder–decoder structure in Cylinder3D [30]
|
388 |
+
for time embedding and discreteness of the data. And for
|
389 |
+
VQ-VAE architecture, we also use encoder–decoder struc-
|
390 |
+
ture in Cylinder3D [30], but with the vector quantizer mod-
|
391 |
+
ule.
|
392 |
+
4.2. 3D Segmentation Maps Generation
|
393 |
+
We use the discrete and the latent diffusion models for
|
394 |
+
3D segmentation map generation. Fig. 3 shows the quali-
|
395 |
+
tative results of the generation. As seen in the figure, both
|
396 |
+
the discrete and latent models learn the categorical distri-
|
397 |
+
bution as they produce a variety of reasonable scenes. Note
|
398 |
+
that our models are learned on a large-scale data distribution
|
399 |
+
like the 3D scene with multiple objects; this is worth noting
|
400 |
+
since recent 3D diffusion models for point clouds have been
|
401 |
+
performed on an object scale [2,3,31,32].
|
402 |
+
In Tab. 1, we compare training and sampling time mod-
|
403 |
+
|
404 |
+
Codebook size (N)   Resolution (h × w × z)   IoU    mIoU
220                 8×8×2                    72.5   27.3
220                 16×16×2                  78.7   36.9
220                 32×32×2                  84.6   56.5
550                 8×8×2                    67.7   25.7
550                 16×16×2                  79.4   39.7
550                 32×32×2                  85.8   58.4
1,100               8×8×2                    70.3   25.7
1,100               16×16×2                  79.3   35.0
1,100               32×32×2                  89.1   65.1
2,200               8×8×2                    70.2   26.5
2,200               16×16×2                  77.7   37.9
2,200               32×32×2                  89.2   64.2
|
450 |
+
Table 2. Ablation study on VQ-VAE hyper-parameters. We
|
451 |
+
compare different sizes of codebook N and resolutions of the la-
|
452 |
+
tent space h×w×z.
|
453 |
+
els for different resolutions on which each diffusion model
|
454 |
+
operates. Compared to the discrete diffusion, the latent dif-
|
455 |
+
fusion tends to show shorter training and inference time.
|
456 |
+
This is because the latent diffusion models compress the
|
457 |
+
data into a smaller latent so that the time decreases as the
|
458 |
+
compression rate increases. In particular, compared to dis-
|
459 |
+
crete diffusion, which performs a diffusion process in voxel
|
460 |
+
space, 32 × 32 × 2 latent diffusion has 2.6 times faster
|
461 |
+
training time for one epoch and 1.8 times faster sampling
|
462 |
+
time for generating one image.
|
463 |
+
Ablation study on VQ-VAE.
|
464 |
+
Latent diffusion models
|
465 |
+
consist of two stages. The VQ-VAE compresses 3D seg-
|
466 |
+
mentation maps to latent space, and then discrete diffusion
|
467 |
+
models apply on the codebook index of latent. Therefore,
|
468 |
+
the performance of VQ-VAE may set the upper bound for
|
469 |
+
the final generation quality. So we conduct an ablation study
|
470 |
+
about VQ-VAE while adjusting the resolution of the latent
|
471 |
+
space h×w×z and the codebook capacities N while keep-
|
472 |
+
ing the code dimension d fixed. Concretely, we compress
|
473 |
+
the 3D segmentation maps from 128×128×8 to 32×32×2,
|
474 |
+
16×16×2, and 8×8×2 with four different codebook size
|
475 |
+
N ∈ {220, 550, 1100, 2200}.
|
476 |
+
The quantitative comparison is shown in Tab. 2. The big-
|
477 |
+
ger the codebook size is, the higher the performance is, but
|
478 |
+
it saturates around 1,100. That is because most of the codes
|
479 |
+
are not updated, and the update of the codebook can lapse
|
480 |
+
into a local optimum [33].
|
481 |
+
The resolution of latent space has a significant impact on
|
482 |
+
performance. As the resolution of the latent space becomes
|
483 |
+
smaller, it cannot contain all the information of the 3D seg-
|
484 |
+
mentation map. Setting the resolution to 32 × 32 × 2 with
|
485 |
+
a 1,100 codebook size strikes a good balance between effi-
|
486 |
+
ciency and fidelity.
|
487 |
+
Methods                      IoU     mIoU
LMSCNet SS [16]              85.98   42.53
SSCNet Full [17]             80.69   41.91
MotionSC (T=1) [9]           86.46   46.31
Our network w/o Diffusion    80.70   39.94
Discrete Diffusion (Ours)    80.61   45.83
|
505 |
+
Table 3. Semantic Scene Completion results on test set of CarlaSC
|
506 |
+
4.3. Semantic Scene Completion
|
507 |
+
We use a discrete diffusion model for conditional 3D
|
508 |
+
segmentation map generation (i.e., SSC). As a baseline
|
509 |
+
model against the diffusion model, we train a network with
|
510 |
+
an identical architecture by discriminative learning without
|
511 |
+
a diffusion process. We optimize the baseline with a loss
|
512 |
+
term L = − �
|
513 |
+
k wkxk log(˜xk), where wk is a weight for
|
514 |
+
each semantic class. We visualize results from the baseline
|
515 |
+
and our discrete diffusion model in Fig. 4. Despite the com-
|
516 |
+
plexities of the networks being identical, our discrete dif-
|
517 |
+
fusion model improves mIoU (i.e., class-wise IoU) by up to
|
518 |
+
5.89%p over the baseline model, as shown in Tab. 4. Es-
|
519 |
+
pecially, our method achieves outstanding results in small
|
520 |
+
objects and less frequent categories like ‘pedestrian’,
|
521 |
+
‘pole’, ‘vehicles,’ and ‘other’. The qualitative results in
|
522 |
+
Fig. 4 better demonstrate the improvement.
|
523 |
+
In Tab. 3, we compare our model with existing SSC mod-
|
524 |
+
els whose network architectures and training strategies are
|
525 |
+
specifically built for the SSC task. Nonetheless, our diffu-
|
526 |
+
sion model outperforms LMSCNet [16] and SSCNet [17],
|
527 |
+
in spite of the simpler architecture and training strategies.
|
528 |
+
Although MotionSC [9] shows a slightly better result, we
|
529 |
+
speculate that the diffusion probabilistic model can be im-
|
530 |
+
proved by extensive future research dedicated to this field.
|
531 |
+
5. Conclusion
|
532 |
+
In this work, we demonstrate the extension of the diffu-
|
533 |
+
sion model to scene-scale 3D categorical data beyond gen-
|
534 |
+
erating a single object. We empirically show that our mod-
|
535 |
+
els have impressive generative power to craft various scenes
|
536 |
+
through a discrete and latent diffusion process. Addition-
|
537 |
+
ally, our method provides an alternative view for the SSC
|
538 |
+
task, showing superior performance compared to a discrim-
|
539 |
+
inative counterpart. We believe that our work can be a useful
|
540 |
+
road map for generating 3D data with a diffusion model.
|
541 |
+
References
|
542 |
+
[1] A. Nichol, H. Jun, P. Dhariwal, P. Mishkin, and
|
543 |
+
M. Chen, “Point-e: A system for generating 3d
|
544 |
+
point clouds from complex prompts,” 2022. [Online].
|
545 |
+
Available: https://arxiv.org/abs/2212.08751 1
|
546 |
+
|
547 |
+
[Figure 3 image grid with columns labeled Training Datasets, Latent Diffusion Models, and Discrete Diffusion Models.]
|
550 |
+
Figure 3. Samples from our unconditional diffusion models. The first column shows samples from training datasets. From the second
|
551 |
+
column, we show samples from our discrete diffusion and latent diffusion models. We can observe our diffusion models learn the 3D
|
552 |
+
categorical distribution well, so that it is capable to generate a variety of plausible maps. Color assignment for each class is available in
|
553 |
+
Tab. 4.
|
554 |
+
Method                      mIoU    Free    Building  Barrier  Other  Pedestrian  Pole   Road   Ground  Sidewalk  Vegetation  Vehicles  IoU
w/o Diffusion               39.94   96.40   27.72     3.15     8.77   22.15       37.14  89.02  18.22   59.25     29.74       47.72     80.70
Discrete Diffusion (Ours)   45.83   96.00   31.75     3.42     25.43  46.22       43.32  84.57  13.01   67.50     37.45       55.46     80.61
(Per-class columns report class IoU.)
|
596 |
+
Table 4. Semantic scene completion results on test set of CarlaSC. The discriminative learning result with the diffusion model architecture
|
597 |
+
is denoted as ‘w/o Diffusion’. Values with a difference equal to or greater than 0.5%p are bold.
|
598 |
+
[Figure 4 image grid with rows labeled Input, w/o Diffusion, Discrete Diffusion (ours), and Ground Truth.]
|
606 |
+
Figure 4. Qualitative comparison of a deterministic model (w/o diffusion) and ours (discrete diffusion) on the test split of CarlaSC.
|
607 |
+
The first row shows the sparse inputs for the scene completion task, and the last row shows the corresponding ground-truth. Compared to
|
608 |
+
the deterministic model, our probabilistic model produces more plausible shape and class inference, as highlighted by the red circles. Note
|
609 |
+
that the both models (w/o diffusion and discrete diffusion) use the same network architecture. Color assignment for each class is available
|
610 |
+
in Tab. 4.
|
611 |
+
|
612 |
+
[2] L. Zhou, Y. Du, and J. Wu, “3d shape generation
|
613 |
+
and completion through point-voxel diffusion,” in Pro-
|
614 |
+
ceedings of the IEEE/CVF International Conference
|
615 |
+
on Computer Vision, 2021, pp. 5826–5835. 1, 2, 4
|
616 |
+
[3] X. Zeng, A. Vahdat, F. Williams, Z. Gojcic, O. Litany,
|
617 |
+
S. Fidler, and K. Kreis, “Lion: Latent point diffu-
|
618 |
+
sion models for 3d shape generation,” arXiv preprint
|
619 |
+
arXiv:2210.06978, 2022. 1, 2, 4
|
620 |
+
[4] J. Sohl-Dickstein, E. Weiss, N. Maheswaranathan,
|
621 |
+
and S. Ganguli, “Deep unsupervised learning using
|
622 |
+
nonequilibrium thermodynamics,” in International
|
623 |
+
Conference on Machine Learning.
|
624 |
+
PMLR, 2015, pp.
|
625 |
+
2256–2265. 1, 2
|
626 |
+
[5] J. Ho, A. Jain, and P. Abbeel, “Denoising diffu-
|
627 |
+
sion probabilistic models,” Advances in Neural Infor-
|
628 |
+
mation Processing Systems, vol. 33, pp. 6840–6851,
|
629 |
+
2020. 1, 2, 3
|
630 |
+
[6] P. Dhariwal and A. Nichol, “Diffusion models beat
|
631 |
+
gans on image synthesis,” Advances in Neural Infor-
|
632 |
+
mation Processing Systems, vol. 34, pp. 8780–8794,
|
633 |
+
2021. 1, 2
|
634 |
+
[7] E. Hoogeboom, D. Nielsen, P. Jaini, P. Forr´e, and
|
635 |
+
M. Welling, “Argmax flows and multinomial diffu-
|
636 |
+
sion: Learning categorical distributions,” Advances in
|
637 |
+
Neural Information Processing Systems, vol. 34, pp.
|
638 |
+
12 454–12 465, 2021. 1, 2, 3, 4
|
639 |
+
[8] J. Austin, D. D. Johnson, J. Ho, D. Tarlow, and
|
640 |
+
R. van den Berg, “Structured denoising diffusion mod-
|
641 |
+
els in discrete state-spaces,” Advances in Neural In-
|
642 |
+
formation Processing Systems, vol. 34, pp. 17 981–
|
643 |
+
17 993, 2021. 1, 2, 3, 4
|
644 |
+
[9] J. Wilson, J. Song, Y. Fu, A. Zhang, A. Capodieci,
|
645 |
+
P. Jayakumar, K. Barton, and M. Ghaffari, “Mo-
|
646 |
+
tionsc: Data set and network for real-time semantic
|
647 |
+
mapping in dynamic environments,” arXiv preprint
|
648 |
+
arXiv:2203.07060, 2022. 2, 4, 5
|
649 |
+
[10] J. Long, E. Shelhamer, and T. Darrell, “Fully convo-
|
650 |
+
lutional networks for semantic segmentation,” in Pro-
|
651 |
+
ceedings of the IEEE conference on computer vision
|
652 |
+
and pattern recognition, 2015, pp. 3431–3440. 2
|
653 |
+
[11] C. R. Qi, H. Su, K. Mo, and L. J. Guibas, “Pointnet:
|
654 |
+
Deep learning on point sets for 3d classification and
|
655 |
+
segmentation,” in Proceedings of the IEEE conference
|
656 |
+
on computer vision and pattern recognition, 2017, pp.
|
657 |
+
652–660. 2
|
658 |
+
[12] G. Riegler, A. Osman Ulusoy, and A. Geiger, “Octnet:
|
659 |
+
Learning deep 3d representations at high resolutions,”
|
660 |
+
in Proceedings of the IEEE conference on computer
|
661 |
+
vision and pattern recognition, 2017, pp. 3577–3586.
|
662 |
+
2
|
663 |
+
[13] R. Cheng, C. Agia, Y. Ren, X. Li, and L. Bingbing,
|
664 |
+
“S3cnet: A sparse semantic scene completion network
|
665 |
+
for lidar point clouds,” in Conference on Robot Learn-
|
666 |
+
ing.
|
667 |
+
PMLR, 2021, pp. 2148–2161. 2
|
668 |
+
[14] X. Yan, J. Gao, J. Li, R. Zhang, Z. Li, R. Huang, and
|
669 |
+
S. Cui, “Sparse single sweep lidar point cloud seg-
|
670 |
+
mentation via learning contextual shape priors from
|
671 |
+
scene completion,” in Proceedings of the AAAI Con-
|
672 |
+
ference on Artificial Intelligence, vol. 35, no. 4, 2021,
|
673 |
+
pp. 3101–3109. 2
|
674 |
+
[15] C. B. Rist, D. Emmerichs, M. Enzweiler, and D. M.
|
675 |
+
Gavrila, “Semantic scene completion using local deep
|
676 |
+
implicit functions on lidar data,” IEEE transactions
|
677 |
+
on pattern analysis and machine intelligence, vol. 44,
|
678 |
+
no. 10, pp. 7205–7218, 2021. 2
|
679 |
+
[16] L. Roldao, R. de Charette, and A. Verroust-Blondet,
|
680 |
+
“Lmscnet: Lightweight multiscale 3d semantic com-
|
681 |
+
pletion,” in 2020 International Conference on 3D Vi-
|
682 |
+
sion (3DV).
|
683 |
+
IEEE, 2020, pp. 111–119. 2, 5
|
684 |
+
[17] S. Song, F. Yu, A. Zeng, A. X. Chang, M. Savva, and
|
685 |
+
T. Funkhouser, “Semantic scene completion from a
|
686 |
+
single depth image,” Proceedings of 30th IEEE Con-
|
687 |
+
ference on Computer Vision and Pattern Recognition,
|
688 |
+
2017. 2, 5
|
689 |
+
[18] C. H. Jo, W. B. Im, and S.-E. Yoon, “In-n-out: Towards
|
690 |
+
good initialization for inpainting and outpainting,” in
|
691 |
+
The 32nd British Machine Vision Conference, BMVC
|
692 |
+
2021.
|
693 |
+
British Machine Vision Association (BMVA),
|
694 |
+
2021. 2
|
695 |
+
[19] A. Lugmayr, M. Danelljan, A. Romero, F. Yu, R. Tim-
|
696 |
+
ofte, and L. Van Gool, “Repaint: Inpainting using de-
|
697 |
+
noising diffusion probabilistic models,” in Proceed-
|
698 |
+
ings of the IEEE/CVF Conference on Computer Vision
|
699 |
+
and Pattern Recognition, 2022, pp. 11 461–11 471. 2
|
700 |
+
[20] Y.-T. Chen, M. Garbade, and J. Gall, “3d semantic
|
701 |
+
scene completion from a single depth image using ad-
|
702 |
+
versarial training,” in 2019 IEEE International Con-
|
703 |
+
ference on Image Processing (ICIP).
|
704 |
+
IEEE, 2019,
|
705 |
+
pp. 1835–1839. 2
|
706 |
+
[21] A. Ramesh, P. Dhariwal, A. Nichol, C. Chu, and M. Chen, “Hierarchical text-conditional im-
|
715 |
+
age generation with clip latents,” arXiv preprint
|
716 |
+
arXiv:2204.06125, 2022. 2
|
717 |
+
|
718 |
+
[22] C. Saharia, W. Chan, H. Chang, C. Lee, J. Ho, T. Sal-
|
719 |
+
imans, D. Fleet, and M. Norouzi, “Palette: Image-to-
|
720 |
+
image diffusion models,” in ACM SIGGRAPH 2022
|
721 |
+
Conference Proceedings, 2022, pp. 1–10. 2
|
722 |
+
[23] S. Gu, D. Chen, J. Bao, F. Wen, B. Zhang, D. Chen,
|
723 |
+
L. Yuan, and B. Guo, “Vector quantized diffusion
|
724 |
+
model for text-to-image synthesis,” in Proceedings of
|
725 |
+
the IEEE/CVF Conference on Computer Vision and
|
726 |
+
Pattern Recognition, 2022, pp. 10 696–10 706. 2
|
727 |
+
[24] S.-H. Shim, S. Hyun, D. Bae, and J.-P. Heo, “Local at-
|
728 |
+
tention pyramid for scene image generation,” in Pro-
|
729 |
+
ceedings of the IEEE/CVF Conference on Computer
|
730 |
+
Vision and Pattern Recognition, 2022, pp. 7774–7782.
|
731 |
+
2
|
732 |
+
[25] W.-C. Fan, Y.-C. Chen, D. Chen, Y. Cheng, L. Yuan,
|
733 |
+
and Y.-C. F. Wang, “Frido: Feature pyramid diffusion
|
734 |
+
for complex scene image synthesis,” arXiv preprint
|
735 |
+
arXiv:2208.13753, 2022. 2
|
736 |
+
[26] R. Rombach, A. Blattmann, D. Lorenz, P. Esser,
|
737 |
+
and B. Ommer, “High-resolution image synthesis
|
738 |
+
with latent diffusion models,” in Proceedings of the
|
739 |
+
IEEE/CVF Conference on Computer Vision and Pat-
|
740 |
+
tern Recognition, 2022, pp. 10 684–10 695. 2
|
741 |
+
[27] A. Van Den Oord, O. Vinyals et al., “Neural discrete
|
742 |
+
representation learning,” Advances in neural informa-
|
743 |
+
tion processing systems, vol. 30, 2017. 2, 4
|
744 |
+
[28] P. Esser, R. Rombach, and B. Ommer, “Taming trans-
|
745 |
+
formers for high-resolution image synthesis,” in Pro-
|
746 |
+
ceedings of the IEEE/CVF conference on computer
|
747 |
+
vision and pattern recognition, 2021, pp. 12 873–
|
748 |
+
12 883. 2
|
749 |
+
[29] A. Ramesh, M. Pavlov, G. Goh, S. Gray, C. Voss,
|
750 |
+
A. Radford, M. Chen, and I. Sutskever, “Zero-shot
|
751 |
+
text-to-image generation,” in International Confer-
|
752 |
+
ence on Machine Learning.
|
753 |
+
PMLR, 2021, pp. 8821–
|
754 |
+
8831. 2
|
755 |
+
[30] X. Zhu, H. Zhou, T. Wang, F. Hong, Y. Ma, W. Li,
|
756 |
+
H. Li, and D. Lin, “Cylindrical and asymmetrical 3d
|
757 |
+
convolution networks for lidar segmentation,” in Pro-
|
758 |
+
ceedings of the IEEE/CVF conference on computer vi-
|
759 |
+
sion and pattern recognition, 2021, pp. 9939–9948. 3,
|
760 |
+
4
|
761 |
+
[31] M. Xu, L. Yu, Y. Song, C. Shi, S. Ermon, and
|
762 |
+
J. Tang, “Geodiff: A geometric diffusion model for
|
763 |
+
molecular conformation generation,” arXiv preprint
|
764 |
+
arXiv:2203.02923, 2022. 4
|
765 |
+
[32] S. Luo and W. Hu, “Diffusion probabilistic models
|
766 |
+
for 3d point cloud generation,” in Proceedings of the
|
767 |
+
IEEE/CVF Conference on Computer Vision and Pat-
|
768 |
+
tern Recognition, 2021, pp. 2837–2845. 4
|
769 |
+
[33] M. Hu, Y. Wang, T.-J. Cham, J. Yang, and P. N. Sug-
|
770 |
+
anthan, “Global context with discrete diffusion in vec-
|
771 |
+
tor quantised modelling for image generation,” in Pro-
|
772 |
+
ceedings of the IEEE/CVF Conference on Computer
|
773 |
+
Vision and Pattern Recognition, 2022, pp. 11 502–
|
774 |
+
11 511. 5
|
775 |
+
|
6NAyT4oBgHgl3EQfpfik/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
79FLT4oBgHgl3EQfAy4h/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e34d98418330a49a45996f5e9a47e3f0daf9baef12c1eda46df8ae21dc14a630
|
3 |
+
size 3538989
|
7tE4T4oBgHgl3EQf2g0r/content/2301.05298v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:aae5d96a289799346bfdf7ac4adda2649fe55f16616ccfa03e258f6499ac96ed
|
3 |
+
size 1685470
|
7tE4T4oBgHgl3EQf2g0r/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5f65aa4ac05973f602a93bb4a1e2f619c0316ab22510b4aebdfa26a2be321cb4
|
3 |
+
size 4653101
|
7tE4T4oBgHgl3EQf2g0r/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:31ccb30ad750690d9d638e4353f175ec295f416a5dcb1bc0e7a9c515e4b12193
|
3 |
+
size 194347
|
99AyT4oBgHgl3EQf3fke/content/tmp_files/2301.00768v1.pdf.txt
ADDED
@@ -0,0 +1,2921 @@
1 |
+
1
|
2 |
+
|
3 |
+
Ontology-based Context Aware Recommender System Application
|
4 |
+
for Tourism.
|
5 |
+
|
6 |
+
Vitor T. Camacho1, José Cruz2
|
7 |
+
1 PhD, [email protected], R&D Data Science, Syone.
|
8 |
+
2 MSc, [email protected], R&D Data Science, Syone.
|
9 |
+
|
10 |
+
Abstract
|
11 |
+
In this work a novel recommender system (RS) for Tourism is presented. The RS is context aware
|
12 |
+
as is now the rule in the state-of-the-art for recommender systems and works on top of a tourism
|
13 |
+
ontology which is used to group the different items being offered. The presented RS mixes
|
14 |
+
different types of recommenders creating an ensemble which changes on the basis of the RS’s
|
15 |
+
maturity. Starting from simple content-based recommendations and iteratively adding popularity,
|
16 |
+
demographic and collaborative filtering methods as rating density and user cardinality increase.
|
17 |
+
The result is a RS that mutates during its lifetime and uses a tourism ontology and natural
|
18 |
+
language processing (NLP) to correctly bin the items to specific item categories and meta
|
19 |
+
categories in the ontology. This item classification facilitates the association between user
|
20 |
+
preferences and items, as well as allowing to better classify and group the items being offered,
|
21 |
+
which in turn is particularly useful for context-aware filtering.
|
22 |
+
|
23 |
+
Keywords: recommender system, CARS, ontology, tourism, content-based, collaborative
|
24 |
+
filtering, demographic-based.
|
25 |
+
|
26 |
+
|
27 |
+
1
|
28 |
+
Introduction
|
29 |
+
This work presents a novel recommender system (RS) approach, which builds on context
|
30 |
+
awareness, domain ontology and different types of recommenders that enter the process at
|
31 |
+
different stages of maturity. It ranges from simple recommenders that are less prone to cold-start issues
|
32 |
+
to more complex and powerful recommenders, which struggle considerably with an initial lack of data.
|
33 |
+
At the final stage of maturity, when all the recommenders are already deployed in the
|
34 |
+
recommender pool, the different recommenders analyze different aspects of the data, from
|
35 |
+
demographic features to ratings, and provide an ensemble of recommendations to the users,
|
36 |
+
based on different approaches and with varying degrees of personalization. The approach is novel
|
37 |
+
|
38 |
+
2
|
39 |
+
|
40 |
+
in how it uses several techniques, from domain ontology to bin the items using NLP to achieve
|
41 |
+
concept similarity, and then from there applies content-based, demographic-based, popularity-
|
42 |
+
based and collaborative filtering approaches to attain the recommended items. The collaborative
|
43 |
+
filtering employed are field-aware factorization machines which are the state-of-the-art in matrix
|
44 |
+
factorization, which can easily include context-awareness. The aim is to provide a powerful and
|
45 |
+
adaptable recommender system framework which can adapt to any domain, given the respective
|
46 |
+
domain ontology, and can overcome cold-start issues by using an approach with 4 stages of
|
47 |
+
maturity, which are subsequently entered when given thresholds are reached. In the following the
|
48 |
+
structure of the paper is presented, with an explanation of every section. In the present section,
|
49 |
+
the Introduction, an overview of the presented recommender system framework is provided as
|
50 |
+
well as a literature review of the relevant works on the subject. In section 2, the framework and
|
51 |
+
all its components are presented, from adopted technologies to used algorithms and techniques.
|
52 |
+
A presentation of the architecture is given as well as a mock-up of the designed UI to provide the
|
53 |
+
link between user and recommender system. In section 3, the technologies and techniques,
|
54 |
+
mainly the ones central to the recommender system are better explained with some formulas
|
55 |
+
being provided. In section 4, the recommender system is tested with a synthetic dataset with
|
56 |
+
varying stages of maturity, to show how the recommender system evolves as the data changes.
|
57 |
+
In section 5, conclusions are given as well as a brief discussion on future works.
|
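Since the collaborative filtering component mentioned above relies on field-aware factorization machines, a brief, hypothetical illustration of the standard FFM scoring function is sketched here (this is the generic formulation, not the authors' implementation; all names are ours). Each feature keeps one latent vector per field, and the interaction between two features uses the vectors indexed by the other feature's field, which is what lets context fields interact naturally with user and item fields:

import numpy as np

def ffm_score(features, v, w, w0=0.0):
    """Field-aware factorization machine score for one (user, item, context) example.

    features: list of (field, feature_index, value) triples.
    v[field][feature_index]: latent vector of that feature dedicated to the given field.
    w[feature_index]: linear weight; w0: global bias.
    """
    score = w0 + sum(w[j] * x for _, j, x in features)
    for a in range(len(features)):
        fa, ja, xa = features[a]
        for b in range(a + 1, len(features)):
            fb, jb, xb = features[b]
            # Pairwise interaction: vector of feature ja for field fb,
            # dotted with the vector of feature jb for field fa.
            score += float(np.dot(v[fb][ja], v[fa][jb])) * xa * xb
    return score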
58 |
+
|
59 |
+
1.1
|
60 |
+
Literature review
|
61 |
+
Recommender systems (RS) have been the focus of research for many years now, both on the algorithmic side and on the applied side. The study of RS started in the beginning of the 1990s, but it was in the last 15 years that research and the number of publications on the topic surged. Concerning our application, tourism, RS have been the focus of studies since at least the start of the 2000s, with many publications having appeared since then [1]–[15]. As for works that take a more algorithmic approach without an explicit thematic application, several studies have been published on the different types of RS, from content-based approaches to collaborative filtering, as well as context-aware solutions, so-called CARS [16]–[64].

Going into more detail regarding tourism-themed recommenders, it is relevant to give particular attention to ontology-based approaches. One of the more important examples concerning the present work is Moreno, A. et al. [8]. In this work, an ontology-based approach (SigTur/E-destination) is developed to provide recommendations for tourism in the region of Tarragona. The approach begins with the definition of a tourism domain ontology, which describes the tourist activities in a hierarchy and bins the activities according to a given taxonomy. The ontology is thus used to explicitly classify the activities to recommend among a predefined set of distinctive main concepts, which are used by the intelligent recommender system in its reasoning processes. The recommender then applies collaborative and content-based techniques to provide the recommendation. Another relevant work is that of García-Crespo, A. et al. [11], which proposes a semantic-based expert system to provide recommendations in the tourist domain (Sem-Fit). The proposed system works based on the consumer's experience of the recommendations provided by the system. Sem-Fit uses this experience point of view to apply fuzzy logic techniques that relate customer and hotel characteristics, represented by means of domain ontologies and affect grids. An early and interesting work by Huang, Y. and Bian, L. [15], which applies Bayesian networks to attain personalized recommendations for tourist attractions, is also worth mentioning. This work is from 2009 and uses ontologies to classify different types of tourist attractions. It then uses a Bayesian network to calculate the posterior probabilities of a given tourist's preferred activities and the traveler category he fits into. Other works on tourism applications of recommender systems could also be mentioned, but instead three surveys on this topic are highlighted. First, from 2014, Borràs, J. et al. [2] present a survey entitled "Intelligent tourism recommender systems". In this survey the various works in the state of the art are analyzed and their different approaches concerning user interface, functionalities, recommendation techniques and use of AI techniques are presented. The second work that gives an overview of the topic is from Kzaz, L. et al. [3], from 2018. In this overview, the focus is essentially on recommender approaches and the employed user and item data models. A third survey on this topic is given by Renjith, S. et al. [60] in a work titled "An extensive study on the evolution of context-aware personalized travel recommender systems". Herein, the authors start by defining the different recommender approaches that can be employed: content-based, collaborative, demographic-based, knowledge-based, hybrid, personalized and context-aware. The authors also go into detail on the different machine learning algorithms that are commonly employed, as well as the different metrics used to evaluate the quality of the predictions. Finally, they present a table with many different works, identifying whether or not each employs the previously mentioned techniques.
One of the aspects of the present work is that, as happens with some of the examples given above, it employs ontologies to organize and classify the items to be recommended. Two works can also be mentioned concerning tourism domain ontologies, in this case regarding their formulation rather than their use. These works, by Ruíz-Martinez, J. et al. [65] and Barta, R. et al. [66], present different approaches to integrate and define tourism domain ontologies. In the latter work an approach is presented that shows how to cover the semantic space of tourism and integrate different modularized ontologies. In the former, a strategy is presented to automatically instantiate and populate a domain ontology by extracting semantic content from textual web documents. That work deals essentially with natural language processing and named entity recognition, which are some of the techniques also employed in this paper for ontology population or, in other words, for the classification of the different items to recommend according to the ontology.
Many other works should also be referenced, this time not necessarily linked to the tourism theme, but rather due to their focus on the algorithmic aspect or the recommendation strategy, regardless of its field of application. One particular type of recommender system that has been very dominant in the literature in recent times is the context-aware recommender system (CARS). The work by Kulkarni, S. et al. [32] gives a review of the state-of-the-art techniques employed in context-aware recommender systems. In this work the authors list the most common algorithmic approaches, from bio-inspired algorithms to other common and less common machine learning algorithms, and then enumerate the works that employed each type of solution. Another review study on context-aware recommender systems is authored by Haruna, K. et al. [67]. In this work, the authors particularly emphasize the manner in which the contextual filtering is applied, for which there are three variants: pre-filtering, post-filtering and context modelling. The difference between the approaches has to do with how context filtering is combined with the recommendation process. Hence, in pre-filtering the recommender filters the items prior to recommendation, while in post-filtering the opposite happens. In context modelling there is a more complex integration of the context filtering and the recommendations. The authors then go on to classify the different works in the literature according to this and other topics, such as the employed algorithms. A third overview paper on the topic of CARS is the work by Raza, S. et al. [44]. In this work, the authors focus on the types of algorithms, the dimensionality reduction techniques, the user modelling techniques and finally the evaluation metrics and datasets employed. Still focusing on CARS, a context-aware knowledge-based recommender system for movie showtimes called RecomMetz is presented in the work by Colombo-Mendoza, L. et al. [58]. The CARS developed there has time awareness, crowd awareness and location awareness as part of its context-awareness composition. It is interesting to note that its location awareness employs an exponential distance decay that discards items that are far away from the user; this sort of mechanism is also employed in the current work, but with other goals. A last example on CARS is a genetic algorithm (GA) approach based on spatio-temporal aspects [68] by Linda, S. et al. Here, the interesting aspect is the inclusion of a GA to optimize the temporal weights of each individual while employing collaborative filtering for the recommendations.

Lately, one of the most studied techniques for recommender systems has been Factorization Machines (FM) [69]. In the present work, a field-aware version of this technique, known as an FFM, is employed. This technique is a kind of collaborative filtering method that gained some notoriety for click-through rate prediction [64], among other problems. Several versions of FMs exist in the literature, ensembles with deep neural networks [45] being one such version. The value of FMs is that they are more powerful than traditional matrix factorization techniques, being able to incorporate features and information such as implicit feedback. For these reasons, an FM, more specifically an FFM, is one of the recommenders employed in the proposed recommender system, constituting its collaborative filtering component.
1.2 Description of the RS and field of application
The proposed RS in this work is to be applied in the tourism industry. More specifically, the project entails the creation of a recommender system to be used by hotel companies to recommend to their guests their vast lists of partners in the region. It is very common that large hotel companies have hundreds of partners offering products, and most hotel guests are unaware of most of them. The partners usually offer a wide array of products, which need an ontology to be organized and better recommended. The proposed RS starts by having a Partner Management Platform (PMP) for the hotel's partners, where they can manually introduce the items they want to be recommended in the RS. The PMP, which is essentially an interface of the Item DB, feeds the Domain Ontology, which exists in a graph DB. The users are clients of the hotel that have checked in, and they exist in the User DB, which houses not only demographic information but also user preferences which are collected and inferred by the RS. The RS interface is a web app which is presented in a further section of the paper. In the following sections more detail is provided concerning the various components of the RS, starting with the presentation of the RS architecture in the following section.
2 Architecture and frameworks of the recommender system
The architecture of the RS can essentially be divided into four parts: the data repository, the context-aware subsystem, the recommender system per se and the user interface. In the following figure the architecture is presented with each of its subcomponents. An overview of each of the subcomponents is given in the following subsections.

Figure 1 Architecture of the RS.
2.1 Data repository
The first element of the recommender system is its data repository, in the sense that this is where it starts, particularly with the Partner Management Platform (PMP). It is through this PMP that we
have the introduction of the items, by the partners, to be recommended by the RS. In the PMP, the partners introduce the items alongside the necessary description and keywords. This information introduced in the PMP is organized into an Item DB and later inserted into the domain ontology, which is explained in detail later on.

Other than the PMP with its Item DB and the mentioned domain ontology, the data repository also has a User DB. This DB holds both the demographic information collected from the users that check in to the hotel and the preference vectors that are inferred and managed by the RS. The RS uses these two components of the user info to make predictions and to build different recommendation models based on demographic, content and collaborative filtering techniques.
2.1.1 Domain ontology - Neo4j and automatic population of the ontology
As for the domain ontology, the initial approach was to adopt the ontology presented in SigTur [8]. In addition, Neo4j (www.neo4j.com), which is a graph DB, was chosen to house the ontology and to facilitate the automatic ontological extension with the items from the PMP. In the following figures, the original ontology is shown already inserted in a Neo4j graph.

Figure 2 Ontology inserted in Neo4j.
Figure 3 Sample of the ontology (highlighted section in previous figure).
The advantage of using the Neo4j framework is that it facilitates the automation of the ontological extension. This ontological extension is achieved through the use of NLP techniques, such as named entity recognition and cosine similarity between semantic concepts, using the spaCy Python library integrated with Neo4j methods. These processes start with the insertion of the items from the PMP or the Item DB. The items are parsed and tokenized, using the item descriptions and/or keywords. The parsed and tokenized items are then linked to the ontology by means of the semantic similarity between their keywords and description and each of the ontological subclasses. Similarity scores above a given threshold originate a link between the item and that specific ontological subclass. This process, which starts with parsing, removal of stopwords and tokenization and ends with concept similarity, is performed with methods from the spaCy library. The concept similarity is computed using spaCy's vast pretrained word vectors. In addition, named entity recognition is also performed on the items, automatically linking a Wikipedia entry, if such an entry exists. In Figure 4, a representation of the ontology after being extended with some items via the described process is shown. One can see the original nodes in orange, which belong to the ontology classes, some of which are now linked to grey nodes representing the items. The green nodes represent the Wikipedia page object when such an object was found. In Figure 5 a zoomed view of the highlighted zone in Figure 4 is shown. One can see two instances in which a Wikipedia page object was found by the named entity recognition procedure. The items were linked to the ontology subclasses and one can observe that the links make sense in these cases, with driving an F1 racecar linked to "Motor Sports", and golf lessons and discounts on clubs linked to "Golf".
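To make the linking step concrete, the following sketch illustrates how an item's description can be compared against ontology subclass labels with spaCy similarity and, when the score exceeds a threshold, written into Neo4j as a relationship. It is a minimal sketch under assumptions: the node labels, relationship type, model name, connection details and threshold are illustrative, not the exact ones used in the implemented system.

```python
import spacy
from neo4j import GraphDatabase

nlp = spacy.load("en_core_web_md")  # medium English model with pretrained word vectors

def link_item_to_ontology(tx, item_name, description, subclasses, threshold=0.6):
    """Create a relationship between an item and every ontology subclass whose
    label is semantically similar to the item description (illustrative schema)."""
    doc = nlp(description.lower())
    # keep content tokens only, mirroring the stopword-removal step described above
    doc = nlp(" ".join(t.text for t in doc if not t.is_stop and not t.is_punct))
    for subclass in subclasses:
        score = doc.similarity(nlp(subclass.replace("_", " ").lower()))
        if score >= threshold:
            tx.run(
                "MERGE (i:Item {name: $item}) "
                "WITH i MATCH (c:OntologyClass {name: $cls}) "
                "MERGE (i)-[r:BELONGS_TO]->(c) SET r.score = $score",
                item=item_name, cls=subclass, score=float(score),
            )

driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
with driver.session() as session:
    session.execute_write(
        link_item_to_ontology,
        "Golf lessons", "Golf lessons and discounts on clubs",
        ["Golf", "MotorSports", "Museums", "NightLife"],
    )
```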
Figure 4 Ontology extended with the addition of items.

Figure 5 Sample of the extended ontology (highlighted section in previous figure).
The recommender system module then imports the extended ontology, both the classes and the items. It will use the extended ontology to give content-based recommendations.
2.2 Context-aware subsystem module
The context-aware subsystem module performs item pre-filtering on the basis of three context submodules: location-aware, weather-aware and repetition-aware. In the case of the location-aware submodule, the objective is to filter out the hotel partners that are not located close to a specific instance of the hotel. Since the hotel company can have a wide array of partners that may, in many cases, be close to one specific hotel but not to other hotels in other locations, such as local or regional partners that only provide services to the hotels in the area, a first contextual filtering phase is to apply location pre-filtering. Then there is the weather-aware submodule, where the ontological sub-classes are associated with a fuzzy definition of when it makes sense for them to be recommended; for example, the beach ontology class or the outdoor sports ontology class would tend to be penalized in bad weather. Finally, there is a third, largely novel submodule: the repetition-aware module. Here, each ontological class has a different elapsed-time parameter that affects an inverse exponential penalization factor to mimic the repeatability of a given item. For example, one would probably be more willing to repeat a restaurant than a museum in the same week. So, different ontological classes have different factors that affect the inverse exponential function, which we may call the unwillingness-to-repeat function, and which defines how soon a user may be willing to repeat a given item.
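As an illustration of the repetition-aware idea, the sketch below computes a recovery factor that returns towards 1 as time since the last consumption of an item's class elapses; the per-class time constants are invented for the example and are not the values used in the system.

```python
import math

# Hypothetical per-class time constants (in days): small values mean the class
# is quickly repeatable, large values mean users rarely repeat it soon.
REPEAT_TIME_CONSTANT = {"Gastronomy": 2.0, "NightLife": 3.0, "Museums": 30.0, "ThemePark": 60.0}

def repetition_factor(ontology_class: str, days_since_last_use: float) -> float:
    """Inverse-exponential 'unwillingness to repeat' factor in [0, 1):
    0 right after consuming an item of this class, approaching 1 as time passes."""
    tau = REPEAT_TIME_CONSTANT.get(ontology_class, 14.0)  # assumed default constant
    return 1.0 - math.exp(-days_since_last_use / tau)

print(round(repetition_factor("Gastronomy", 1), 2))  # restaurant after 1 day: ~0.39
print(round(repetition_factor("Museums", 1), 2))     # museum after 1 day: ~0.03
```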
2.3 Recommender system module
The recommender system module is the main module, as the name entails. This module is constituted by a user profile manager and a preference manager, besides the recommender pool. The recommender pool and the models that compose it are addressed in depth in Section 3 of this work. Here it suffices to say that the recommender pool is the set of different recommender models that provide user recommendations. The models create an ensemble, when more than one is active, that provides recommendations using different techniques and approaches.

As for the remainder of the recommender system module, the user profile manager and the preference manager, these two sub-modules manage the user-related information, such as item ratings and other user feedback in the case of the former, while the latter manages the user preference vectors and propagates the user feedback on items to update the user preference vectors accordingly. The way this is done will become clearer in the next sections.
2.4 User interface - web app
The last component is the user interface, which in this case is a web app that connects to the recommender system module and the other modules through real-time and batch inference endpoints that connect to ML pipelines defined in Azure.
Figure 6 App mockup showing the four main screens: welcome, preference definition, home and user profile.
In the previous figure one can observe the four different screens the user sees during his App experience. The FILTER screen is only presented to the user the first time he logs in and is, in essence, a series of check boxes where the user defines his preferences. These check boxes are used to give a first estimate of the user's preferences concerning the ontology classes. The user's choices define his preference vectors, which are then used to make content-based recommendations. As for the HOME screen, it shows the different recommendations made to the user by the RS; here the user can bookmark items, book items or mark an item as "uninteresting". Finally, in the PROFILE screen, the user can observe his profile in terms of preferences collected and inferred by the RS as well as demographic information, such as date of birth, nationality, etc. The different interactions the user can have with the App and the consequent interactions between the App and the RS and back to the user are shown in Figure 7. In this figure one can see how these interactions cascade and what the user gets back from each action he undertakes. One can summarize the actions the user can take as the following:
• Logging in
• Preference input
• Viewing recommendations
• Item feedback
• Item booking
• Item rating
Figure 7 User-App-RS interaction. User's various possible actions and respective interactions between the App and the RS.
3 Recommenders and stages in RS
The recommender system module mentioned in the previous section is composed of three components: the user profile manager, the preference manager and the recommender pool. The former two have already been covered; in this section, the latter is explained in depth. The recommender pool is composed of four recommenders of different types: content-based, popularity-based, demographic-based and collaborative. These four recommenders are modeled with specific algorithms or employ specific techniques, and they come into play in different phases of maturity of the RS. These phases of maturity concern the amount of data, that is, the number of users
and rating density. Only after certain pre-specified values of users and rating density have been reached are some of these methods activated or, in other words, are some of the phases reached. In the following, the different phases and algorithms used are explained.
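As a rough illustration of this staging logic, the active phase can be derived from the current user count and rating density; the threshold values below are invented for the example and are not the ones adopted in the system.

```python
def active_phase(n_users: int, rating_density: float) -> int:
    """Return the RS maturity phase; the thresholds are purely illustrative."""
    if n_users >= 5000 and rating_density >= 0.02:
        return 4   # collaborative filtering (FFM) joins the pool
    if n_users >= 1000 and rating_density >= 0.01:
        return 3   # demographic-based recommender joins the pool
    if rating_density > 0.0:
        return 2   # first rating received: hybrid (content-based + popularity-based)
    return 1       # cold start: content-based on declared preferences only
```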
3.1 Phase 1
At the beginning, the RS is void of any ratings or users, and only items exist in the RS. When a new user logs in for the first time, in order for the RS to make any meaningful recommendation, some information has to be provided in the form of user preferences. This is, at this stage, the only way to overcome cold-start issues. The user's preferences, which are associated with the predetermined ontology, are given and used to provide content-based recommendations to the user. The user will then provide explicit and implicit feedback, in the form of booking items, bookmarking items or explicitly indicating they don't like an item. This feedback is received by the RS, which then uses it to update the user's preference vectors. This update originates new recommendations to the user.
3.1.1 Preference vectors
At the core of phase 1 are the user preference vectors. These preference vectors are ontology related and they are used to make content-based recommendations. There are three preference vectors per user:
• High-level preferences
• Low-level preferences
• Specific preferences
The high-level preferences are the ones the user identifies in the beginning and are associated with the ontological super-classes. These classes are the most abstract classes and lower in number. They are the first layer of ontological classes and are the ones that have no parent class, only child classes. Observing Figure 4, the Sports ontological class is an example of a high-level preference since there is no ontology class above it.
The low-level preferences are associated with the ontological classes that link directly to the items. These ontological classes are more specific, less abstract and larger in number. Observing Figure 4 and Figure 5, Golf is an example of a low-level preference, because two items link to it.
Finally, the specific preferences relate directly to the items, and form a vector that results from the other two higher-level preference vectors and the user's feedback on the items.
The way these vectors interact is explained in the following steps (a small sketch of this propagation is given after the list):
1. The user identifies the high-level preferences when he logs in for the first time. These preferences are propagated by way of vector multiplication with the low-level ontological preferences.
2. The low-level preferences are then propagated to the item level by way of vector multiplication as well, originating the specific preference vector. The items are ranked, and a subset of the highest ranked items is recommended to the user.
3. The user gives feedback on the recommendations by either bookmarking items, booking items or dismissing items. The feedback is propagated upwards to the higher-level preference vectors with different intensities. The low-level preference vector is strongly affected, while the high-level preference vector is less affected because it is further upstream. This sort of "trickle-up" propagation of user feedback alters both high-level and low-level preference vectors with different magnitudes.
4. New item recommendations are calculated, this time using both the high-level and low-level preference vectors to predict whether an item should be recommended or not. The prediction from each vector is weighed and aggregated, originating an ensemble prediction using both high and low preference vectors. The items are ranked, and a subset of the highest ranked items is recommended to the user.
5. Repeat step 3.
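The minimal sketch below (with invented dimensions, membership matrices and learning rates) illustrates the propagation just described: high-level preferences are pushed down through binary membership matrices to score items, and item feedback trickles back up with a stronger effect on the low-level vector than on the high-level one.

```python
import numpy as np

rng = np.random.default_rng(0)

# Hypothetical sizes: 5 high-level classes, 12 low-level classes, 29 items.
H2L = rng.integers(0, 2, size=(5, 12))    # binary map: high-level class -> low-level class
L2I = rng.integers(0, 2, size=(12, 29))   # binary map: low-level class -> item

high = rng.random(5)           # high-level preferences from the FILTER screen
low = high @ H2L               # step 1: propagate down to low-level preferences
item_scores = low @ L2I        # step 2: specific (item-level) scores
recommended = np.argsort(item_scores)[::-1][:5]   # top-5 items

def trickle_up(item_idx, signal, low, high, lr_low=0.20, lr_high=0.05):
    """Step 3: feedback (+1 booked / -1 dismissed) updates the low-level vector
    strongly and the high-level vector weakly (illustrative learning rates)."""
    low = low + lr_low * signal * L2I[:, item_idx]
    high = high + lr_high * signal * (H2L @ L2I[:, item_idx])
    return low, high

low, high = trickle_up(recommended[0], +1.0, low, high)
# Step 4: weighed ensemble of both levels for the next round of recommendations.
new_scores = 0.5 * (low @ L2I) + 0.5 * ((high @ H2L) @ L2I)
```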
3.1.2 Ontological content-based recommender
The content-based recommender is essentially vector multiplication between preference vectors and content vectors. Content vectors are binary vectors which map one preference level to the items' content or to another preference vector's content, while preference vectors give the intensity of the preference for each ontological category.
In step 4, the high and low preference vectors are multiplied with their corresponding item content vector, originating a content-based prediction. Both predictions are weighed and aggregated, and a subset of the highest ranked items is recommended to the user. After the user's feedback, both preference vectors are updated according to the "trickle-up" propagation concept introduced above. Then, new recommendations are calculated with the new preference vectors.
3.2 Phase 2
If the user booked and used an item, he can then rate said item, which will kickstart the hybrid recommender composed of the initial content-based recommender and a new popularity-based component. This popularity-based recommender uses a so-called damped mean on every item, so that a small number of ratings does not give one item an exaggerated edge over another, such as an item with a single 5-star rating having a 5-star average.
$$\mathrm{DampedMean}_j = \frac{\sum_{i=1}^{n} r_{ji} + k \cdot \bar{r}_G}{n + k}$$

where $r_{ji}$ is item $j$'s rating $i$, $k$ is the damping coefficient, $\bar{r}_G$ is the global mean rating or some other default value, and $n$ is the number of reviews of item $j$.
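A direct transcription of this damped mean into code, useful as a quick numeric check (the damping coefficient and prior value are illustrative):

```python
def damped_mean(ratings, k=5, global_mean=3.0):
    """Damped (smoothed) mean: items with few ratings are pulled towards the
    global mean instead of dominating the popularity ranking."""
    return (sum(ratings) + k * global_mean) / (len(ratings) + k)

print(damped_mean([5.0]))        # a single 5-star rating -> 3.33, not 5.0
print(damped_mean([5.0] * 50))   # fifty 5-star ratings  -> 4.82, close to 5.0
```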
3.2.1 Hybrid recommender (content-based + popularity-based)
The start of the hybrid recommender marks the start of phase 2. At this point in the RS, there aren't many users and there aren't many ratings. The lack of both means that popularity-based, demographic-based or collaborative approaches are still of little use. As more users join and more ratings are given, other recommenders become increasingly useful. When a given threshold of user and rating numbers is reached, the demographic-based recommender can be initiated.
The way in which the hybrid recommender uses both recommenders is by cascading ensemble. That is, the popularity recommender pre-filters the items according to a rating threshold, and then the content-based recommender recommends items that were not eliminated by the popularity recommender.
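A minimal sketch of this cascade (data layout and threshold assumed for illustration): the popularity stage removes poorly rated items and the content-based stage ranks whatever survives.

```python
def cascade_recommend(items, damped_means, content_scores, min_rating=3.0, top_n=5):
    """items: list of item ids; damped_means / content_scores: dicts item -> float.
    Popularity pre-filter followed by content-based ranking (illustrative)."""
    surviving = [i for i in items if damped_means.get(i, 0.0) >= min_rating]
    return sorted(surviving, key=lambda i: content_scores.get(i, 0.0), reverse=True)[:top_n]
```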
Phase 3
|
902 |
+
As more users are added to the RS, and as these users give feedback on recommended items,
|
903 |
+
other types of recommenders can enter the recommender pool. A first set of threshold values for
|
904 |
+
number of users and rating density is defined. When these thresholds are reached, phase 3 is
|
905 |
+
initiated with yet another recommender being added: the demographic-based recommender.
|
906 |
+
|
907 |
+
3.3.1 Demographic-based recommender
|
908 |
+
The demographic-based recommender is composed by two ML algorithms. One clustering
|
909 |
+
algorithm and one classification algorithm. The clustering algorithm has the purpose of identifying
|
910 |
+
clusters of similar users based on their demographic features. he user’s demographic features
|
911 |
+
can be age, region/country, group composition, budget, academic degree, etc. These features
|
912 |
+
can be a mix of numerical, ordinal and nominal features and so a clustering algorithm that can
|
913 |
+
handle different data types is necessary. After the clustering has been performed, and the users
|
914 |
+
are all organized in clusters, a classification algorithm is used to predict whether a user will enjoy
|
915 |
+
each item based on the item feedback of other users in the same cluster.
|
916 |
+
For clustering, the algorithm employed was K-Prototypes, which works similarly to K-Means but
|
917 |
+
can deal with mixed data types, particularly ordinal and nominal data. To define the clustering
|
918 |
+
model, a knee region identifier is employed to automatically identify the optimal (or close to
|
919 |
+
|
920 |
+
16
|
921 |
+
|
922 |
+
optimal) number of clusters. The clustering model is retrained from time to time when sufficient
|
923 |
+
new users have been added since the last model fitting.
|
924 |
+
For classification a k-Nearest Neighbor algorithm, or kNN, was employed. Here, the users from
|
925 |
+
the same cluster are used to predict whether a given user will enjoy the items, based on those
|
926 |
+
users’ feedbac . he uses a custom distance metric that ta es into account both Jaccard
|
927 |
+
and Manhattan distance metrics for the ordinal and nominal features. The kNN than weighs the
|
928 |
+
opinion of the other users inversely proportional to their distance to the user to whom the
|
929 |
+
predictions are being made. The predictions given by this algorithm are weighed and added to
|
930 |
+
the predictions made by the hybrid recommender.
|
931 |
+
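A compact sketch of this two-step approach, using the kmodes package for K-Prototypes and a hand-rolled mixed-distance kNN. The column layout, number of clusters, distance definition and weighting are assumptions for illustration, not the production configuration.

```python
import numpy as np
from kmodes.kprototypes import KPrototypes

# Toy user matrix: columns 0-1 ordinal (age bin, budget), columns 2-3 nominal
# (gender, region) - purely illustrative encoding.
X = np.array([[2, 1, "Male", "South Europe"],
              [3, 2, "Female", "North Europe"],
              [2, 1, "Male", "Asia"],
              [4, 3, "Female", "North Europe"]], dtype=object)

kproto = KPrototypes(n_clusters=2, random_state=0)
clusters = kproto.fit_predict(X, categorical=[2, 3])

def mixed_distance(a, b, num_idx=(0, 1), cat_idx=(2, 3)):
    """Manhattan distance on the ordinal columns plus a simple mismatch ratio
    (a Jaccard-like term) on the nominal columns."""
    num = sum(abs(float(a[i]) - float(b[i])) for i in num_idx)
    cat = sum(a[i] != b[i] for i in cat_idx) / len(cat_idx)
    return num + cat

def knn_predict(target, neighbors, feedback, k=3):
    """Distance-weighted average of the cluster neighbours' feedback on one item."""
    nearest = sorted(((mixed_distance(target, n), f) for n, f in zip(neighbors, feedback)))[:k]
    weights = [1.0 / (d + 1e-6) for d, _ in nearest]
    return sum(w * f for w, (_, f) in zip(weights, nearest)) / sum(weights)
```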
3.4 Phase 4
In phase 4, collaborative filtering is added to the pool. As happens with phase 3, the entry into phase 4 takes place when thresholds of user cardinality and rating density are reached. Once this happens, the collaborative filtering model is fitted and starts giving recommendations. The algorithm used for collaborative filtering is a Field-Aware Factorization Machine (FFM), which has already been introduced in Section 1. In the following sub-section, the FFM application is explained in more detail.
+
3.4.1 Collaborative filtering with Field-Aware Factorization Machines (FFM)
|
942 |
+
To use FFMs, a specific Python library (xLearn) is used and the data also has to be transformed
|
943 |
+
into a specific format. A sample of a dataset in said format is shown in the following table.
|
944 |
+
|
945 |
+
Table 1 Dataset in the FFM format where each column represents a feature, except for column 0 which
|
946 |
+
represents the labels.
|
947 |
+
|
948 |
+
0
|
949 |
+
1
|
950 |
+
2
|
951 |
+
3
|
952 |
+
4
|
953 |
+
5
|
954 |
+
6
|
955 |
+
7
|
956 |
+
8
|
957 |
+
9
|
958 |
+
0
|
959 |
+
0
|
960 |
+
0:1:1
|
961 |
+
1:2:1
|
962 |
+
2:3:1
|
963 |
+
3:4:1
|
964 |
+
4:5:1
|
965 |
+
5:6:1
|
966 |
+
6:7:1
|
967 |
+
7:8:1
|
968 |
+
8:9:1
|
969 |
+
1
|
970 |
+
1
|
971 |
+
0:10:1
|
972 |
+
1:2:1
|
973 |
+
2:11:1
|
974 |
+
3:4:1
|
975 |
+
4:5:1
|
976 |
+
5:6:1
|
977 |
+
6:12:1
|
978 |
+
7:13:1
|
979 |
+
8:14:1
|
980 |
+
2
|
981 |
+
0
|
982 |
+
0:15:1
|
983 |
+
1:16:1
|
984 |
+
2:3:1
|
985 |
+
3:4:1
|
986 |
+
4:17:1
|
987 |
+
5:6:1
|
988 |
+
6:18:1
|
989 |
+
7:19:1
|
990 |
+
8:20:1
|
991 |
+
3
|
992 |
+
1
|
993 |
+
0:15:1
|
994 |
+
1:2:1
|
995 |
+
2:21:1
|
996 |
+
3:22:1
|
997 |
+
4:17:1
|
998 |
+
5:6:1
|
999 |
+
6:23:1
|
1000 |
+
7:8:1
|
1001 |
+
8:24:1
|
1002 |
+
4
|
1003 |
+
1
|
1004 |
+
0:10:1
|
1005 |
+
1:16:1
|
1006 |
+
2:3:1
|
1007 |
+
3:4:1
|
1008 |
+
4:17:1
|
1009 |
+
5:25:1
|
1010 |
+
6:23:1
|
1011 |
+
7:26:1
|
1012 |
+
8:27:1
|
1013 |
+
...
|
1014 |
+
...
|
1015 |
+
...
|
1016 |
+
...
|
1017 |
+
...
|
1018 |
+
...
|
1019 |
+
...
|
1020 |
+
...
|
1021 |
+
...
|
1022 |
+
...
|
1023 |
+
...
|
1024 |
+
686422
|
1025 |
+
1
|
1026 |
+
0:1:1
|
1027 |
+
1:2:1
|
1028 |
+
2:3:1
|
1029 |
+
3:4:1
|
1030 |
+
4:17:1
|
1031 |
+
5:25:1
|
1032 |
+
6:23:1
|
1033 |
+
7:8:1
|
1034 |
+
8:37:1
|
1035 |
+
686423
|
1036 |
+
1
|
1037 |
+
0:34:1
|
1038 |
+
1:2:1
|
1039 |
+
2:21:1
|
1040 |
+
3:4:1
|
1041 |
+
4:5:1
|
1042 |
+
5:25:1
|
1043 |
+
6:35:1
|
1044 |
+
7:8:1
|
1045 |
+
8:36:1
|
1046 |
+
686424
|
1047 |
+
1
|
1048 |
+
0:10:1
|
1049 |
+
1:16:1
|
1050 |
+
2:3:1
|
1051 |
+
3:4:1
|
1052 |
+
4:17:1
|
1053 |
+
5:25:1
|
1054 |
+
6:18:1
|
1055 |
+
7:8:1
|
1056 |
+
8:24:1
|
1057 |
+
686425
|
1058 |
+
1
|
1059 |
+
0:34:1
|
1060 |
+
1:16:1
|
1061 |
+
2:21:1
|
1062 |
+
3:22:1
|
1063 |
+
4:17:1
|
1064 |
+
5:25:1
|
1065 |
+
6:50:1
|
1066 |
+
7:13:1
|
1067 |
+
8:49:1
|
1068 |
+
686426
|
1069 |
+
1
|
1070 |
+
0:15:1
|
1071 |
+
1:2:1
|
1072 |
+
2:3:1
|
1073 |
+
3:4:1
|
1074 |
+
4:17:1
|
1075 |
+
5:6:1
|
1076 |
+
6:23:1
|
1077 |
+
7:8:1
|
1078 |
+
8:44:1
|
1079 |
+
|
1080 |
+
|
1081 |
+
17
|
1082 |
+
|
1083 |
+
This format is more complex than that of the standard FM. This is due to the richer information that is ingested by the FFM, which uses information about the fields to define the latent vectors. That is, while in FMs each feature has one latent vector, in FFMs this single representation is broken down into multiple latent vectors, one for each of the other fields.

$$\hat{y}(x) := \omega_0 + \sum_{i=1}^{n} \omega_i x_i + \sum_{i=1}^{n} \sum_{j=i+1}^{n} \langle \mathbf{v}_i, \mathbf{v}_j \rangle x_i x_j$$

In the equation that represents the FM, shown above, the feature interactions represented by $\langle \mathbf{v}_i, \mathbf{v}_j \rangle$ would correspond to the following in our scenario (user demographic features):

$$v_{male} \cdot v_{bluecollar} + v_{male} \cdot v_{lowbudget} + v_{male} \cdot v_{northeurope} + \cdots$$

That is, the male latent vector that multiplies with each other latent vector is always the same. The idea behind the FFM is that the weights of the male latent vector might not be the same when multiplying with the job latent vectors as when multiplying with the budget latent vectors, and so on. Thus, in the FFM, the latent vectors are field-aware, which results in the following:

$$v_{male,job} \cdot v_{bluecollar,gender} + v_{male,budget} \cdot v_{lowbudget,gender} + v_{male,region} \cdot v_{northeurope,gender} + \cdots$$

Besides demographic features, as shown in this example, the latent vectors can also easily incorporate item features as well as contextual features, and can thus integrate context-awareness in a deeper sense than simple contextual pre-filtering or post-filtering.
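For reference, a minimal xLearn training sketch on data stored in the libffm-style format of Table 1 is given below; the file paths and hyperparameter values are placeholders rather than the values used in the case study.

```python
import xlearn as xl

# Train a field-aware factorization machine on files written in the
# "field:feature:value" format illustrated in Table 1.
ffm_model = xl.create_ffm()
ffm_model.setTrain("train.ffm")        # placeholder path
ffm_model.setValidate("valid.ffm")     # placeholder path

param = {"task": "binary",             # 0/1 implicit-feedback style labels
         "lr": 0.2, "lambda": 0.002, "metric": "auc"}   # illustrative values

ffm_model.fit(param, "ffm_model.out")

ffm_model.setTest("test.ffm")          # placeholder path
ffm_model.setSigmoid()                 # map raw scores to [0, 1]
ffm_model.predict("ffm_model.out", "predictions.txt")
```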
The FFM model represents the last addition to the recommender pool. The predictions obtained from it are weighed and then aggregated with the predictions given by the other two, the hybrid and the demographic recommender. The weights given to each recommender may be set to change over time so that they accompany the maturity and complexity of each of the recommenders in the pool, thus giving progressively larger weight to the FFM as more users and more ratings are added to the system.

Figure 8 Diagram of the various RS phases and interactions between RS and Data Repository (DB) components.
+
4
|
1165 |
+
Recommender system - Case study (CS) with synthetic data
|
1166 |
+
One of the main challenges in designing the recommender system proposed in this work was the
|
1167 |
+
lack of data to perform any type of experiment or even just to aid and inspire in the definition of
|
1168 |
+
the algorithms to employ. The lack of data was absolute, both on the side of the items as on the
|
1169 |
+
side of the users and preferences. The main issue is the non-existence of a dataset with user
|
1170 |
+
demographic features and user preferences, since such a dataset would allow to overcome some
|
1171 |
+
of the cold-start issues as well as give some idea of the data schema to be adopted.
|
1172 |
+
As a result, and since no public datasets were found that could overcome this hinderance, the
|
1173 |
+
decision was made to generate a synthetic dataset. The generated dataset was done so by using
|
1174 |
+
many different techniques from gaussian copulas to fuzzy logic. Further information on that work
|
1175 |
+
will be available in another paper by the author Camacho, VT. In the following sub-section, the
|
1176 |
+
synthetic data employed in this or ’s case study is presented.
|
1177 |
+
Besides the synthetic data, a set of metrics was chosen to get an idea about the quality of the
|
1178 |
+
results from the recommenders. Traditional ML metrics are not always adequate for RS, mainly
|
1179 |
+
because, by principle, the objective of an RS is not to emulate exactly the choices of a given user
|
1180 |
+
since, if that were the case, there ouldn’t be a need for an RS in the first place. In the metrics
|
1181 |
+
sub-section, the set of used metrics is presented.
|
1182 |
+
The remainder of this section is applying the recommenders introduced in the previous section
|
1183 |
+
and testing them with different amounts of data which will attempt to emulate the data present at
|
1184 |
+
the different phases.
|
1185 |
+
|
1186 |
+
4.1
|
1187 |
+
Synthetic data
|
1188 |
+
In the work mentioned above, a methodology for the generation of synthetic datasets for
|
1189 |
+
recommender systems is presented, thus allowing to overcome the obstacle of not having quality
|
1190 |
+
data in sufficient amount (or even at all) readily available. The difficulties that are associated with
|
1191 |
+
this task are essentially the definition of a dataset with multiple datatypes, such as numerical
|
1192 |
+
(continuous), ordinal and nominal, and with different levels of correlation among the data, as well
|
1193 |
+
as the definition of user-ratings based on well-defined latent user preferences. To overcome this,
|
1194 |
+
a methodology was devised where several different techniques are employed in sequence to
|
1195 |
+
create the datasets concerning user characteristics, item properties, item categories and latent
|
1196 |
+
user preferences associated to user and item features, and as a result, a user-item sparse ratings
|
1197 |
+
matrix. The output of the methodology is:
|
1198 |
+
1) Item dataset with item names and categories.
|
1199 |
+
2) User dataset with user characteristics (demographic features).
|
1200 |
+
3) User-item sparse ratings matrix.
|
1201 |
+
|
1202 |
+
20
|
1203 |
+
|
1204 |
+
4) Latent preferences and Multinomial Logit model to compare with the outputs of the
|
1205 |
+
Recommender System.
|
1206 |
+
|
1207 |
+
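As a hedged illustration of how latent preferences of this kind can be produced, the sketch below draws a per-user preference distribution from a multinomial-logit (softmax) model over the preference categories; the coefficients are random placeholders, not the ones used to build the actual dataset.

```python
import numpy as np

CATEGORIES = ["Beach", "Relax", "Shop", "Nightlife", "Theme park",
              "Gastro", "Sports", "Culture", "Nature", "Events"]
rng = np.random.default_rng(42)

def latent_preferences(demographics: np.ndarray, coefs: np.ndarray) -> np.ndarray:
    """Multinomial-logit preferences: softmax of a linear score per category.
    demographics: encoded user features; coefs: (n_categories, n_features)."""
    scores = coefs @ demographics
    exp = np.exp(scores - scores.max())          # numerically stable softmax
    return exp / exp.sum()

n_features = 8
coefs = rng.normal(size=(len(CATEGORIES), n_features))   # placeholder coefficients
user = rng.normal(size=n_features)                        # one encoded user
prefs = latent_preferences(user, coefs)
print(dict(zip(CATEGORIES, prefs.round(3))))              # sums to 1, like a row of Table 3
```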
4.1.1 Data Schema
From the output presented above, we can see 4 DataFrames with different information. These DataFrames each have their own schema and have features of different data types. In the following, the created DataFrames are introduced:
• Demographic Features
• Preferences
• Item Features
• User Ratings
Going into more detail regarding the user demographic features DataFrame:
• Demographic Features:
  o User ID
  o Age
  o Gender
  o Job
  o Academic Degree
  o Budget
  o Country/Region
  o Group Composition
  o Accommodation
Concerning the type of feature, they can be divided essentially into three groups: numerical, categorical ordinal and categorical nominal. Concerning numerical and categorical ordinal features, we have the following:
• Numerical
  o Age – numerical (can be transformed into age bins)
• Ordinal:
  o Age bins = ['18-30', '31-40', '41-50', '51-60', '60+']
  o Academic Degree = ['None', 'High School', 'Some College', 'College Degree']
  o Budget = ['Low', 'Mid', 'High']
  o Accommodation = ['Single', 'Double', 'Suite', 'Villa']
As for categorical nominal features, the following were modelled:
• Gender = ['Male', 'Female']
• Job = ['Blue Collar', 'White Collar']
• Country/Region = ['South Europe', 'North Europe', 'East Europe', 'North America', 'South America', 'Asia', 'Africa', 'Middle East']
• Group Composition = ['1 Adult', '2 Adults', '2 Adults + Child', 'Group of Friends']
4.1.2 Samples of the generated DataFrames
The resulting DataFrames (DF) can be used to train and test RS. In the case of the present work, they are used to simulate the different phases of data availability, thus testing the recommenders employed in each of the four phases. In the following, samples of the generated DFs are presented. The first sample shown is the User DF in Table 2. This DF is composed of the user demographic features and the UserID. The demographic features are ordinal (Age, AcDeg, Budget, Accom) and nominal (Gender, Job, Region, GroupComp). The entire set of users created has a cardinality of 100,000.

Table 2 User DF composed by the demographic features of the users.
UserID  Age  AcDeg  Budget  Accom  Gender  Job           Region         GroupComp
0       4    2      1       2      Female  blue collar   North Europe   2Adlt
1       5    4      2       3      Male    white collar  North Europe   GrpFriends
2       3    3      2       2      Female  blue collar   North Europe   2Adlt+Child
3       4    4      2       2      Female  white collar  North Europe   2Adlt+Child
4       3    3      2       3      Female  white collar  South Europe   2Adlt
...     ...  ...    ...     ...    ...     ...           ...            ...
99995   4    4      2       2      Female  white collar  North Europe   2Adlt+Child
99996   3    4      3       2      Male    white collar  Asia           2Adlt+Child
99997   1    1      1       1      Female  blue collar   South Europe   2Adlt
99998   1    3      1       2      Female  blue collar   South Europe   2Adlt+Child
99999   4    3      2       2      Male    blue collar   North America  2Adlt+Child
The second DF is the User-Preference DF, which contains the latent preferences and is presented in Table 3. These latent preferences are related to the ontology classes. The latent preferences of each user were modeled through a multinomial logit model based on their demographic features. This DF shows the relative interest of a given user in a given preference category versus any other preference category. The values between different users are not comparable.

Table 3 User-Preference DF containing the latent preferences from the Multinomial Logit model.
UserID  Beach  Relax  Shop   Nightlife  Theme park  Gastro  Sports  Culture  Nature  Events
0       0.408  0.026  0.020  0.041      0.002       0.002   0.004   0.009    0.487   0.002
1       0.002  0.077  0.017  0.015      0.009       0.457   0.041   0.271    0.107   0.002
2       0.554  0.156  0.039  0.041      0.027       0.010   0.021   0.015    0.135   0.003
3       0.005  0.038  0.012  0.000      0.003       0.252   0.003   0.674    0.009   0.002
4       0.002  0.229  0.003  0.001      0.000       0.137   0.001   0.623    0.000   0.002
...     ...    ...    ...    ...        ...         ...     ...     ...      ...     ...
99995   0.003  0.106  0.202  0.000      0.020       0.115   0.005   0.202    0.337   0.010
99996   0.001  0.127  0.064  0.000      0.002       0.034   0.001   0.750    0.016   0.005
99997   0.050  0.285  0.030  0.337      0.110       0.006   0.091   0.019    0.015   0.057
99998   0.031  0.712  0.007  0.083      0.103       0.004   0.021   0.027    0.006   0.007
99999   0.005  0.880  0.064  0.000      0.035       0.000   0.009   0.003    0.002   0.003
The third DF sample presented is the Item DF in Table 4. Here a set of 29 items was included, belonging to different categories, which are the user latent preferences presented in the previous table.

Table 4 Item DF with corresponding item category (ontology and latent preferences).
itemID
|
1539 |
+
Item Name
|
1540 |
+
Category
|
1541 |
+
0
|
1542 |
+
A service that offers you the opportunity to
|
1543 |
+
do bungee-jumping
|
1544 |
+
['Leisure', 'Sports', 'Routes', 'Events',
|
1545 |
+
'Nature']
|
1546 |
+
1
|
1547 |
+
A tavern that serves traditional food
|
1548 |
+
['Leisure', 'Events', 'Culture', 'Towns']
|
1549 |
+
2
|
1550 |
+
Ancient history museum
|
1551 |
+
['Culture', 'ViewPoints', 'Events',
|
1552 |
+
'Nature', 'Routes', 'Towns']
|
1553 |
+
3
|
1554 |
+
Discount for Callaway clubs
|
1555 |
+
['Sports']
|
1556 |
+
4
|
1557 |
+
Get a discount for Comic-Con
|
1558 |
+
['Sports']
|
1559 |
+
5
|
1560 |
+
Get a free pint at the pub
|
1561 |
+
['Events', 'Leisure']
|
1562 |
+
6
|
1563 |
+
Get a free pizza at Pizza Hut
|
1564 |
+
['Leisure']
|
1565 |
+
7
|
1566 |
+
Get a voucher for Sephora
|
1567 |
+
['Leisure']
|
1568 |
+
8
|
1569 |
+
Go shopping in our new mall
|
1570 |
+
['Leisure']
|
1571 |
+
9
|
1572 |
+
Golf lessons
|
1573 |
+
['Sports', 'Leisure', 'Events']
|
1574 |
+
|
1575 |
+
23
|
1576 |
+
|
1577 |
+
10
|
1578 |
+
Great meals that are tasty
|
1579 |
+
['Leisure', 'Events']
|
1580 |
+
11
|
1581 |
+
Medieval fair
|
1582 |
+
['Culture', 'Events', 'Nature', 'Towns']
|
1583 |
+
12
|
1584 |
+
One day snorkeling with the fish
|
1585 |
+
['Sports', 'Leisure', 'Nature']
|
1586 |
+
13
|
1587 |
+
One of the main nightclubs in the city
|
1588 |
+
['Culture', 'Events', 'Nature', 'Leisure',
|
1589 |
+
'Routes', 'Towns']
|
1590 |
+
14
|
1591 |
+
Rest and relaxation at the spa
|
1592 |
+
['Leisure', 'Routes']
|
1593 |
+
15
|
1594 |
+
Surfing lessons
|
1595 |
+
['Sports']
|
1596 |
+
16
|
1597 |
+
Take a trip in a hot-air balloon
|
1598 |
+
['Sports']
|
1599 |
+
17
|
1600 |
+
Try go-karts with your friends
|
1601 |
+
['Sports']
|
1602 |
+
18
|
1603 |
+
Try scubadiving
|
1604 |
+
['Sports']
|
1605 |
+
19
|
1606 |
+
Try spearfishing with a pro
|
1607 |
+
['Sports']
|
1608 |
+
20
|
1609 |
+
Watch a FC Porto match
|
1610 |
+
['Events', 'Sports']
|
1611 |
+
21
|
1612 |
+
Watch a SL Benfica match
|
1613 |
+
['Events', 'Sports']
|
1614 |
+
22
|
1615 |
+
Watch a Sporting CP match
|
1616 |
+
['Sports', 'Events']
|
1617 |
+
23
|
1618 |
+
Watch a live concert of Mastodon
|
1619 |
+
['Events']
|
1620 |
+
24
|
1621 |
+
Watch a live football match
|
1622 |
+
['Sports', 'Events']
|
1623 |
+
25
|
1624 |
+
Watch a motogp race
|
1625 |
+
['Events', 'Sports']
|
1626 |
+
26
|
1627 |
+
drive a F1 racecar
|
1628 |
+
['Sports']
|
1629 |
+
27
|
1630 |
+
go to the spa
|
1631 |
+
['Leisure']
|
1632 |
+
28
|
1633 |
+
visiting Disneyland
|
1634 |
+
['Leisure']
|
1635 |
+
|
1636 |
+
The last data sample is the result of an outer product between the user preferences from the multinomial logit model and the item DF. The result is the input of a Fuzzy Inference System, which, along with other implicit information on users and items, returns the User-Item ratings DF, a sample of which is shown in Table 5.

Table 5 User-Item ratings DF.

userId |    0 |    1 |    2 |    3 |    4 |    5 |  … |   23 |   24 |   25 |   26 |   27 |   28
0      | 1.41 | 0.00 | 1.87 | 0.00 | 3.21 | 0.00 |  … | 0.00 | 1.79 | 0.00 | 1.79 | 2.96 | 0.00
1      | 0.00 | 4.63 | 1.77 | 1.26 | 0.00 | 0.00 |  … | 0.00 | 0.00 | 4.06 | 0.00 | 2.21 | 1.77
2      | 0.00 | 0.00 | 0.00 | 2.10 | 3.20 | 2.38 |  … | 3.48 | 0.00 | 0.00 | 0.00 | 0.00 | 0.00
3      | 0.00 | 3.12 | 0.00 | 0.00 | 3.28 | 2.89 |  … | 0.00 | 2.22 | 0.00 | 0.00 | 0.00 | 0.00
4      | 1.37 | 0.00 | 2.31 | 1.63 | 0.00 | 0.00 |  … | 3.31 | 2.30 | 0.00 | 0.00 | 0.00 | 0.00
…      |    … |    … |    … |    … |    … |    … |  … |    … |    … |    … |    … |    … |    …
99995  | 0.00 | 0.00 | 0.00 | 1.21 | 3.42 | 0.00 |  … | 0.00 | 3.84 | 3.79 | 0.00 | 3.36 | 0.00
99996  | 1.46 | 0.00 | 0.00 | 0.00 | 2.31 | 0.00 |  … | 2.31 | 0.00 | 0.00 | 0.00 | 0.00 | 1.39
99997  | 1.47 | 0.00 | 0.00 | 1.32 | 2.74 | 0.00 |  … | 0.00 | 0.00 | 2.29 | 0.00 | 0.00 | 0.00
99998  | 0.00 | 4.64 | 4.11 | 1.78 | 0.00 | 2.94 |  … | 3.43 | 2.65 | 3.80 | 0.00 | 4.65 | 4.33
99999  | 0.00 | 3.54 | 3.06 | 0.00 | 4.07 | 2.65 |  … | 0.00 | 3.07 | 3.51 | 2.46 | 3.50 | 2.61
4.2 Metrics
The metrics for a RS are not a trivial issue. Many works tend to use common ML metrics, such as classification metrics like precision, recall and accuracy, or regression metrics such as RMSE or MAE when the goal is to perform a regression on 1-5 ratings, for example. However, these metrics imply that the data available to us about user behavior is perfect, that is, that users are aware of all the items they like and that the ones they haven't tried are not relevant. If this were the case, no RS would be needed in the first place. The drawback of using this type of metrics is that it can encourage the recommender to make obvious recommendations in some cases, by penalizing wrong recommendations too much. In addition, these metrics do nothing to compare recommenders based on how personalized or how diversified their recommendations are.
Other metrics have been developed for RS in recent years that try to address these issues, some of which are presented in the following.
1. Mean Average Precision @ K and Mean Average Recall @ K
As in more traditional machine learning, the dataset is split into training and test sets; the test set is comprised of cases the learner did not train on and is thus used to measure the model's ability to generalize with new data. The same is done in recommender systems: the output of a recommender system is usually a list of K recommendations for each user in the test set, and to produce those recommendations the recommender only trained on the items that user enjoyed in the training set. MAP@K (Mean Average Precision @ K) gives insight into how relevant the list of recommended items is, whereas MAR@K (Mean Average Recall @ K) gives insight into how well the recommender system is able to discover all the items the user has rated positively in the test set.
In recommender systems, precision and recall are essentially the same as in machine learning:

Precision = (# of relevant recommendations) / (# of items recommended)

Recall = (# of relevant recommendations) / (# of relevant items)

However, these metrics don't take ordering into account, and since the output of a recommender system is usually an ordered list, the metrics at cut-off k are introduced, MAP@K and MAR@K:

MAP@K = \frac{1}{|U|} \sum_{u=1}^{|U|} \frac{1}{\min(m, K)} \sum_{k=1}^{K} P_u(k) \cdot rel_u(k)

MAR@K = \frac{1}{|U|} \sum_{u=1}^{|U|} \frac{1}{m} \sum_{k=1}^{K} r_u(k) \cdot rel_u(k)

where U is the set of users in the test set, m is the number of relevant items for user u, P_u(k) and r_u(k) are the precision@k and recall@k, respectively, and rel_u(k) is a factor equal to 1 if the k-th item is relevant and 0 otherwise.
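As an illustration, both metrics can be computed directly from the ranked recommendation lists. The sketch below uses hypothetical variable names (recs_per_user holds each user's ranked top-K item ids, rels_per_user the items that user rated positively in the test set); it is not the authors' code.

    # Illustrative MAP@K / MAR@K computation.
    def average_precision_at_k(recommended, relevant, k):
        if not relevant:
            return 0.0
        hits, score = 0, 0.0
        for i, item in enumerate(recommended[:k]):
            if item in relevant:            # rel_u(k) = 1
                hits += 1
                score += hits / (i + 1)     # precision at this cut-off
        return score / min(len(relevant), k)

    def average_recall_at_k(recommended, relevant, k):
        if not relevant:
            return 0.0
        hits = sum(1 for item in recommended[:k] if item in relevant)
        return hits / len(relevant)

    def map_at_k(recs_per_user, rels_per_user, k=5):
        users = list(recs_per_user)
        return sum(average_precision_at_k(recs_per_user[u], set(rels_per_user[u]), k)
                   for u in users) / len(users)

    def mar_at_k(recs_per_user, rels_per_user, k=5):
        users = list(recs_per_user)
        return sum(average_recall_at_k(recs_per_user[u], set(rels_per_user[u]), k)
                   for u in users) / len(users)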
2. Coverage

Coverage is the percentage of items in the training data that the recommender is able to recommend on a test set:

Coverage = (I / N) * 100%

where I is the number of unique items the model recommends in the test data and N is the total number of unique items in the training data.
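A minimal sketch of this calculation (hypothetical variable names):

    def coverage(recs_per_user, catalog):
        """Share of the training catalog that appears in at least one recommendation list."""
        recommended = {item for recs in recs_per_user.values() for item in recs}
        return 100.0 * len(recommended & set(catalog)) / len(set(catalog))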
3. Personalization
Personalization is the dissimilarity between users' lists of recommendations. A high score indicates that user lists differ from one another, while a low score indicates they are very similar. Similarity between recommendation lists is calculated via the cosine similarity between said lists, and the average of the upper triangle of the cosine similarity matrix (avgCosim) is then taken. Personalization is given by:

Personalization = 1 - avgCosim
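One way to compute this, assuming each recommendation list is one-hot encoded over the item catalog (a sketch with assumed names, and non-empty lists):

    import numpy as np

    def personalization(recs_per_user, catalog):
        """1 - mean pairwise cosine similarity between users' one-hot encoded recommendation lists."""
        index = {item: j for j, item in enumerate(catalog)}
        m = np.zeros((len(recs_per_user), len(index)))
        for i, recs in enumerate(recs_per_user.values()):
            for item in recs:
                m[i, index[item]] = 1.0
        m = m / np.linalg.norm(m, axis=1, keepdims=True)   # assumes every list is non-empty
        cos = m @ m.T
        upper = cos[np.triu_indices(len(m), k=1)]          # upper triangle, diagonal excluded
        return 1.0 - upper.mean()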
4. Diversity
Diversity measures how different the items being recommended to the user are:

Diversity = 1 - ils

where ils corresponds to intra-list similarity, the average cosine similarity of all items in a list of recommendations. This calculation uses features of the recommended items (such as item metadata) to compute the similarity; the feature matrix is indexed by item id and contains one-hot-encoded features. If a recommender system is recommending lists of very similar items, the intra-list similarity will be high and, conversely, the diversity will be low.
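A sketch of the intra-list similarity computation, assuming item_features maps each item id to a one-hot feature vector (e.g. its categories from Table 4) and each list has at least two items:

    import numpy as np

    def diversity(recs_per_user, item_features):
        """1 - mean intra-list similarity over all users' recommendation lists."""
        ils = []
        for recs in recs_per_user.values():
            f = np.array([item_features[i] for i in recs], dtype=float)
            f = f / np.linalg.norm(f, axis=1, keepdims=True)     # assumes no all-zero feature rows
            cos = f @ f.T
            ils.append(cos[np.triu_indices(len(recs), k=1)].mean())
        return 1.0 - float(np.mean(ils))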
5. Novelty
Finally, novelty measures the capacity of recommender systems to propose novel and unexpected items which a user is unlikely to know about already. It uses the self-information of the recommended items: the mean self-information is calculated per top-N recommendation list and then averaged over all users:

Novelty = \frac{1}{|U|} \sum_{u=1}^{|U|} \frac{1}{|N|} \sum_{i=1}^{|N|} \log_2\!\left(\frac{count(i)}{|U|}\right)

where U is the user list, N is the top-N list and count(i) is the number of users that have consumed the specific item.
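A sketch of this metric, with self-information taken as the conventional -log2 of item popularity so that rarer items contribute more (variable names are assumptions; items never seen in the training interactions are skipped):

    import math
    from collections import Counter

    def novelty(recs_per_user, train_interactions):
        """Mean self-information of recommended items, averaged over users.
        train_interactions: iterable of (user, item) pairs observed so far."""
        count = Counter(item for _, item in train_interactions)
        n_users = len(recs_per_user)
        per_user = []
        for recs in recs_per_user.values():
            info = sum(-math.log2(count[i] / n_users) for i in recs if count[i])
            per_user.append(info / len(recs))
        return sum(per_user) / n_users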
4.3 CS with increasing data quantity
In this sub-section the previously presented datasets and metrics are employed to test and evaluate the RS in its various phases. For this to work, the datasets are gradually incremented, starting with very few users and no ratings and ending with the full datasets. This process is meant to mimic the natural evolution of a RS, from initial cold-start conditions to thousands of users with thousands of reviews. In each phase different recommenders are employed, as was already mentioned in previous sections.

4.3.1 CS in Phase 1
As mentioned previously, phase 1 is characterized by a small number of users and no ratings. At this point, only content-based approaches are possible, and only if there is some input from the user concerning their preferences, which the RS asks for when the user first logs in. Otherwise, the RS would be incapable of giving any recommendation short of a random context-filtered one. To mimic this first stage, 98 initial users are added to the RS. Each user inputs their HL preference vector related to Table 3, which the phase 1 content-based recommender uses to generate recommendations. Unlike in Table 3, the HL preference vector takes either 0 or 1 values and thus does not convey information on interest intensity. In the following tables, a sample of the 98 users and their respective HL vectors is shown.
Table 6 High-level preferences of the users.

userId | ViewPoints | Nature | Towns | Culture | Events | Leisure | Routes | Sports
1      | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0
2      | 1 | 0 | 1 | 0 | 0 | 1 | 1 | 0
3      | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0
4      | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1
5      | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0
…      | … | … | … | … | … | … | … | …
94     | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0
95     | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0
96     | 1 | 0 | 1 | 0 | 0 | 1 | 1 | 0
97     | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0
98     | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0

The recommendations given by the RS to each user are shown in the following table. We can apply all the previously presented metrics to these results, including MAP@K and MAR@K, because we are aware of some ratings given by the users, present in the User-Item ratings DF, which we can use for this purpose.
Table 7 Sample of the recommendations given to the users by the content recommender.

userId | Recommendations
1  | [(6, 'Get a free pizza at Pizza Hut'), (7, 'Get a voucher for Sephora'), (8, 'Go shopping in our new mall'), (27, 'go to the spa'), (28, 'visiting Disneyland')]
2  | [(6, 'Get a free pizza at Pizza Hut'), (7, 'Get a voucher for Sephora'), (8, 'Go shopping in our new mall'), (14, 'Rest and relaxation at the spa'), (27, 'go to the spa')]
3  | [(6, 'Get a free pizza at Pizza Hut'), (7, 'Get a voucher for Sephora'), (8, 'Go shopping in our new mall'), (27, 'go to the spa'), (28, 'visiting Disneyland')]
4  | [(4, 'Get a discount for Comic-Con'), (6, 'Get a free pizza at Pizza Hut'), (7, 'Get a voucher for Sephora'), (8, 'Go shopping in our new mall'), (14, 'Rest and relaxation at the spa')]
5  | [(11, 'Medieval fair'), (1, 'A tavern that serves traditional food'), (13, 'One of the main nightclubs in the city'), (2, 'Ancient history museum'), (0, 'A service that offers you the opportunity to do bungee-jumping')]
…  | …
94 | [(6, 'Get a free pizza at Pizza Hut'), (7, 'Get a voucher for Sephora'), (8, 'Go shopping in our new mall'), (27, 'go to the spa'), (28, 'visiting Disneyland')]
95 | [(6, 'Get a free pizza at Pizza Hut'), (7, 'Get a voucher for Sephora'), (8, 'Go shopping in our new mall'), (27, 'go to the spa'), (28, 'visiting Disneyland')]
96 | [(6, 'Get a free pizza at Pizza Hut'), (7, 'Get a voucher for Sephora'), (8, 'Go shopping in our new mall'), (14, 'Rest and relaxation at the spa'), (27, 'go to the spa')]
97 | [(11, 'Medieval fair'), (1, 'A tavern that serves traditional food'), (13, 'One of the main nightclubs in the city'), (2, 'Ancient history museum'), (0, 'A service that offers you the opportunity to do bungee-jumping')]
98 | [(2, 'Ancient history museum'), (11, 'Medieval fair'), (13, 'One of the main nightclubs in the city'), (1, 'A tavern that serves traditional food'), (14, 'Rest and relaxation at the spa')]
Table 8 Values for the various metrics on the content model recommendations.

MAP@K | MAR@K | Coverage | Personalization | Diversity HL | Diversity LL | Novelty
0.092 | 0.092 | 0.55     | 0.51            | 0.21         | 0.76         | 0.66

We can see that mean average precision and mean average recall have the same value; the value of K is 5, since the recommender recommends 5 items to each user. The two diversity values pertain to high-level and low-level preferences, showing how diverse the recommendations are in terms of the items recommended. The high-level diversity is expected to be lower than the low-level diversity, since the content recommender makes recommendations based on the high-level preferences of the users. Low-level preferences are linked ontologically to high-level preferences, but they are greater in variety; the same high-level preference is linked to many low-level preferences, which justifies the larger value of Diversity LL compared to Diversity HL. Coverage, personalization and both diversities return values from 0 to 1, where 1 represents maximum coverage, personalization and diversity. The value for novelty can take any positive value; the greater the value, the more unexpected the recommendations are in terms of popularity. In this study the novelty metric may not be very useful, due to the relatively low cardinality of items and the fact that there are no markedly less popular items. In any case, these metrics are most useful when used to compare different models.
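For concreteness, a minimal sketch of how a phase 1 content recommender could rank items by matching the binary HL vectors of Table 6 against the item categories of Table 4 is given below. This is an illustration under assumed data structures, not the authors' exact implementation.

    def recommend_phase1(hl_vector, items, top_n=5):
        """hl_vector: dict category -> 0/1 from the sign-up questionnaire.
        items: dict item_id -> (name, list of categories), as in Table 4."""
        def score(categories):
            # simple overlap between the user's HL preferences and the item's categories
            return sum(hl_vector.get(c, 0) for c in categories)
        ranked = sorted(items.items(), key=lambda kv: score(kv[1][1]), reverse=True)
        return [(item_id, name) for item_id, (name, cats) in ranked[:top_n]]

    # example: a user who only ticked 'Leisure' (cf. userId 1 in Table 6)
    user = {'Leisure': 1}
    catalog = {6: ('Get a free pizza at Pizza Hut', ['Leisure']),
               11: ('Medieval fair', ['Culture', 'Events', 'Nature', 'Towns'])}
    print(recommend_phase1(user, catalog))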
4.3.2 CS in Phase 2
In phase 2 there are ratings in the system, although not enough users to feed the demographic-based recommender. In this phase we can simulate an RS state where there are 98 users and 64 ratings. The hybrid recommender is a hybridization of the initial content-based recommender with the new popularity-based recommender: the ratings are used to filter out items with an average rating below a given threshold. Once again, the same metrics are applied, and the results are shown in the following table.

Table 9 Values for the various metrics on the hybrid model recommendations.

MAP@K | MAR@K | Coverage | Personalization | Diversity HL | Diversity LL | Novelty
0.219 | 0.219 | 0.17     | 1.11e-16        | 0.64         | 0.91         | 0.66

It is interesting to observe that precision and recall have gone up, which makes sense because items are now being filtered according to rating, and higher-rated items are more likely to have been liked by the users; at least, the synthetic data was defined as such. Coverage has gone down, which makes sense since fewer items are being recommended due to the filtering. Personalization has gone down since many users are now being recommended the same items. Diversity has gone up; this can be due to recommending some items outside the natural preference of the user because of the rating filtering. All in all, differences can be observed compared to the content recommender; these differences make sense and point towards the expected behavior of the recommender.
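The popularity-based filtering step described above can be sketched as follows (hypothetical names; the threshold value is an assumption, and items with no ratings yet are kept):

    import pandas as pd

    def filter_by_average_rating(recs, ratings, threshold=2.5):
        """Drop recommended items whose average observed rating falls below the threshold.
        ratings: DataFrame with columns ['userId', 'itemId', 'rating']."""
        avg = ratings.groupby('itemId')['rating'].mean()
        return [item for item in recs if avg.get(item, threshold) >= threshold]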
4.3.3 CS in Phase 3
In phase 3, enough users with ratings have been introduced into the system to kickstart the demographic-based recommender. This recommender works by defining user clusters based on demographic features and then giving item recommendations based on the predictions of a kNN. This phase 3 recommender works together with the hybrid recommender from phase 2. In the following table, the metrics are applied and the results shown. The number of users in this phase totals 198, with 191 ratings.

Table 10 Values for the various metrics on the hybrid and demographic model recommendations.

       | MAP@K | MAR@K | Coverage | Personalization | Diversity HL | Diversity LL | Novelty
Hybrid | 0.178 | 0.178 | 0.34     | 0.07            | 0.64         | 0.91         | 0.66
Demog  | 0.151 | 0.151 | 0.72     | 0.57            | 0.63         | 0.90         | 0.66

We can see these results in a bar chart where a min-max scaler has been applied; this basically shows which model wins in each category.

Figure 9 Scaled metrics for both models.

We can see that the hybrid model loses to the demographic model in coverage and personalization and has higher values in the other metrics. However, results are virtually equal in terms of Diversity and Novelty, and only on Precision and Recall do we see larger values for the hybrid model, and not by much. On the other hand, the demographic recommender has much larger personalization and coverage, a clear improvement over the hybrid model. This makes sense because the demographic model is more complex in how recommendations are given, finding similar users in terms of demographic features and then recommending items on a more individual basis, whereas the hybrid model is again based on high-level preferences.

Table 11 Values for the various metrics on the hybrid phase 2 and hybrid phase 3 model recommendations.

          | MAP@K | MAR@K | Coverage | Personalization | Diversity HL | Diversity LL | Novelty
Hybrid P2 | 0.219 | 0.219 | 0.17     | 1.11e-16        | 0.64         | 0.91         | 0.66
Hybrid P3 | 0.178 | 0.178 | 0.34     | 0.07            | 0.64         | 0.91         | 0.66

It is also interesting to compare the metrics of the hybrid recommender between phase 2 and phase 3. Most metrics remain similar, with a slight decrease in precision and recall, which may be just random, a slight increase in personalization, and a rather large increase in coverage. This can be due to more items being recommended and not filtered out for poor ratings, because more users and ratings on items now exist. It is interesting to see the variation of the metrics of the same recommender as the amount of data increases.
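The scaled comparisons of Figure 9 (and of Figures 10 and 11 later on) can be reproduced by min-max scaling each metric across the compared models; a minimal sketch using the values of Table 10 (plotting details are illustrative, not the authors' code):

    import pandas as pd

    # rows = models, columns = metrics (values taken from Table 10)
    metrics = pd.DataFrame(
        {'MAP@K': [0.178, 0.151], 'Coverage': [0.34, 0.72], 'Personalization': [0.07, 0.57]},
        index=['Hybrid', 'Demog'])

    # min-max scale each metric so the best model per metric reads as 1 and the worst as 0
    scaled = (metrics - metrics.min()) / (metrics.max() - metrics.min())
    scaled.T.plot.bar()  # one group of bars per metric, as in Figure 9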
4.3.4 CS in Phase 4
Phase 4 starts when a given number of users and a given density of the user-item rating DF are reached. When this happens, the final recommender is initiated: the already mentioned FFM. In phase 4 the recommendations are, once again, the result of an ensemble of recommenders, the same one as in phase 3 with the addition of the new FFM. The metrics are once more applied to the recommendations and are shown in the following table. In this phase we have 250 users and 191 ratings.

Table 12 Values for the various metrics on the hybrid, demographic and collaborative model recommendations.

       | MAP@K | MAR@K | Coverage | Personalization | Diversity HL | Diversity LL | Novelty
Hybrid | 0.158 | 0.158 | 0.34     | 0.06            | 0.64         | 0.91         | 0.66
Demog  | 0.137 | 0.137 | 0.68     | 0.55            | 0.66         | 0.91         | 0.66
Collab | 0.181 | 0.181 | 0.72     | 0.54            | 0.67         | 0.91         | 0.66

Comparing the recommenders, we can observe that the collaborative recommender, which was added at this later stage, has high levels of personalization and coverage and achieves the highest values for precision and recall compared to the other two models. The values for diversity are all similar at this stage, and novelty again doesn't provide useful information with this number of total items. In terms of precision and recall, coverage and personalization, the collaborative recommender gives us the expected result, namely relatively high values in these metrics. We can observe that each recommender brings different recommendations to the table, with clear improvements in some metrics as the recommender system matures. It would be interesting to see this with a dataset comprising many more items and users. In the following figure we can see the metrics in a scaled graph.

Figure 10 Scaled metrics for all three models.

As said, we observe that the collaborative metrics are good in comparison to the other two; however, the collaborative model is only useful when the recommender system has seen sufficient data. The metrics for the other two are not as high, but they don't suffer as much from cold-start issues. We can see that between the demographic and the hybrid models there is a trade-off in metrics, which we had already seen in the previous phase.
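For reference, the FFM prediction that underlies the collaborative recommender combines a linear term with field-aware pairwise interactions (cf. [64]). A minimal scoring sketch following that standard formulation (illustrative only, not the authors' implementation):

    import numpy as np

    def ffm_score(x, fields, w0, w, V):
        """Standard FFM score for one sample.
        x:      feature values, shape (n_features,); only non-zeros contribute
        fields: field index of each feature, shape (n_features,)
        w0, w:  global bias and linear weights
        V:      latent vectors, shape (n_features, n_fields, k)"""
        score = w0 + np.dot(w, x)
        nz = np.nonzero(x)[0]
        for a, i in enumerate(nz):
            for j in nz[a + 1:]:
                # each feature keeps a separate latent vector per field of its interaction partner
                score += np.dot(V[i, fields[j]], V[j, fields[i]]) * x[i] * x[j]
        return score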
Table 13 Values for the various metrics on the phase 2, phase 3 and phase 4 recommendations of the hybrid and demographic models.

          | MAP@K | MAR@K | Coverage | Personalization | Diversity HL | Diversity LL | Novelty
Hybrid P2 | 0.219 | 0.219 | 0.17     | 1.11e-16        | 0.64         | 0.91         | 0.66
Hybrid P3 | 0.178 | 0.178 | 0.34     | 0.07            | 0.64         | 0.91         | 0.66
Hybrid P4 | 0.158 | 0.158 | 0.34     | 0.06            | 0.64         | 0.91         | 0.66
Demog P3  | 0.151 | 0.151 | 0.72     | 0.57            | 0.63         | 0.90         | 0.66
Demog P4  | 0.137 | 0.137 | 0.68     | 0.55            | 0.66         | 0.91         | 0.66

Here we can see a comparison between the metrics of the different models along each phase. There is a slight decrease of precision and recall in the later phases for the hybrid and demographic models, but this might have to do with insufficient ratings being added between phase 3 and phase 4, which are important for the demographic recommender. With a further increase in data, we can see further differences in the metrics. Feeding the recommender system with 1000 users and 883 ratings, we attain the following results.
Table 14 Values for the various metrics on the hybrid, demographic and collaborative model recommendations, in the case of 250 users and 191 ratings as well as 1000 users and 883 ratings.

            | MAP@K | MAR@K | Coverage | Personalization | Diversity HL | Diversity LL | Novelty
Hybrid      | 0.158 | 0.158 | 0.34     | 0.06            | 0.64         | 0.91         | 0.66
Demog       | 0.137 | 0.137 | 0.68     | 0.55            | 0.66         | 0.91         | 0.66
Collab      | 0.181 | 0.181 | 0.72     | 0.54            | 0.67         | 0.91         | 0.66
Hybrid 1000 | 0.088 | 0.088 | 0.28     | 0.19            | 0.69         | 0.89         | 0.66
Demog 1000  | 0.128 | 0.128 | 0.97     | 0.59            | 0.48         | 0.89         | 0.66
Collab 1000 | 0.119 | 0.119 | 0.79     | 0.61            | 0.52         | 0.89         | 0.66

Figure 11 Scaled metrics for all three models.

We can see that the metrics are qualitatively similar to the previous case with fewer users and ratings. The number of ratings is still low and the rating density is not high, which particularly penalizes the collaborative model. Nonetheless, the collaborative model is the one that offers the most personalization, which increased for all models with the increment in users and ratings. Coverage also increased heavily for the demographic model, while only increasing slightly for the collaborative model. As for precision and recall, the demographic model maintains the metric with only a slight decrease, while the hybrid and collaborative models saw a rather significant decrease; for the collaborative model this might have to do with the low density of ratings. All in all, we see that the demographic and collaborative models clearly become more dominant and useful as more data is added to the RS. The phases also make sense: the collaborative model is initiated after all the others, since it is very sensitive to rating density, while the demographic model is more robust in that sense. By this phase the hybrid model has clearly been surpassed by the two other models in most metrics, which is exactly what would be expected.
5 Conclusion and future works
In this work an ontology-based context-aware recommender system application for tourism was presented, in which different recommenders are used at different stages of maturity of the recommender system. The novel aspect is the evolution of the recommender system, with different types of recommenders entering the recommendation pool as the system's maturity evolves. The ontology extension of the recommender system allows items to be binned and recommended to users based on user preference vectors with different degrees of detail that link to the item ontology. These preference vectors are ever changing based on user feedback, while other recommenders based on demographic features and field-aware factorization machines join the pool as data increases.
Throughout this work, the RS was presented and ultimately tested with synthetic data mimicking different stages of maturity. One could observe that at each new phase the new recommenders added value, as seen from the comparison between the different adopted metrics: MAP@K, MAR@K, Coverage, Personalization, Diversity HL, Diversity LL and, finally, Novelty. These metrics represent the state of the art for Recommender Systems because they attempt to go beyond the usual metrics adopted in ML, which don't always have much meaning in RS. The results obtained were as expected, with the Collaborative and Demographic approaches essentially bringing more personalization and coverage to the table. However, the full extent of the differences between recommenders could not be captured, mainly due to the relatively low cardinality of items being offered, only 29.
Future work would entail a broader analysis with more items, and also with context-aware data, which was not tested at this instance. Nonetheless, the context-aware part would essentially be pre-filtering, which would not be of much interest regarding the results concerning the metrics.
6 Acknowledgements
The present paper was developed in the context of the PMP project – Partnership Management Platform, code LISBOA-01-0247-FEDER-045411, co-financed by LISBOA 2020 and Portugal 2020 through the European Regional Development Fund.
7
|
2566 |
+
References
|
2567 |
+
|
2568 |
+
[1]
|
2569 |
+
C. I. ee, . C. sia, . C. su, and J. Y. in, “ ntology-based tourism recommendation
|
2570 |
+
system,” 2017 4th International Conference on Industrial Engineering and Applications,
|
2571 |
+
ICIEA 2017, pp. 376–379, 2017, doi: 10.1109/IEA.2017.7939242.
|
2572 |
+
[2]
|
2573 |
+
J. Borràs, A. Moreno, and A. alls, “Intelligent tourism recommender systems: A survey,”
|
2574 |
+
Expert Systems with Applications, vol. 41, no. 16. Elsevier Ltd, pp. 7370–7389, Nov. 15,
|
2575 |
+
2014. doi: 10.1016/j.eswa.2014.06.007.
|
2576 |
+
[3]
|
2577 |
+
. K a , . a hchoune, and . ahab, “ ourism Recommender Systems: An Overview
|
2578 |
+
of Recommendation Approaches,” International Journal of Computer Applications, vol.
|
2579 |
+
180, no. 20, pp. 9–13, 2018, doi: 10.5120/ijca2018916458.
|
2580 |
+
[4]
|
2581 |
+
A. Montejo-Ráez, J. M. Perea-Ortega, M. Á. García-Cumbreras, and F. Martínez-
|
2582 |
+
Santiago, “ tiûm: A eb based planner for tourism and leisure,” Expert Systems with
|
2583 |
+
Applications,
|
2584 |
+
vol.
|
2585 |
+
38,
|
2586 |
+
no.
|
2587 |
+
8,
|
2588 |
+
pp.
|
2589 |
+
10085–10093,
|
2590 |
+
Aug.
|
2591 |
+
2011,
|
2592 |
+
doi:
|
2593 |
+
10.1016/j.eswa.2011.02.005.
|
2594 |
+
[5]
|
2595 |
+
I. Garcia, . Sebastia, and . naindia, “ n the design of individual and group
|
2596 |
+
recommender systems for tourism,” Expert Systems with Applications, vol. 38, no. 6, pp.
|
2597 |
+
7683–7692, 2011, doi: 10.1016/j.eswa.2010.12.143.
|
2598 |
+
[6]
|
2599 |
+
. Khallou i, A. Abatal, and . ahaj, “An ontology-based context awareness for smart
|
2600 |
+
tourism recommendation system,” ay 20 8. doi: 0. 45/3230905.3230935.
|
2601 |
+
[7]
|
2602 |
+
A. . Kashevni , A. v onomarev, and A. v Smirnov, “I IG C A ultimodel
|
2603 |
+
Context-A are ourism Recommendation Service : Approach and Architecture,” vol. 56,
|
2604 |
+
no. 2, pp. 245–258, 2017, doi: 10.1134/S1064230717020125.
|
2605 |
+
[8]
|
2606 |
+
A. oreno, A. alls, . Isern, . arin, and J. orràs, “Sig ur/E-Destination: Ontology-
|
2607 |
+
based personali ed recommendation of ourism and eisure Activities,” Engineering
|
2608 |
+
Applications of Artificial Intelligence, vol. 26, no. 1, pp. 633–651, Jan. 2013, doi:
|
2609 |
+
10.1016/j.engappai.2012.02.014.
|
2610 |
+
|
2611 |
+
36
|
2612 |
+
|
2613 |
+
[9]
|
2614 |
+
M. Nilashi, K. Bagherifard, . Rahmani, and . Rafe, “A recommender system for tourism
|
2615 |
+
industry using cluster ensemble and prediction machine learning techni ues,” Computers
|
2616 |
+
and Industrial Engineering, vol. 109, pp. 357–368, 2017, doi: 10.1016/j.cie.2017.05.016.
|
2617 |
+
[10]
|
2618 |
+
M. Nilashi, K. agherifard, . Rahmani, and . Rafe, “A recommender system for tourism
|
2619 |
+
industry using cluster ensemble and prediction machine learning techni ues,” Computers
|
2620 |
+
and Industrial Engineering, vol. 109, 2017, doi: 10.1016/j.cie.2017.05.016.
|
2621 |
+
[11]
|
2622 |
+
Á. García-Crespo, J. L. López-Cuadrado, R. Colomo-Palacios, I. González-Carrasco, and
|
2623 |
+
B. Ruiz- e cua, “Sem-Fit: A semantic based expert system to provide recommendations
|
2624 |
+
in the tourism domain,” Expert Systems with Applications, vol. 38, no. 10, pp. 13310–
|
2625 |
+
13319, Sep. 2011, doi: 10.1016/j.eswa.2011.04.152.
|
2626 |
+
[12]
|
2627 |
+
. ilashi, . bin Ibrahim, . Ithnin, and . . Sarmin, “A multi-criteria collaborative
|
2628 |
+
filtering recommender system for the tourism domain using Expectation Maximization
|
2629 |
+
(EM) and PCA-A IS,” Electronic Commerce Research and Applications, vol. 14, no. 6,
|
2630 |
+
pp. 542–562, Oct. 2015, doi: 10.1016/j.elerap.2015.08.004.
|
2631 |
+
[13]
|
2632 |
+
J. Borràs et al., “Sig ur/ -Destination: A System for the Management of Complex Tourist
|
2633 |
+
Regions,” in Information and Communication Technologies in Tourism 2011, 2011, pp.
|
2634 |
+
39–50. doi: 10.1007/978-3-7091-0503-0_4.
|
2635 |
+
[14]
|
2636 |
+
C. Grün, J. eidhardt, and . erthner, “ ntology-Based Matchmaking to Provide
|
2637 |
+
ersonali ed Recommendations for ourists,” in Information and Communication
|
2638 |
+
Technologies in Tourism 2017, Springer International Publishing, 2017, pp. 3–16. doi:
|
2639 |
+
10.1007/978-3-319-51168-9_1.
|
2640 |
+
[15]
|
2641 |
+
Y. Huang and L. Bian, “A ayesian net or and analytic hierarchy process based
|
2642 |
+
personali ed recommendations for tourist attractions over the Internet,” Expert Systems
|
2643 |
+
with Applications, vol. 36, no. 1, pp. 933–943, 2009, doi: 10.1016/j.eswa.2007.10.019.
|
2644 |
+
[16]
|
2645 |
+
J. Beel, C. Breitinger, S. anger, A. ommat sch, and . Gipp, “ o ards reproducibility in
|
2646 |
+
recommender-systems research,” User Modeling and User-Adapted Interaction, vol. 26,
|
2647 |
+
no. 1, pp. 69–101, Mar. 2016, doi: 10.1007/s11257-016-9174-x.
|
2648 |
+
[17]
|
2649 |
+
J. J. Carroll, D. Reynolds, I. ic inson, A. Seaborne, C. ollin, and K. il inson, “Jena:
|
2650 |
+
Implementing the semantic eb recommendations,” in Proceedings of the 13th
|
2651 |
+
International World Wide Web Conference on Alternate Track, Papers and Posters, WWW
|
2652 |
+
Alt. 2004, May 2004, pp. 74–83. doi: 10.1145/1013367.1013381.
|
2653 |
+
[18]
|
2654 |
+
C. ouras and . sog as, “Improving ne s articles recommendations via user
|
2655 |
+
clustering,” International Journal of Machine Learning and Cybernetics, vol. 8, no. 1, pp.
|
2656 |
+
223–237, Feb. 2017, doi: 10.1007/s13042-014-0316-3.
|
2657 |
+
[19]
|
2658 |
+
P. Sit rong ong, S. aneeroj, . Samatthiyadi un, and A. a asu, “ ayesian probabilistic
|
2659 |
+
model for context-a are recommendations,” 17th International Conference on Information
|
2660 |
+
|
2661 |
+
37
|
2662 |
+
|
2663 |
+
Integration and Web-Based Applications and Services, iiWAS 2015 - Proceedings, 2015,
|
2664 |
+
doi: 10.1145/2837185.2837223.
|
2665 |
+
[20]
|
2666 |
+
. asid and R. Ali, “Context Similarity easurement ased on Genetic Algorithm for
|
2667 |
+
Improved Recommendations,” Applications of Soft Computing for the Web, pp. 11–29,
|
2668 |
+
2017, doi: 10.1007/978-981-10-7098-3_2.
|
2669 |
+
[21]
|
2670 |
+
Y. Zheng, . obasher, and R. ur e, “Context recommendation using multi-label
|
2671 |
+
classification,” Proceedings - 2014 IEEE/WIC/ACM International Joint Conference on Web
|
2672 |
+
Intelligence and Intelligent Agent Technology - Workshops, WI-IAT 2014, vol. 2, no. May,
|
2673 |
+
pp. 288–295, 2014, doi: 10.1109/WI-IAT.2014.110.
|
2674 |
+
[22]
|
2675 |
+
. Shin, J. . ee, J. Yeon, and S. G. ee, “Context-aware recommendation by
|
2676 |
+
aggregating user context,” 2009 IEEE Conference on Commerce and Enterprise
|
2677 |
+
Computing, CEC 2009, pp. 423–430, 2009, doi: 10.1109/CEC.2009.38.
|
2678 |
+
[23]
|
2679 |
+
Y. Gu, J. Song, . iu, . Zou, and Y. Yao, “CA : Context A are atrix actori ation
|
2680 |
+
for Social Recommendation,” Web Intelligence, vol. 16, no. 1, pp. 53–71, 2018, doi:
|
2681 |
+
10.3233/WEB-180373.
|
2682 |
+
[24]
|
2683 |
+
G. Adomavicius and A. u hilin, “Context-a are recommender systems,” Recommender
|
2684 |
+
Systems Handbook, Second Edition, pp. 191–226, 2015, doi: 10.1007/978-1-4899-7637-
|
2685 |
+
6_6.
|
2686 |
+
[25]
|
2687 |
+
R. ur e, A. elfernig, and . . Gö er, “Recommender Systems: An vervie ,” 20 ,
|
2688 |
+
[Online]. Available: www.aaai.org
|
2689 |
+
[26]
|
2690 |
+
. . Knijnenburg and . C. illemsen, “ valuating recommender systems ith user
|
2691 |
+
experiments,” Recommender Systems Handbook, Second Edition, pp. 309–352, 2015,
|
2692 |
+
doi: 10.1007/978-1-4899-7637-6_9.
|
2693 |
+
[27]
|
2694 |
+
R. Irfan et al., “ obiContext: A Context-Aware Cloud-Based Venue Recommendation
|
2695 |
+
rame or ,” IEEE Transactions on Cloud Computing, vol. 5, no. 4, pp. 712–724, 2015,
|
2696 |
+
doi: 10.1109/tcc.2015.2440243.
|
2697 |
+
[28]
|
2698 |
+
G. Adomavicius, . obasher, . Ricci, and A. u hilin, “Context-Aware Recommender
|
2699 |
+
Systems,” AI Magazine, vol. 32, no. 3, p. 67, Oct. 2011, doi: 10.1609/aimag.v32i3.2364.
|
2700 |
+
[29]
|
2701 |
+
J. iu, C. u, and . iu, “ ayesian probabilistic matrix factori ation ith social relations
|
2702 |
+
and item contents for recommendation,” Decision Support Systems, vol. 55, no. 3, pp.
|
2703 |
+
838–850, 2013, doi: 10.1016/j.dss.2013.04.002.
|
2704 |
+
[30]
|
2705 |
+
R. ur e, “ ybrid Recommender Systems: Survey and xperiments,” User Modeling and
|
2706 |
+
User-Adapted Interaction, vol. 12, no. 4, pp. 331–370, 2002, [Online]. Available:
|
2707 |
+
http://www.springerlink.com/openurl.asp?id=doi:10.1023/A:1021240730564%5Cnpapers
|
2708 |
+
2://publication/doi/10.1023/A:1021240730564
|
2709 |
+
|
2710 |
+
38
|
2711 |
+
|
2712 |
+
[31]
|
2713 |
+
. agci and . Karago , “Context-aware location recommendation by using a random
|
2714 |
+
walk-based approach,” Knowledge and Information Systems, vol. 47, no. 2, pp. 241–260,
|
2715 |
+
2016, doi: 10.1007/s10115-015-0857-0.
|
2716 |
+
[32]
|
2717 |
+
S. Kul arni and S. . Rodd, “Context A are Recommendation Systems: A revie of the
|
2718 |
+
state of the art techni ues,” Computer Science Review, vol. 37, p. 100255, 2020, doi:
|
2719 |
+
10.1016/j.cosrev.2020.100255.
|
2720 |
+
[33]
|
2721 |
+
. Chen and Y. . Chuang, “ u y and nonlinear programming approach for optimi ing
|
2722 |
+
the performance of ubi uitous hotel recommendation,” Journal of Ambient Intelligence and
|
2723 |
+
Humanized Computing, vol. 9, no. 2, pp. 275–284, Apr. 2018, doi: 10.1007/s12652-015-
|
2724 |
+
0335-2.
|
2725 |
+
[34]
|
2726 |
+
Kha aei and Alimohammadi, “Context-Aware Group-Oriented Location Recommendation
|
2727 |
+
in Location- ased Social et or s,” ISPRS International Journal of Geo-Information, vol.
|
2728 |
+
8, no. 9, p. 406, 2019, doi: 10.3390/ijgi8090406.
|
2729 |
+
[35]
|
2730 |
+
. Gabor and . Altmann, “ enchmar ing Surrogate-Assisted Genetic Recommender
|
2731 |
+
Systems,” 20 9, [ nline]. Available: http://arxiv.org/abs/ 908.02880
|
2732 |
+
[36]
|
2733 |
+
. Khalid, . . S. Khan, S. . Khan, and A. Y. Zomaya, “ mniSuggest: A ubi uitous
|
2734 |
+
cloud-based context-a are recommendation system for mobile social net or s,” IEEE
|
2735 |
+
Transactions on Services Computing, vol. 7, no. 3, pp. 401–414, 2014, doi:
|
2736 |
+
10.1109/TSC.2013.53.
|
2737 |
+
[37]
|
2738 |
+
M. H. Kuo, . C. Chen, and C. . iang, “ uilding and evaluating a location-based service
|
2739 |
+
recommendation system ith a preference adjustment mechanism,” Expert Systems with
|
2740 |
+
Applications,
|
2741 |
+
vol.
|
2742 |
+
36,
|
2743 |
+
no.
|
2744 |
+
2
|
2745 |
+
PART
|
2746 |
+
2,
|
2747 |
+
pp.
|
2748 |
+
3543–3554,
|
2749 |
+
2009,
|
2750 |
+
doi:
|
2751 |
+
10.1016/j.eswa.2008.02.014.
|
2752 |
+
[38]
|
2753 |
+
Y. ang and Y. Guo, “A context-a are matrix factori ation recommender algorithm,”
|
2754 |
+
Proceedings of the IEEE International Conference on Software Engineering and Service
|
2755 |
+
Sciences, ICSESS, pp. 914–918, 2013, doi: 10.1109/ICSESS.2013.6615454.
|
2756 |
+
[39]
|
2757 |
+
M. Sadeghi and S. A. Asghari, “Recommender Systems ased on volutionary
|
2758 |
+
Computing: A Survey,” Journal of Software Engineering and Applications, vol. 10, no. 05,
|
2759 |
+
pp. 407–421, 2017, doi: 10.4236/jsea.2017.105023.
|
2760 |
+
[40]
|
2761 |
+
. . ao, S. R. Jeong, and . Ahn, “A novel recommendation model of location-based
|
2762 |
+
advertising: Context-A are Collaborative iltering using GA approach,” Expert Systems
|
2763 |
+
with Applications, vol. 39, no. 3, pp. 3731–3739, 2012, doi: 10.1016/j.eswa.2011.09.070.
|
2764 |
+
[41]
|
2765 |
+
A. Livne, M. Unger, B. Shapira, and L. Ro ach, “ eep Context-Aware Recommender
|
2766 |
+
System
|
2767 |
+
tili ing
|
2768 |
+
Se uential
|
2769 |
+
atent
|
2770 |
+
Context,”
|
2771 |
+
20 9,
|
2772 |
+
[ nline].
|
2773 |
+
Available:
|
2774 |
+
http://arxiv.org/abs/1909.03999
|
2775 |
+
|
2776 |
+
39
|
2777 |
+
|
2778 |
+
[42]
|
2779 |
+
. ossein adeh Aghdam, “Context-aware recommender systems using hierarchical
|
2780 |
+
hidden ar ov model,” Physica A: Statistical Mechanics and its Applications, vol. 518, pp.
|
2781 |
+
89–98, 2019, doi: 10.1016/j.physa.2018.11.037.
|
2782 |
+
[43]
|
2783 |
+
N. M. Villegas, C. Sánchez, J. Díaz-Cely, and G. amura, “Characteri ing context-aware
|
2784 |
+
recommender systems: A systematic literature revie ,” Knowledge-Based Systems, vol.
|
2785 |
+
140, pp. 173–200, 2018, doi: 10.1016/j.knosys.2017.11.003.
|
2786 |
+
[44]
|
2787 |
+
S. Ra a and C. ing, “ rogress in context-aware recommender systems - An overvie ,”
|
2788 |
+
Computer Science Review, vol. 31, pp. 84–97, 2019, doi: 10.1016/j.cosrev.2019.01.001.
|
2789 |
+
[45]
|
2790 |
+
J. ian, Z. Chen, X. Zhou, X. Xie, . Zhang, and G. Sun, “x eep : Combining explicit
|
2791 |
+
and implicit feature interactions for recommender systems,” Proceedings of the ACM
|
2792 |
+
SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 1754–
|
2793 |
+
1763, 2018, doi: 10.1145/3219819.3220023.
|
2794 |
+
[46]
|
2795 |
+
S. Sivapalan, “A Genetic Algorithm Approach to Recommender System Cold Start
|
2796 |
+
roblem,” 20 5.
|
2797 |
+
[47]
|
2798 |
+
. Alhija i, “ he se of the Genetic Algorithms in the Recommender Systems,” no. arch,
|
2799 |
+
2017, doi: 10.13140/RG.2.2.24308.76169.
|
2800 |
+
[48]
|
2801 |
+
J. Rajes ari and S. ariharan, “ ersonali ed Search Recommender System: State of Art,
|
2802 |
+
xperimental Results and Investigations,” International Journal of Education and
|
2803 |
+
Management
|
2804 |
+
Engineering,
|
2805 |
+
vol.
|
2806 |
+
6,
|
2807 |
+
no.
|
2808 |
+
3,
|
2809 |
+
pp.
|
2810 |
+
1–8,
|
2811 |
+
May
|
2812 |
+
2016,
|
2813 |
+
doi:
|
2814 |
+
10.5815/ijeme.2016.03.01.
|
2815 |
+
[49]
|
2816 |
+
. ivedi and K. K. harad aj, “A fu y approach to multidimensional context aware e-
|
2817 |
+
learning recommender system,” Lecture Notes in Computer Science (including subseries
|
2818 |
+
Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 8284
|
2819 |
+
LNAI, pp. 600–610, 2013, doi: 10.1007/978-3-319-03844-5_59.
|
2820 |
+
[50]
|
2821 |
+
S. . in and I. an, “ etection of the customer time-variant pattern for improving
|
2822 |
+
recommender systems,” Expert Systems with Applications, vol. 28, no. 2, pp. 189–199,
|
2823 |
+
2005, doi: 10.1016/j.eswa.2004.10.001.
|
2824 |
+
[51]
|
2825 |
+
. ernando and . amayo, “Smart articipation A Fuzzy-Based Recommender System
|
2826 |
+
for Political Community- uilding,” 20 4.
|
2827 |
+
[52]
|
2828 |
+
A. Ciaramella, . G. C. A. Cimino, . a erini, and . arcelloni, “ sing context history
|
2829 |
+
to personali e a resource recommender via a genetic algorithm,” Proceedings of the 2010
|
2830 |
+
10th International Conference on Intelligent Systems Design and Applications, ISDA’10,
|
2831 |
+
pp. 965–970, 2010, doi: 10.1109/ISDA.2010.5687064.
|
2832 |
+
[53]
|
2833 |
+
. ouneffouf, A. ou eghoub, and A. . Gançars i, “A contextual-bandit algorithm for
|
2834 |
+
mobile context-a are recommender system,” Lecture Notes in Computer Science
|
2835 |
+
|
2836 |
+
40
|
2837 |
+
|
2838 |
+
(including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in
|
2839 |
+
Bioinformatics), vol. 7665 LNCS, no. PART 3, pp. 324–331, 2012, doi: 10.1007/978-3-
|
2840 |
+
642-34487-9_40.
|
2841 |
+
[54]
|
2842 |
+
R. Meena and K. K. harad aj, “A Genetic Algorithm Approach for Group Recommender
|
2843 |
+
System ased on artial Ran ings,” Journal of Intelligent Systems, vol. 29, no. 1, pp. 653–
|
2844 |
+
663, 2020, doi: 10.1515/jisys-2017-0561.
|
2845 |
+
[55]
|
2846 |
+
J. A. Konstan and G. Adomavicius, “ o ard identification and adoption of best practices
|
2847 |
+
in algorithmic recommender systems research,” in ACM International Conference
|
2848 |
+
Proceeding Series, 2013, pp. 23–28. doi: 10.1145/2532508.2532513.
|
2849 |
+
[56]
|
2850 |
+
. Zheng and Q. i, “A recommender system based on tag and time information for social
|
2851 |
+
tagging systems,” Expert Systems with Applications, vol. 38, no. 4, pp. 4575–4587, 2011,
|
2852 |
+
doi: 10.1016/j.eswa.2010.09.131.
|
2853 |
+
[57]
|
2854 |
+
M. A. Domingues, A. M. Jorge, and C. Soares, “ imensions as irtual Items: Improving
|
2855 |
+
the predictive ability of top- recommender systems,” Information Processing and
|
2856 |
+
Management, vol. 49, no. 3, pp. 698–720, 2013, doi: 10.1016/j.ipm.2012.07.009.
|
2857 |
+
[58]
|
2858 |
+
L. O. Colombo-Mendoza, R. Valencia-García, A. Rodríguez-González, G. Alor-
|
2859 |
+
Hernández, and J. J. Samper-Zapater, “Recom et : A context-aware knowledge-based
|
2860 |
+
mobile recommender system for movie sho times,” Expert Systems with Applications, vol.
|
2861 |
+
42, no. 3, pp. 1202–1222, 2015, doi: 10.1016/j.eswa.2014.09.016.
|
2862 |
+
[59]
|
2863 |
+
M. Y. H. Al-Shamri and K. K. harad aj, “ u y-genetic approach to recommender
|
2864 |
+
systems based on a novel hybrid user model,” Expert Systems with Applications, vol. 35,
|
2865 |
+
no. 3, pp. 1386–1399, 2008, doi: 10.1016/j.eswa.2007.08.016.
|
2866 |
+
[60]
|
2867 |
+
S. Renjith, A. Sree umar, and . Jathavedan, “An extensive study on the evolution of
|
2868 |
+
context-a are personali ed travel recommender systems,” Information Processing and
|
2869 |
+
Management, vol. 57, no. 1, p. 102078, 2020, doi: 10.1016/j.ipm.2019.102078.
|
2870 |
+
[61]
|
2871 |
+
U. Marung, N. Theera- mpon, and S. Auephan iriya ul, “ op-N recommender systems
|
2872 |
+
using genetic algorithm-based visual-clustering methods,” Symmetry (Basel), vol. 8, no.
|
2873 |
+
7, pp. 1–19, 2016, doi: 10.3390/sym8070054.
|
2874 |
+
[62]
|
2875 |
+
. ohamed, . Abdulsalam, and . ohammed, “Adaptive genetic algorithm for
|
2876 |
+
improving prediction accuracy of a multi-criteria recommender system,” Proceedings -
|
2877 |
+
2018 IEEE 12th International Symposium on Embedded Multicore/Many-Core Systems-
|
2878 |
+
on-Chip, MCSoC 2018, vol. 11, pp. 79–86, 2018, doi: 10.1109/MCSoC2018.2018.00025.
|
2879 |
+
[63]
|
2880 |
+
Y. Kilani, A. . toom, A. Alsarhan, and . Almaayah, “A genetic algorithms-based hybrid
|
2881 |
+
recommender system of matrix factorization and neighborhood-based techni ues,”
|
2882 |
+
Journal
|
2883 |
+
of
|
2884 |
+
Computational
|
2885 |
+
Science,
|
2886 |
+
vol.
|
2887 |
+
28,
|
2888 |
+
pp.
|
2889 |
+
78–93,
|
2890 |
+
2018,
|
2891 |
+
doi:
|
2892 |
+
10.1016/j.jocs.2018.08.007.
|
2893 |
+
|
2894 |
+
41
|
2895 |
+
|
2896 |
+
[64]
|
2897 |
+
Y. Juan, Y. Zhuang, . S. Chin, and C. J. in, “ ield-aware factorization machines for
|
2898 |
+
C R prediction,” RecSys 2016 - Proceedings of the 10th ACM Conference on
|
2899 |
+
Recommender Systems, pp. 43–50, 2016, doi: 10.1145/2959100.2959134.
|
2900 |
+
[65]
|
2901 |
+
J. M. Ruiz-Martínez, J. A. Miñarro-Giménez, D. Castellanos-Nieves, F. García-Sáanchez,
|
2902 |
+
and R. Valencia-García, “ ntology population: An application for the -tourism domain,”
|
2903 |
+
International Journal of Innovative Computing, Information and Control, vol. 7, no. 11, pp.
|
2904 |
+
6115–6183, 2011.
|
2905 |
+
[66]
|
2906 |
+
R. arta, C. eilmayr, . röll, C. Grün, and . erthner, “Covering the semantic space
|
2907 |
+
of tourism : An approach based on modulari ed ontologies,” in ACM International
|
2908 |
+
Conference Proceeding Series, 2009, p. 79. doi: 10.1145/1552262.1552263.
|
2909 |
+
[67]
|
2910 |
+
K. Haruna et al., “Context-aware recommender system: A review of recent developmental
|
2911 |
+
process and future research direction,” Applied Sciences (Switzerland), vol. 7, no. 12, pp.
|
2912 |
+
1–25, 2017, doi: 10.3390/app7121211.
|
2913 |
+
[68]
|
2914 |
+
S. inda and K. K. harad aj, “A Genetic Algorithm Approach to Context-Aware
|
2915 |
+
Recommendations Based on Spatio-temporal Aspects,” vol. 77 , Springer Singapore,
|
2916 |
+
2019, pp. 59–70. doi: 10.1007/978-981-10-8797-4_7.
|
2917 |
+
[69]
|
2918 |
+
S. Rendle, “ actori ation machines,” in Proceedings - IEEE International Conference on
|
2919 |
+
Data Mining, ICDM, 2010, pp. 995–1000. doi: 10.1109/ICDM.2010.127.
|
2920 |
+
|
2921 |
+
|
99AyT4oBgHgl3EQf3fke/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
9NE4T4oBgHgl3EQfdgy1/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:bb9fc27d56756712e6c800b61b43082afbb2b815f08bc3bfb08636a5350de9e5
|
3 |
+
size 4128813
|
9tFLT4oBgHgl3EQfCC72/content/2301.11974v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:32d2d8b0b7d70a2379732eb344eafa3ff4fd590706a889e660c132ddb973be33
|
3 |
+
size 339789
|
AdFLT4oBgHgl3EQfEy_H/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:06223c0b4d312a337e0db5702afe1bf3f58346735ac440db6f7c8538052615c7
|
3 |
+
size 12189741
|
B9AzT4oBgHgl3EQfwP5n/content/tmp_files/2301.01719v1.pdf.txt
ADDED
@@ -0,0 +1,393 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Radiance Textures for Rasterizing Ray-Traced Data

Jakub Maksymilian Fober

Abstract
Presenting real-time rendering of 3D surfaces using radiance textures for fast synthesis of complex incidence-variable effects and environment interactions. This includes iridescence, parallax occlusion and interior mapping, (specular, regular, diffuse, total-internal) reflections with many bounces, refraction, subsurface scattering, transparency, and possibly more. The method divides textures into a matrix of radiance buckets, where each bucket represents some data at various incidence angles. The data can hold the final pixel color, or deferred-rendering ambient occlusion, reflections, a shadow map, etc. The resolution of the final synthesized output is the radiance-bucket matrix size. The technique can be implemented with a simple fragment shader. Its computational footprint is that of simple diffuse-only graphics, but with the visual fidelity of a complex (off-line) ray-traced render, at the cost of a storage memory footprint. The balance between computational footprint and storage memory footprint can be tuned with a variable compression ratio of the repetitive radiance scene textures.

CCS Concepts: • Computing methodologies → Reflectance modeling; Rasterization; Texturing; Ray tracing.

Keywords: 3D graphics, holography, light field, plenoptic, radiance field, rasterization, ray tracing, reflectance field

© 2023 Jakub Maksymilian Fober
This work is licensed under the Creative Commons BY-NC-ND 3.0 license. https://creativecommons.org/licenses/by-nc-nd/3.0/
For all other uses, including commercial, contact the owner/author(s).
arXiv:2301.01719v1 [cs.GR] 4 Jan 2023

1 Introduction
Radiance and reflectance field rendering techniques are a class of algorithms used in computer graphics to generate images of three-dimensional scenes. These algorithms simulate the way light interacts with surfaces in a virtual environment, producing realistic and detailed images.
These techniques have been the subject of extensive research in computer graphics and rendering, as they offer a powerful and flexible way to generate high-quality images. There is a wide range of applications for radiance and reflectance field algorithms, including film and video game production, architectural visualization, and scientific visualization.
In this paper, a technique is presented to capture and render complex precomputed light interactions via radiance field textures embedded onto a three-dimensional object's surface. The presented technique uses a standard fragment (pixel) shader and a two-dimensional texture lookup to render dynamic, view-independent, photo-realistic images at a fraction of the computational cost associated with effects such as real-time ray tracing, parallax mapping, and dynamic shadowing.
It is well suited for real-time execution in video games, virtual reality, and virtual production environments on modern hardware. It can take advantage of the direct-storage capability of ninth-generation gaming systems, providing high-fidelity, high-performance images.
This technique can replace computationally heavy rendering-pipeline chains, while preserving hardware-accelerated, highly optimized rasterization elements. It can also enable wider adoption of real-time GPU ray tracing, with the ability to combine bounce rays with the precomputed radiance of the environment.

1.1 Previous work
Mainstream implementations of radiance field rendering focus on volumetric data structures and spherical harmonics for rendering images [Yu et al. 2021]. While volumetric data can be sparse in order to exclude void regions [Yu et al. 2021], the ultimate goal would logically be to perfectly match the geometry of the represented object. And since the inside volume of the object is of no interest (most of the time), only half of the radiance sphere is considered practically useful. Therefore, such fields could effectively be spread across the surface of the object.
Some researchers have embraced this approach, with neural reflectance fields as texturing primitives [Baatz et al. 2022], which rendered high-fidelity results. But while neural fields produce fantastic results, they are computationally intensive at rendering time [Yu et al. 2021] and therefore are not suitable for real-time applications.

1.2 Overview of the content
In this initial version of the paper you will find a theoretical explanation and implementation of the subject, along with equations and schematics. Some elements have been tested, like the mapping functions; some are yet to be presented, as the follow-up updates continue.

1.3 Document naming convention
This document uses the following naming convention:
• Left-handed coordinate system.
• Vectors presented natively in column.
• Row-major order matrix arrangement, denoted "M_row col".
• Matrix multiplication by "[column]_a · [row]_b = M_ab".
• A single bar enclosure "|u|" represents the scalar absolute value.
• A single bar enclosure "|v⃗|" represents the vector's length.
• Vectors with an arithmetic sign, or without, are calculated component-wise and form another vector.
• Centered dot "·" represents the vector dot product.
• Square brackets with a comma "[f, c]" denote an interval.
• Square brackets with blanks "[x y]" denote vectors and matrices.
• The power of "−1" implies the reciprocal of the value.
• QED symbol "□" marks the final result or output.
This naming convention simplifies the process of transforming formulas into shader code.

2 Methodology
Each pixel of the model's texture contains a discrete radiance hemispherical map of size n × n, called a "bucket". Buckets are arranged in place of the initial texture's pixels, increasing the overall resolution to w·n × h·n pixels, where w and h denote the width and height of the synthesized output texture, respectively. Buckets are highly repetitive and change only slightly from one to another, which is a good case for simple compression.
To synthesize the output texture for a given view position, a single sample per bucket is taken, giving a normal-resolution texture output.
The model's u, v texture coordinates correspond to the bucket-matrix position index, while the incidence vector corresponds to the bucket's internal u, v position. Therefore the radiance texture sampling algorithm can be described as a four-dimensional plenoptic function L(u, v, θ, φ), where u, v denote the model's texture coordinates and θ, φ the incidence angles.

Figure 1. Radiance texture sampling model, where the incidence R³ vector (blue) is projected and squarified (orange) to R² texture coordinates (red and green), which map onto a hemispherical radiance bucket represented as a flat square.

Each radiance bucket should represent a hemisphere of reflectivity. The equisolid azimuthal projection was chosen for this task for its properties, as it preserves area and resembles a spherical mirror reflection [Wikipedia contributors 2022]. The resolution of the radiance bucket, in such a projection, directly corresponds to sin(θ/2)·√2, where θ is the incidence angle. To spread information efficiently across the square buckets, an additional disc-to-square mapping function was implemented, providing a uniform pixel count across both orthogonal and diagonal directions.
The equisolid azimuthal projection mapping can be implemented easily in the vector domain, without the use of anti-trigonometric functions, as the orthographically projected normalized sum of the incidence and normal vectors has a length of sin(θ/2). This eliminates the θ, φ angles from the plenoptic function, resulting in a new L′(u, v, x, y, z), where x, y, z correspond to the incidence unit-vector components in orthogonal texture space.
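To make the bucket layout concrete, here is a minimal CPU-side sketch (an illustration, not the paper's shader code) of how such a grid of buckets could be sampled: the surface u, v selects the bucket, and a direction-dependent coordinate inside the bucket (derived in section 2.1) selects the texel. The function name, the array layout, and the nearest-neighbour fetch are assumptions made for the example.

```python
import numpy as np

def sample_radiance_texture(radiance_texture, uv, bucket_uv, bucket_res):
    """Fetch one sample from a radiance texture laid out as a grid of buckets.

    radiance_texture : float array of shape (h * n, w * n, channels)
    uv               : surface texture coordinate in [0, 1]^2 (selects the bucket)
    bucket_uv        : coordinate in [0, 1]^2 inside the bucket (selects the texel),
                       derived from the incidence direction (see section 2.1)
    bucket_res       : n, the per-bucket resolution
    """
    tex_h, tex_w = radiance_texture.shape[:2]
    w, h = tex_w // bucket_res, tex_h // bucket_res  # bucket-matrix size

    # Integer bucket index from the model's u, v coordinates.
    bx = min(int(uv[0] * w), w - 1)
    by = min(int(uv[1] * h), h - 1)

    # Texel inside the bucket; nearest-neighbour keeps neighbouring buckets
    # from bleeding into each other (the paper instead clamps before filtering).
    px = min(int(bucket_uv[0] * bucket_res), bucket_res - 1)
    py = min(int(bucket_uv[1] * bucket_res), bucket_res - 1)

    return radiance_texture[by * bucket_res + py, bx * bucket_res + px]
```

For example, a 1024 × 1024 base texture with n = 16 stores a 16384 × 16384 radiance texture, yet the synthesized output still fetches only one texel per visible pixel.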
2.1 Mapping of incident vector to radiance bucket
For every visible pixel there is an incidence vector Î ∈ R³. This vector can be mapped and projected to R² texture coordinates using a translation and an R²ˣ³-matrix transformation. The following equation maps the incidence vector to the azimuthal equisolid projection, with r = 1, at Ω = 180°.

\[
\begin{bmatrix} \vec{A}_x \\ \vec{A}_y \\ \sqrt{2}\cos(\theta/2) \end{bmatrix}
= \sqrt{2}\,
\frac{\begin{bmatrix} \hat{I}_x + \hat{N}_x \\ \hat{I}_y + \hat{N}_y \\ \hat{I}_z + \hat{N}_z \end{bmatrix}}
     {\left|\begin{bmatrix} \hat{I}_x + \hat{N}_x \\ \hat{I}_y + \hat{N}_y \\ \hat{I}_z + \hat{N}_z \end{bmatrix}\right|}
\tag{1a}
\]

\[
\begin{bmatrix} \vec{A}_x \\ \vec{A}_y \end{bmatrix}
= \frac{\sqrt{2}}{\left|\begin{bmatrix} \hat{I}_x & \hat{I}_y & \hat{I}_z + 1 \end{bmatrix}\right|}
\begin{bmatrix} \hat{I}_x \\ \hat{I}_y \end{bmatrix},
\quad \text{if } \hat{N}_z = 1
\tag{1b}
\]

Inverse mapping:

\[
\begin{bmatrix} \hat{A}'_x \\ \hat{A}'_y \\ \hat{A}'_z \end{bmatrix}
= \begin{bmatrix} \vec{A}_x\sqrt{1/2} \\ \vec{A}_y\sqrt{1/2} \\ \sqrt{1 - \vec{A}_x^2/2 - \vec{A}_y^2/2} \end{bmatrix}
\tag{2a}
\]

\[
\begin{bmatrix} \hat{I}_x \\ \hat{I}_y \\ \hat{I}_z \end{bmatrix}
= 2\left( \big(\hat{A}'\cdot\hat{N}\big)
\begin{bmatrix} \hat{A}'_x \\ \hat{A}'_y \\ \hat{A}'_z \end{bmatrix}
- \begin{bmatrix} \hat{N}_x \\ \hat{N}_y \\ \hat{N}_z \end{bmatrix} \right)
+ \begin{bmatrix} \hat{N}_x \\ \hat{N}_y \\ \hat{N}_z \end{bmatrix}
\tag{2b}
\]

\[
= \begin{bmatrix}
\vec{A}_x\sqrt{2 - \vec{A}_x^2 - \vec{A}_y^2} \\
\vec{A}_y\sqrt{2 - \vec{A}_x^2 - \vec{A}_y^2} \\
1 - \vec{A}_x^2 - \vec{A}_y^2
\end{bmatrix},
\quad \text{if } \hat{N}_z = 1
\tag{2c}
\]

where A⃗ ∈ [−1, 1]² is the azimuthal equisolid projection coordinate, θ is the incidence angle, and N̂ ∈ R³ is the surface normal vector. As the incidence Î ∈ R³ is mapped to or from orthogonal texture space, where N̂_z = 1, the transformation can take the form of equations 1b and 2c.

The following equation transforms the azimuthal projection vector into square coordinates for radiance bucket sampling.¹

\[
\begin{bmatrix} \vec{B}_x \\ \vec{B}_y \end{bmatrix}
= \frac{\left|\begin{bmatrix} \vec{A}_x & \vec{A}_y \end{bmatrix}\right|}{\max\!\big(|\vec{A}_x|, |\vec{A}_y|\big)}
\begin{bmatrix} \vec{A}_x \\ \vec{A}_y \end{bmatrix},
\quad \text{if } \vec{A}_x \text{ and } \vec{A}_y \neq 0
\tag{3}
\]

where B⃗ ∈ [−1, 1]² is the bucket's centered texture coordinate and A⃗ ∈ [−1, 1]² is the azimuthal projection vector.

Note. It is important to prevent pixel blending between the edges of neighboring buckets. This can be done by clamping the bucket coordinates to the range B⃗ ∈ [B_res⁻¹ − 1, 1 − B_res⁻¹]², where B_res is the bucket resolution.

The inverse transformation of the bucket's centered coordinates B⃗ ∈ R² to the azimuthal projection coordinates A⃗ ∈ R² can be achieved with the same, but inverted, method.

\[
\begin{bmatrix} \vec{A}_x \\ \vec{A}_y \end{bmatrix}
= \frac{\max\!\big(|\vec{B}_x|, |\vec{B}_y|\big)}{\sqrt{\vec{B}_x^2 + \vec{B}_y^2}}
\begin{bmatrix} \vec{B}_x \\ \vec{B}_y \end{bmatrix}
\tag{4a}
\]

\[
\begin{bmatrix} \hat{R}_x \\ \hat{R}_y \\ \hat{R}_z \end{bmatrix}
= \begin{bmatrix}
-\vec{A}_x\sqrt{2 - \vec{A}_x^2 - \vec{A}_y^2} \\
-\vec{A}_y\sqrt{2 - \vec{A}_x^2 - \vec{A}_y^2} \\
1 - \vec{A}_x^2 - \vec{A}_y^2
\end{bmatrix}
\tag{4b}
\]

where R̂ ∈ R³ denotes the equisolid reflection vector. This vector is used to sample ray-traced data onto the radiance field texture. It is a version of the vector mirrored along the normal, found in equation 2c above.
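The mapping chain of this section amounts to a small amount of per-pixel arithmetic. Below is a minimal NumPy sketch of equations (1b), (3), the edge clamp, and the inverse pair (4a) and (4b), written in tangent space where N̂ = [0 0 1]. It is an illustrative reading of the formulas, not the author's shader code, and the function names and the [0, 1] coordinate convention are assumptions.

```python
import numpy as np

def incidence_to_bucket_uv(incidence, bucket_res):
    """Map a unit incidence vector (tangent space, N = [0, 0, 1]) to
    bucket-internal texture coordinates in [0, 1]^2, following equations
    (1b) and (3) plus the border clamp."""
    ix, iy, iz = incidence
    # Equation (1b): azimuthal equisolid projection, A in [-1, 1]^2.
    a = np.array([ix, iy]) * np.sqrt(2.0) / np.linalg.norm([ix, iy, iz + 1.0])
    # Equation (3): disc-to-square mapping.
    m = max(abs(a[0]), abs(a[1]))
    b = a * (np.linalg.norm(a) / m) if m > 0.0 else a
    # Clamp away from bucket borders to avoid blending with neighbours.
    limit = 1.0 - 1.0 / bucket_res
    b = np.clip(b, -limit, limit)
    return 0.5 * b + 0.5  # centered [-1, 1]^2 -> [0, 1]^2

def bucket_uv_to_reflection(bucket_uv):
    """Inverse path, equations (4a) and (4b): from bucket-internal coordinates
    back to the equisolid reflection vector used when baking ray-traced data."""
    b = 2.0 * np.asarray(bucket_uv) - 1.0
    r = np.linalg.norm(b)
    a = b * (max(abs(b[0]), abs(b[1])) / r) if r > 0.0 else b  # eq. (4a)
    s = 2.0 - a[0] ** 2 - a[1] ** 2
    return np.array([-a[0] * np.sqrt(s),
                     -a[1] * np.sqrt(s),
                     1.0 - a[0] ** 2 - a[1] ** 2])               # eq. (4b)
```

In a fragment shader the same arithmetic would run per pixel, with `incidence_to_bucket_uv` feeding the texel fetch of the bucket selected by the surface u, v coordinates.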
3 Results
TBA

4 Conclusion
I have theorized about a possible implementation of radiance field texturing using modern hardware shading capabilities, and presented a mathematical solution for executing such a concept.
Note. More conclusions are to be added, after the update to the paper.

5 Possible applications
Radiance field texture sampling can replace the shading pipeline or supplement it with enhanced effects. Some such effects include:

Parallax interior mapping. This effect is used to mimic the interior of a room, as seen through a window, or it can simulate a portal to another place.

Proxy meshes with parallax mapping. A radiance texture with an alpha mask can simulate more complex or furry objects bound inside a proxy mesh, similarly to neural radiance field texturing primitives [Baatz et al. 2022].

¹See figure 2 for visual reference.
(a) Picture of a one-cent American coin. (b) The one-cent coin mapped to a rectangle, using equation 3.
Figure 2. A visual example of disc-to-square mapping using the formulation found in equation 3.

Reflections. Many light bounces can be combined into a single pixel of the radiance texture map. Dynamic objects can then sample such a radiance field to obtain environment reflections. Semi-real-time ray tracing can also accumulate dynamically generated reflections into such a texture map, to update and enhance the environment one.

Shadowing. A 1-bit radiance field texture map can represent the shadowing of static objects. Here, the incidence vector is replaced with the light direction vector for shadow-occlusion sampling. It can work with both parallel light sources and point lights. With more than one sample per bucket, area shadows are possible to produce.

Subsurface scattering. This computationally demanding effect can be encoded in a radiance texture map, which then replaces the incidence vector with the light direction vector in relation to the view position for sampling.
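As an illustration of the shadowing use case, the sketch below reuses the helpers from the earlier sketches (`sample_radiance_texture` and `incidence_to_bucket_uv`, both hypothetical names, not from the paper) with the light direction substituted for the view incidence; storing the 1-bit map as 0/1 floats and the jittered area-shadow loop are likewise assumptions made for the example.

```python
import numpy as np

def shadow_factor(shadow_texture, uv, light_dir_tangent, bucket_res):
    """Look up precomputed static-object shadowing for one surface point.

    shadow_texture    : (h * n, w * n) array of 0.0 / 1.0 occlusion values
    uv                : surface texture coordinate in [0, 1]^2
    light_dir_tangent : unit vector toward the light, in tangent space
    bucket_res        : per-bucket resolution n
    """
    # Same mapping as for view incidence, but driven by the light direction.
    bucket_uv = incidence_to_bucket_uv(light_dir_tangent, bucket_res)
    return sample_radiance_texture(shadow_texture[..., None], uv,
                                   bucket_uv, bucket_res)[0]

def area_shadow(shadow_texture, uv, light_dir_tangent, bucket_res,
                samples=4, spread=0.05):
    """Soft (area) shadows: average several jittered light directions."""
    rng = np.random.default_rng(0)
    total = 0.0
    for _ in range(samples):
        jittered = light_dir_tangent + rng.normal(0.0, spread, 3)
        jittered /= np.linalg.norm(jittered)
        total += shadow_factor(shadow_texture, uv, jittered, bucket_res)
    return total / samples
```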
References
H. Baatz, J. Granskog, M. Papas, F. Rousselle, and J. Novák. 2022. NeRF-Tex: Neural Reflectance Field Textures. Computer Graphics Forum 41, 6 (March 2022), 287–301. https://doi.org/10.1111/cgf.14449
Wikipedia contributors. 2022. Fisheye lens: Mapping function. Wikipedia, The Free Encyclopedia. https://en.wikipedia.org/w/index.php?title=Fisheye_lens&oldid=1124809304#Mapping_function [Online].
Alex Yu, Sara Fridovich-Keil, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. 2021. Plenoxels: Radiance Fields without Neural Networks. arXiv (Dec. 2021). https://doi.org/10.48550/ARXIV.2112.05131

Received January 2023
B9AzT4oBgHgl3EQfwP5n/content/tmp_files/load_file.txt
ADDED
@@ -0,0 +1,156 @@
B9FJT4oBgHgl3EQfACzo/content/tmp_files/2301.11418v1.pdf.txt
ADDED
@@ -0,0 +1,980 @@
1 |
+
Parkinson gait modelling from an anomaly deep
|
2 |
+
representation
|
3 |
+
Edgar Rangela, Fabio Martineza,∗
|
4 |
+
a Biomedical Imaging, Vision and Learning Laboratory (BIVL2ab), Universidad Industrial
|
5 |
+
de Santander, 680002, Bucaramanga, Colombia
|
6 |
+
Abstract
|
7 |
+
Parkinson’s Disease is associated with gait movement disorders, such as pos-
|
8 |
+
tural instability, stiffness, and tremors. Today, some approaches implemented
|
9 |
+
learning representations to quantify kinematic patterns during locomotion, sup-
|
10 |
+
porting clinical procedures such as diagnosis and treatment planning. These
|
11 |
+
approaches assumes a large amount of stratified and labeled data to optimize
|
12 |
+
discriminative representations. Nonetheless, these considerations may restrict
|
13 |
+
the operability of approaches in real scenarios during clinical practice.
|
14 |
+
This
|
15 |
+
work introduces a self-supervised generative representation, under the pretext
|
16 |
+
of video reconstruction within an anomaly detection framework. This architecture
|
17 |
+
is trained following a one-class, weakly supervised learning scheme to avoid inter-class
|
18 |
+
variance and approach the multiple relationships that represent locomotion. For
|
19 |
+
validation, 14 PD patients and 23 control subjects were recorded; the model was trained
|
20 |
+
on the control population only, achieving an AUC of 86.9%, a homoscedasticity
|
21 |
+
level of 80%, and a shapeness level of 70% in the classification task, evidencing its
|
22 |
+
generalization.
|
23 |
+
Keywords:
|
24 |
+
Anomaly detection, Deep Learning, Weakly Supervised, Parkinson
|
25 |
+
Disease
|
26 |
+
1. Introduction
|
27 |
+
Parkinson’s Disease (PD) is the second most common neurodegenerative dis-
|
28 |
+
order, affecting more than 6.2 million people worldwide [1, 2]. According to the
|
29 |
+
World Health Organization, this number will increase by more than 12 million by
|
30 |
+
2030 [3]. PD is characterized by the progressive loss of dopamine, a neurotrans-
|
31 |
+
mitter involved in the execution of voluntary movements. For this reason, the
|
32 |
+
main diagnostic support is based on the observation and analysis of progressive
|
33 |
+
motor disorders, such as tremor, rigidity, slowness of movement (bradykinesia),
|
34 |
+
∗Corresponding author
|
35 |
+
Email addresses: [email protected] (Edgar Rangel),
|
36 |
+
[email protected] (Fabio Martinez)
|
37 |
+
URL: https://bivl2ab.uis.edu.co/ (Fabio Martinez)
|
38 |
+
Preprint submitted to Pattern Recognition
|
39 |
+
January 30, 2023
|
40 |
+
arXiv:2301.11418v1 [cs.CV] 26 Jan 2023
|
41 |
+
|
42 |
+
postural instability, among many other related symptoms [4]. Despite impor-
|
43 |
+
tant advances to determine the sources of the disease and multiple symptoms,
|
44 |
+
today, there is no definitive and universal biomarker to characterize, diagnose,
|
45 |
+
and follow the progression of PD patients.
|
46 |
+
Particularly, the gait is a multi-factorial and complex locomotion process
|
47 |
+
that involves several subsystems. The associated kinematic patterns are typ-
|
48 |
+
ically recovered from standard marker-based setups that coarsely approximate
|
49 |
+
complex motion behaviors, resulting in restrictive, intrusive protocols that alter natu-
|
50 |
+
ral postural gestures for PD description. Alternatively, markerless video strate-
|
51 |
+
gies together with discriminative learning approximations have emerged as key
|
52 |
+
solutions to support the PD characterization and classification from other dis-
|
53 |
+
eases [5–9]. These methodologies have been successful in controlled studies but
|
54 |
+
strongly require a stratified, balanced, and well-labeled dataset to avoid over-
|
55 |
+
fitting. Besides, these approaches are biased toward the physicians’ experience to
|
56 |
+
determine the disease, limiting the quantification to coarse scale indexes
|
57 |
+
[10]. Even worse, these approaches solve classification tasks but remain limited
|
58 |
+
in explaining the learned data representation and defining the generalization
|
59 |
+
capability w.r.t. new data.
|
60 |
+
This work introduces a deep generative and anomaly architecture to learn a
|
61 |
+
hidden descriptor to represent locomotion patterns. Following a weakly super-
|
62 |
+
vised methodology, a 3D net is self-trained under a gait video reconstruction pre-
|
63 |
+
text. Then, the resultant embedding representation encodes complex dynamic
|
64 |
+
gait relationships, captured from the control population, that allow to discrimi-
|
65 |
+
nate parkinson patients. The main contributions of this work are summarized
|
66 |
+
as follows:
|
67 |
+
• A new digital biomarker coded as an embedding vector with the capability
|
68 |
+
to represent hidden kinematic relationships of Parkinson disease.
|
69 |
+
• A 3D convolutional GAN dedicated to learning spatio-temporal pat-
|
70 |
+
terns of gait video-sequences. This architecture integrates an auto-encoder
|
71 |
+
net to learn video patterns in reconstruction tasks and a complementary
|
72 |
+
decoder that discriminates between reconstructed and original video se-
|
73 |
+
quences.
|
74 |
+
• A statistical test framework to validate the capability of the approach in
|
75 |
+
terms of generalization, coverage of data and discrimination capability for
|
76 |
+
any class with different groups between them, i.e. evaluate the general-
|
77 |
+
ization of Parkinsonian patients, at different stages of the disease, with
|
78 |
+
respect to a control population.
|
79 |
+
2. Current Work
|
80 |
+
Deep discriminative learning is nowadays the standard methodology in much
|
81 |
+
of the computer vision challenges, demonstrating remarkable results in very dif-
|
82 |
+
ferent domains. For instance, the Parkinson characterization is achieved from
|
83 |
+
2
|
84 |
+
|
85 |
+
sensor-based and vision-based approaches, following a supervised scheme to cap-
|
86 |
+
ture main observed relationships and to generate a particular prediction about
|
87 |
+
the condition of the patients [5]. These approaches in general are dedicated
|
88 |
+
to classify and discriminate between a control population and patients with the
|
89 |
+
Parkinson condition. The sensor-based approaches capture kinematics from mo-
|
90 |
+
tion signals, approximating PD classification, but in many cases these setups are
|
91 |
+
marker-invasive, alter natural gestures, and only have recognition capabilities
|
92 |
+
in advanced stages of the disease [11]. In contrast, the vision-based approaches
|
93 |
+
exploit postural and dynamic features, from video recordings, but the represen-
|
94 |
+
tations rely on supervised schemes that require a large amount of labeled
|
95 |
+
data to learn the inter and intra variability among classes [6–9]. Also, these
|
96 |
+
learning methodologies require that training data have well-balanced conditions
|
97 |
+
among classes, i.e., to have the same proportion of sample observations for each
|
98 |
+
of the considered class [12].
|
99 |
+
Unsupervised, semi-supervised and weakly supervised approaches have emerged
|
100 |
+
as a key alternative to model biomedical problems, with significant variabil-
|
101 |
+
ity among observations but limited training samples.
|
102 |
+
However, to the best
|
103 |
+
of our knowledge, these learning methods have been poorly explored and ex-
|
104 |
+
ploited in Parkinson characterization, with some preliminary alternatives that
|
105 |
+
use principles of Minimum Distance Classifiers and K-means Clustering [5, 13–
|
106 |
+
17]. In this sense, PD modelling from a non-supervised perspective may be
|
107 |
+
addressed from reconstruction, prediction and generative tasks [18], that help
|
108 |
+
to determine sample distributions and anticipate future postural and kinematic
|
109 |
+
events. In fact, the PD pattern distribution is key to understanding the multi-
|
110 |
+
factorial nature of PD, being determinant to define variations such as laterality
|
111 |
+
affectation of disease, abnormality sources, but also to define patient prognosis,
|
112 |
+
emulating the development of a particular patient during the gait.
|
113 |
+
3. Proposed approach
|
114 |
+
This work introduces a digital PD biomarker that embeds gait motor pat-
|
115 |
+
terns, derived from an anomaly video reconstruction task. Contrary to typical classification
|
116 |
+
modeling, we deal with one-class learning, i.e., we only learn
|
117 |
+
control gait patterns, approaching the high variability in training samples, with-
|
118 |
+
out using explicit disease labels. Hence, we hypothesize that a digital biomarker
|
119 |
+
of the disease can be modeled as a mixture of distributions, composed of samples
|
120 |
+
that were labeled as outliers, from learned representation. In consequence, we
|
121 |
+
analyze the embedding, reconstruction, and discrimination space to later define
|
122 |
+
rules to separate Parkinson from control vectors, during test validation. The
|
123 |
+
general pipeline of the proposed approach is illustrated in Figure 1.
|
124 |
+
3.1. A volumetric autoencoder to recover gait embedding patterns
|
125 |
+
Here, we are interested in capturing complex dynamic interactions during lo-
|
126 |
+
comotion, observed in videos as spatio-temporal textural interactions. From a
|
127 |
+
self-supervised strategy (video-reconstruction task), we implemented a 3D deep
|
128 |
+
3
|
129 |
+
|
130 |
+
Figure 1: Pipeline of the proposed model separated in volumetric auto-encoder to recover gait
|
131 |
+
patterns (a), Digital gait biomarker (b), Auxiliary task to discriminate reconstructions (c),
|
132 |
+
and statistical validation of learned classes distributions (d)
|
133 |
+
autoencoder that projects videos into low-dimensional vectors, learning the com-
|
134 |
+
plex gait dynamics into a latent space (see the architecture in Figure 1-a). For
|
135 |
+
doing so, 3D convolutional blocks were implemented, structured hierarchically,
|
136 |
+
with the main purpose to carry out a spatio-temporal reduction while increasing
|
137 |
+
feature descriptions. Formally, a gait sequence x ∈ N^{f×h×w×c}, where f denotes
|
138 |
+
the number of temporal frames, (h × w) are the spatial dimensions, and c is the
|
139 |
+
number of color channels in the video. This sequence is received as input in the
|
140 |
+
convolutional block which is convolved with a kernel κ of dimensions (kt, kh,
|
141 |
+
kw), where kt convolves on the temporal axis and kh, kw on the spatial axes.
|
142 |
+
At each level l of processing, we obtain a new volume x_l ∈ Z^{f/2^l × h/2^l × w/2^l × 2^l c}
|
143 |
+
that represents a bank of spatio-temporal feature maps. Each of these volumet-
|
144 |
+
ric features are dedicated to highlighting relevant gait patterns in a reduced z_G
|
145 |
+
projection, that summarizes a multiscale gait motion representation.
|
146 |
+
The resultant embedding vector zG encodes principal dynamic non-linear
|
147 |
+
correlations, which are necessary to achieve a video reconstruction x′. In this
|
148 |
+
study, the validated datasets are recorded from a relative static background, so,
|
149 |
+
the major dependencies to achieve an effective reconstruction lie in the temporal
|
150 |
+
and dynamic information expressed during the gait. Here, we adopt zG as a
|
151 |
+
digital gait biomarker that, among others, allows to study motion abnormalities
|
152 |
+
associated to the Parkinson disease.
|
153 |
+
To complete end-to-end learning, 3D transposed convolutional blocks were
|
154 |
+
implemented as the decoder, positioned in a symmetrical configuration with respect to the
|
155 |
+
encoder levels, and upsampling spatio-temporal dimensions to recover original
|
156 |
+
video-sequence. Formally, having the embedded feature vector z_G ∈ Z^n with
|
157 |
+
n coded features, we obtain x′_l ∈ Z^{2^l f × 2^l h × 2^l w × c/2^l} volumes from transpose
|
158 |
+
4
|
159 |
+
|
160 |
+
[Figure 1 diagram: (a) Generator with a 3D convolutional encoder producing z_G, a decoder, and a second encoder producing z′_G; (b) digital gait biomarker; (c) Discriminator with a 3D convolutional encoder, z_D and a dense output; (d) statistical validation of control and parkinson test sets.]
convolutional blocks until obtaining a video reconstruction x′ ∈ N^{f×h×w×c}. The
|
187 |
+
quality of reconstruction is key to guarantee the deep representation learning
|
188 |
+
in the autoencoder part of generator. To do this, an L1 loss is implemented
|
189 |
+
between x and x′, named the contextual loss: L_con = ∥x − x′∥_1.
|
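As a rough illustration of the generator just described (a sketch under assumed layer sizes, not the authors' implementation), the following PyTorch code builds a 3D convolutional encoder that halves the spatio-temporal dimensions per level while increasing features down to a latent z_G, a symmetric transposed-convolution decoder, and the contextual L1 loss; the channel counts, the latent size of 128 and the 64-frame 64×64 single-channel input are illustrative assumptions.

import torch
import torch.nn as nn


class Encoder3D(nn.Module):
    """3D conv encoder: each level halves (frames, height, width) and increases features."""
    def __init__(self, channels=(1, 16, 32, 64), latent=128):
        super().__init__()
        layers = []
        for cin, cout in zip(channels[:-1], channels[1:]):
            layers += [nn.Conv3d(cin, cout, kernel_size=4, stride=2, padding=1),
                       nn.BatchNorm3d(cout), nn.LeakyReLU(0.2)]
        self.features = nn.Sequential(*layers)
        # assumes a 64x64x64 input, reduced to 8x8x8 after three levels
        self.to_latent = nn.Conv3d(channels[-1], latent, kernel_size=8)

    def forward(self, x):
        return self.to_latent(self.features(x)).flatten(1)   # z_G in Z^latent


class Decoder3D(nn.Module):
    """Symmetric transposed-conv decoder that upsamples back to the clip size."""
    def __init__(self, channels=(64, 32, 16, 1), latent=128):
        super().__init__()
        self.from_latent = nn.ConvTranspose3d(latent, channels[0], kernel_size=8)
        pairs = list(zip(channels[:-1], channels[1:]))
        layers = []
        for idx, (cin, cout) in enumerate(pairs):
            layers.append(nn.ConvTranspose3d(cin, cout, kernel_size=4, stride=2, padding=1))
            if idx < len(pairs) - 1:
                layers += [nn.BatchNorm3d(cout), nn.LeakyReLU(0.2)]
        self.up = nn.Sequential(*layers)

    def forward(self, z):
        z = z.view(z.size(0), -1, 1, 1, 1)
        return torch.sigmoid(self.up(self.from_latent(z)))


encoder, decoder = Encoder3D(), Decoder3D()
x = torch.rand(2, 1, 64, 64, 64)              # (batch, channels, frames, height, width)
z_g = encoder(x)
x_rec = decoder(z_g)
loss_con = torch.mean(torch.abs(x - x_rec))   # contextual L1 loss, L_con = ||x - x'||_1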
190 |
+
3.2. Auxiliary task to discriminate reconstructions
|
191 |
+
From a generative learning, the capability of the deep representations to code
|
192 |
+
locomotion patterns may be expressed in the quality of video reconstructions
|
193 |
+
x′. Hence, we hypothesize that embedding descriptors zG that properly repro-
|
194 |
+
duce videos x′ should encode sufficient kinematic information of trained class,
|
195 |
+
allowing to discriminate among locomotion populations, i.e. between control
|
196 |
+
and Parkinson samples.
|
197 |
+
To measure this reconstruction capability, an auxiliary task is here intro-
|
198 |
+
duced to receive tuples with original and reconstructed videos (x, x′), and out-
|
199 |
+
put a discriminatory decision y = {y, y′}, regarding video source.
|
200 |
+
In such
|
201 |
+
case, y corresponds to the label for real videos, while y′ is the label for embeddings from reconstructed sequences. For doing so, we implement an adversarial L2 loss, expressed as: L_adv = ∥z_D − z′_D∥_2. In such case, a large difference between (z_D, z′_D) produces a significant error that is propagated to the generator. It should be noted that this minimization rule optimizes only the generator. The discriminator is instead minimized following a classical, equally weighted cross-entropy rule, as: L_disc = (log(y) + log(1 − y′))/2.
|
212 |
+
The auxiliary task to monitor video reconstruction is implemented from a
|
213 |
+
discriminatory convolutional net that follows the same structure as the encoder
|
214 |
+
in Figure 1-a, which halves the spatio-temporal dimensions while increasing the
|
215 |
+
features; finally, a dense layer determines its realness level (see Figure 1-
|
216 |
+
c.). Interestingly, from such deep convolutional representation the input videos
|
217 |
+
are projected to an embedding vector zD ∈ Zm with m coded features, which
|
218 |
+
thereafter may be used as latent vectors descriptors that also encode motion
|
219 |
+
and realness information. To guarantee an optimal coding into low-dimensional
|
220 |
+
embeddings, the reconstructed video x′ is mapped to an additional encoder
|
221 |
+
projecting representation basis in a z′G embedding. In such sense, zG and z′G
|
222 |
+
must be similar, leading x and x′ to be equal, which helps the generalization of the generator, following an encoder L2 loss: L_enc = ∥z_G − z′_G∥_2.
|
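A minimal sketch of how the three objectives could be wired together is given below, assuming the generator modules from the previous sketch plus a discriminator encoder and a second encoder applied to the reconstruction; the relative loss weights and all function names are assumptions of this sketch, not values reported by the authors.

import torch
import torch.nn.functional as F


def generator_step(x, encoder, decoder, encoder2, disc_encoder,
                   w_adv=1.0, w_con=50.0, w_enc=1.0):
    # w_adv, w_con, w_enc are assumed weights; the paper does not report them.
    z_g = encoder(x)
    x_rec = decoder(z_g)
    z_g2 = encoder2(x_rec)                    # z'_G from the reconstructed clip
    z_d, z_d2 = disc_encoder(x), disc_encoder(x_rec)
    loss_adv = F.mse_loss(z_d2, z_d)          # adversarial feature loss, ||z_D - z'_D||_2
    loss_con = F.l1_loss(x_rec, x)            # contextual loss, ||x - x'||_1
    loss_enc = F.mse_loss(z_g2, z_g)          # encoder loss, ||z_G - z'_G||_2
    return w_adv * loss_adv + w_con * loss_con + w_enc * loss_enc


def discriminator_step(y_real, y_fake):
    # y_real / y_fake: dense-layer probabilities for original and reconstructed clips;
    # equally weighted cross-entropy on the two terms.
    real = F.binary_cross_entropy(y_real, torch.ones_like(y_real))
    fake = F.binary_cross_entropy(y_fake, torch.zeros_like(y_fake))
    return 0.5 * (real + fake)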
225 |
+
3.3. A Digital gait biomarker from anomaly embeddings
|
226 |
+
The video samples are high-dimensional motor observations that can be
|
227 |
+
projected into a low-dimensional embedding space, through the proposed model.
|
228 |
+
Formally, each video sample is an independent and random variable x^{(i)}_ℓ from the class (i) that follows a distribution x^{(i)}_ℓ ∈ Ψ^{(i)}[µ(x^{(i)}), σ(x^{(i)})] with mean µ(x^{(i)}) and standard deviation σ(x^{(i)}). We then consider the proposed model as an operator that transforms each sample F(x^{(i)}_ℓ) into a low-dimensional space, while preserving the original distribution, as: F(x^{(i)}_ℓ) ∈ Ψ^{(i)}[F(µ(x^{(i)})), F(σ(x^{(i)}))].
|
239 |
+
From this assumption we can measure statistical properties over low-dimensional
|
240 |
+
space and explore properties as the generalization of the modeling.
|
241 |
+
5
|
242 |
+
|
243 |
+
Figure 2: Field of action of standard metrics of the model, where the dataset used only covers
|
244 |
+
the intersection area but the model performance for new samples is not being evaluated
|
245 |
+
Hence, we can adopt a new digital kinematic descriptor by considering em-
|
246 |
+
bedding vector differences between (zG, z′G). For instance, large difference be-
|
247 |
+
tween zG, z′G may suggest a new motion class, regarding the original distribu-
|
248 |
+
tion of training. From such approximation, we can model a scheme of one-class
|
249 |
+
learning (in this case, anomaly learning) over the video distributions from the
|
250 |
+
low-embedding differences observations. This scheme learns data distribution
|
251 |
+
without any label constraint. Furthermore, if we train the architecture only with
|
252 |
+
videos of a control population (c), we can define a discriminatory problem from
|
253 |
+
the reconstruction, by inducing: ∥zG − z′G∥2 ≤ τ → c ∧ ∥zG − z′G∥2 > τ → p,
|
254 |
+
where p is the label imposed on a video with a significant reconstruction error and
|
255 |
+
projected to a Parkinson population.
|
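The induced rule can be read as a simple scoring-and-thresholding step; the sketch below assumes the modules defined in the earlier sketches and a threshold τ calibrated on a validation split (all names are illustrative).

import torch


def anomaly_score(x, encoder, decoder, encoder2):
    # per-video embedding error ||z_G - z'_G||_2
    z_g = encoder(x)
    z_g2 = encoder2(decoder(z_g))
    return torch.linalg.vector_norm(z_g - z_g2, dim=1)


def classify(scores, tau):
    # scores <= tau -> control ('c'); scores > tau -> parkinson-like anomaly ('p')
    return ['c' if s <= tau else 'p' for s in scores.tolist()]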
256 |
+
3.4. Statistical validation setup
|
257 |
+
This new discriminatory descriptor can be validated following standard met-
|
258 |
+
rics into binary projection ˆy = {c, p}. For a particular threshold τ we can re-
|
259 |
+
cover metrics such as the accuracy, precision and recall. Also, ROC-AUC (the
|
260 |
+
Area Under the Curve) can estimate a performance by iterating over different
|
261 |
+
τ values. However, these metrics tell us about the capability of the proposed
|
262 |
+
approach to discriminate classes but not about data distribution among classes
|
263 |
+
[19, 20]. To robustly characterize a Parkinson digital biomarker, it is then demand-
|
264 |
+
ing to explore statistical alternatives that evidence the generaliza-
|
265 |
+
tion of the embedded descriptor and estimate the performance for new samples
|
266 |
+
(Figure 2 illustrates typical limitations of standard classification metrics for un-
|
267 |
+
seen data being positioned on unknown places). In fact, we hypothesize that
|
268 |
+
Parkinson and control distributions, observed from an embedding representa-
|
269 |
+
tion, should remain with equal properties from training and test samples. To
|
270 |
+
address such an assumption, this work explores two statistical properties to
|
271 |
+
validate the shape and variance of motor population distributions:
|
272 |
+
6
|
273 |
+
|
274 |
+
3.4.1. Variance analysis from Homoscedasticity
|
279 |
+
Here, an equality among the variances of data distributions is estimated through homoscedasticity operators. Particularly, this analysis is carried out for two independent groups ⟨k⟩, ⟨u⟩ with cardinalities |x^{(i)}_{⟨k⟩}|, |x^{(j)}_{⟨u⟩}| of classes (i), (j). Here, two dispersion metrics were considered, regarding the Levene mean (∆^{⟨g⟩}_ℓ = |x^{⟨g⟩}_ℓ − µ(x^{⟨g⟩})|) and the Brown-Forsythe median (∆^{⟨g⟩}_ℓ = |x^{⟨g⟩}_ℓ − med(x^{⟨g⟩})|). From such dispersion distances, the test statistic W between x^{(i)}_{⟨k⟩} and x^{(j)}_{⟨u⟩} can be defined as:

W = \frac{N − |P|}{|P| − 1} \cdot \frac{\sum_{g∈P} |x^{⟨g⟩}| (\mu(\Delta^{⟨g⟩}) − \mu(\Delta))^2}{\sum_{g∈P} \sum_{ℓ∈x^{⟨g⟩}} (\Delta^{⟨g⟩}_ℓ − \mu(\Delta^{⟨g⟩}))^2}    (1)
|
308 |
+
where P = {x^{(i)}_{⟨k⟩}, x^{(j)}_{⟨u⟩}, · · ·} is the union set of every data group from all classes, |P| is the cardinality of P, N is the sum of all |x^{⟨g⟩}| cardinalities, µ(∆^{⟨g⟩}) corresponds to the mean of the ∆^{⟨g⟩}_ℓ values of group ⟨g⟩, and µ(∆) is the overall mean of every ∆^{⟨g⟩}_ℓ value in P. This estimation evaluates whether the samples of two different groups have equal variance for the same class, leading us to the first step in model generalization for any new sample related to the trained data. Additionally, the homoscedasticity property is useful to check whether two groups remain in the same distribution range, because two distributions can have the same shape (frequency) but be placed at different domain ranges, indicating a weakness of the model in new data domains.
|
324 |
+
From a statistical test perspective, the value W rejects the null hypothesis
|
325 |
+
of homoscedasticity when W > f_{α,|P|−1,N−|P|}, where f_{α,|P|−1,N−|P|} is the upper
|
326 |
+
critical value of the Fisher distribution with |P|−1 and N−|P| degrees of freedom
|
327 |
+
at a significance level of α (generally 5%). This metric allows to estimate the
|
328 |
+
clustering level for the model and determine if new data samples from another
|
329 |
+
domain are contained in data distributions of control or Parkinson patients.
|
330 |
+
Then, the homoscedasticity value of x^{(i)}_{⟨k⟩} against x^{(j)}_{⟨u⟩} is defined as follows:

H(x^{(i)}_{⟨k⟩}, x^{(j)}_{⟨u⟩}) =
\begin{cases}
  \frac{W(\mu(x^{(i)}_{⟨k⟩}, x^{(j)}_{⟨u⟩})) + W(med(x^{(i)}_{⟨k⟩}, x^{(j)}_{⟨u⟩}))}{2} & i = j ∧ k ≠ u \\
  0 & i = j ∧ k = u \\
  \frac{2 − (W(\mu(x^{(i)}_{⟨k⟩}, x^{(j)}_{⟨u⟩})) + W(med(x^{(i)}_{⟨k⟩}, x^{(j)}_{⟨u⟩})))}{2} & i ≠ j
\end{cases}    (2)
|
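As an illustration, the pairwise homoscedasticity value of Eq. (2) could be computed with SciPy's Levene and Brown-Forsythe tests; reading W(·) as a binary indicator that equals 1 when the null hypothesis of equal variances is not rejected at α = 0.05 is an assumption of this sketch, as are the function names.

import numpy as np
from scipy import stats


def w_indicator(a, b, center, alpha=0.05):
    # Levene (center='mean') or Brown-Forsythe (center='median') test between two groups.
    _, p_value = stats.levene(a, b, center=center)
    return 1.0 if p_value > alpha else 0.0


def homoscedasticity_value(x_ik, x_ju, same_class, same_group, alpha=0.05):
    if same_class and same_group:
        return 0.0
    w_mean = w_indicator(x_ik, x_ju, center="mean", alpha=alpha)
    w_median = w_indicator(x_ik, x_ju, center="median", alpha=alpha)
    if same_class:                                   # i = j, k != u: reward equal variances
        return (w_mean + w_median) / 2.0
    return (2.0 - (w_mean + w_median)) / 2.0         # i != j: reward different variances


# toy usage with hypothetical embedding-error groups
rng = np.random.default_rng(0)
control_a, control_b = rng.normal(0, 1, 80), rng.normal(0, 1, 80)
parkinson = rng.normal(3, 2, 80)
print(homoscedasticity_value(control_a, control_b, same_class=True, same_group=False))
print(homoscedasticity_value(control_a, parkinson, same_class=False, same_group=False))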
368 |
+
3.4.2. Shapeness analysis from ChiSquare
|
369 |
+
Here, we quantify the “shapeness”, focused on having equal distributions.
|
370 |
+
Following the ChiSquare test χ² between x^{(i)}_{⟨k⟩} and x^{(j)}_{⟨u⟩} as:

χ² = \sum_{ℓ} \frac{(x^{⟨k⟩}_ℓ − x^{⟨u⟩}_ℓ)^2}{x^{⟨u⟩}_ℓ}    (3)
|
386 |
+
From this rule, it should be considered that both groups must have the
|
387 |
+
same cardinality (|x⟨k⟩| = |x⟨u⟩|) and the respective data sorting determines
|
388 |
+
the direction of comparison (i.e. the direction goes from group ⟨k⟩ to have the
|
389 |
+
same distribution of ⟨u⟩). To address these issues, we make the smaller group
|
390 |
+
repeat its elements, without adding new unknown data, to preserve
|
391 |
+
its mean and standard deviation, and secondly, we evaluate both directions to
|
392 |
+
quantify the similarity with both χ²(x^{(i)}_{⟨k⟩} → x^{(j)}_{⟨u⟩}) and χ²(x^{(j)}_{⟨u⟩} → x^{(i)}_{⟨k⟩}).
|
397 |
+
The value χ² rejects the null hypothesis of equal distributions when χ² > χ²_{α,|x^{⟨g⟩}|−1}, where χ²_{α,|x^{⟨g⟩}|−1} is the upper critical value of the Chi-Square distribution with |x^{⟨g⟩}| − 1 degrees of freedom at a significance level of α. We define the shapeness value as:
|
403 |
+
Sh(x^{(i)}_{⟨k⟩}, x^{(j)}_{⟨u⟩}) =
\begin{cases}
  \frac{χ²(x^{(i)}_{⟨k⟩} → x^{(j)}_{⟨u⟩}) + χ²(x^{(j)}_{⟨u⟩} → x^{(i)}_{⟨k⟩})}{2} & i = j ∧ k ≠ u \\
  0 & i = j ∧ k = u \\
  \frac{2 − (χ²(x^{(i)}_{⟨k⟩} → x^{(j)}_{⟨u⟩}) + χ²(x^{(j)}_{⟨u⟩} → x^{(i)}_{⟨k⟩}))}{2} & i ≠ j
\end{cases}    (4)
|
438 |
+
This test can be used directly as an indicator of how relatively far the
|
439 |
+
samples are from each other.
|
440 |
+
Hence, a higher value of this metric means that
|
441 |
+
the samples will be clearly different and separated, but there is the possibility
|
442 |
+
that control patients’ distribution is near to parkinson’s while parkinson can be
|
443 |
+
clearly far. Finally, Algorithm 1 shows the steps to calculate the proposed
|
444 |
+
homoscedasticity and shapeness level for the model.
|
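For illustration, a sketch of the shapeness value of Eqs. (3)-(4) and of the Algorithm 1 aggregation is given below; reading χ²(·) as a binary indicator of the test outcome, sorting both groups before pairing their elements, assuming positive-valued scores, and reusing the homoscedasticity_value function from the previous sketch as h_fn are all assumptions of this sketch.

import itertools
import numpy as np
from scipy import stats


def chi2_indicator(x_k, x_u, alpha=0.05):
    # Eq. (3) in the direction k -> u, after equalizing cardinalities by repeating
    # elements of the smaller group (no new data is added).
    x_k, x_u = np.sort(x_k), np.sort(x_u)
    if len(x_k) < len(x_u):
        x_k = np.resize(x_k, len(x_u))
    elif len(x_u) < len(x_k):
        x_u = np.resize(x_u, len(x_k))
    chi2 = np.sum((x_k - x_u) ** 2 / x_u)
    critical = stats.chi2.ppf(1.0 - alpha, df=len(x_u) - 1)
    return 1.0 if chi2 <= critical else 0.0


def shapeness_value(x_ik, x_ju, same_class, same_group):
    # Eq. (4): both comparison directions are averaged.
    if same_class and same_group:
        return 0.0
    both = chi2_indicator(x_ik, x_ju) + chi2_indicator(x_ju, x_ik)
    return both / 2.0 if same_class else (2.0 - both) / 2.0


def dataset_levels(groups_per_class, h_fn, s_fn):
    # Aggregation mirroring Algorithm 1: average H and Sh over all group pairs.
    # groups_per_class: {class_name: [np.ndarray, ...]}
    h = s = 0.0
    items = [(c, g) for c, gs in groups_per_class.items() for g in gs]
    for (ci, xa), (cj, xb) in itertools.combinations(items, 2):
        h += h_fn(xa, xb, same_class=(ci == cj), same_group=False)
        s += s_fn(xa, xb, same_class=(ci == cj), same_group=False)
    d = len(items) * (len(items) - 1) / 2           # C(N, 2) pairings
    return h / d, s / d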
445 |
+
4. Experimental setup
|
446 |
+
4.1. Datasets
|
447 |
+
In this study, 37 participants were recruited from control (23 subjects with av-
|
448 |
+
erage age of 64.7 ± 13 ) and parkinson (14 subjects with an average age of
|
449 |
+
72.8 ± 6.8) populations. The patients were invited to walk (without any mark-
|
450 |
+
ers protocol), developing a natural locomotion gesture. Parkinson participants
|
451 |
+
were evaluated by a physiotherapist (with more than five years of experience)
|
452 |
+
and stratified according to the H&Y scale (level 1.0 = 2, level 1.5 = 1, level
|
453 |
+
2.5 = 5, and level 3.0 = 6 participants). These patients signed an informed
|
454 |
+
consent, and the dataset has the approval of the Ethics Committee
|
455 |
+
of Universidad Industrial de Santander.
|
456 |
+
For recording, during natural walking over around 3 meters, the locomotion
|
457 |
+
was registered 8 times from a sagittal view, following semi-controlled condi-
|
458 |
+
tions (a green background). In this study we use a conventional optical camera
|
459 |
+
8
|
460 |
+
|
461 |
+
Algorithm 1 Calculation of homoscedasticity and shapeness metric for any quantity of data groups with any classes
Require: C = {c_0, c_1, · · · , c_n}                                                  ▷ Classes in dataset
Require: G_{c_i} = {x^{(i)}_{⟨0⟩}, x^{(i)}_{⟨1⟩}, · · · , x^{(i)}_{⟨m_i⟩}} ∀ c_i ∈ C  ▷ Partitions per class
  h ← 0
  s ← 0
  for any pair (c_i, c_j) in C do
    for any pair (x^{(i)}_{⟨k⟩}, x^{(j)}_{⟨u⟩}) in (G_{c_i} × G_{c_j}) do
      h ← h + H(x^{(i)}_{⟨k⟩}, x^{(j)}_{⟨u⟩})        ▷ H defined in eq. 2
      s ← s + Sh(x^{(i)}_{⟨k⟩}, x^{(j)}_{⟨u⟩})       ▷ Sh defined in eq. 4
    end for
  end for
  N ← Σ_i^n |G_{c_i}|
  d ← C(N, 2)                                        ▷ Combinations of N in groups of 2
  h ← h / d                                          ▷ Homoscedasticity level metric
  s ← s / d                                          ▷ Shapeness level metric
|
503 |
+
Nikon D3500, which outputs sequences at 60 fps with a spatial resolution of 1080p.
|
504 |
+
The camera was positioned to cover the whole participant silhouette. Every se-
|
505 |
+
quence was spatially resized to 64×64 pixels, and temporally cropped to 64
|
506 |
+
frames. Besides, the videos were normalized and a subsequent subsampling was
|
507 |
+
carried out to ensure a complete gait cycle. To follow one learning class, the
|
508 |
+
proposed approach was trained only with control subjects. In such case, the set
|
509 |
+
of control patients was split in common train, validation and test partitions of
|
510 |
+
11, 3 and 9 randomly selected patients, respectively. For parkinson participants,
|
511 |
+
we took validation and test partitions of 3 and 11 randomly selected patients
|
512 |
+
to complement validation and test control sets. Hence, we balanced data for
|
513 |
+
standard and statistical validation purposes.
|
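A possible preprocessing sketch matching the description above is given below; OpenCV and grayscale frames are assumed choices here, since the authors do not state the exact tooling.

import cv2
import numpy as np


def load_gait_clip(video_path, size=64, n_frames=64):
    # Read all frames, convert to grayscale and resize each frame to 64x64.
    cap = cv2.VideoCapture(video_path)
    frames = []
    ok, frame = cap.read()
    while ok:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frames.append(cv2.resize(gray, (size, size)))
        ok, frame = cap.read()
    cap.release()
    clip = np.stack(frames).astype(np.float32) / 255.0          # normalize to [0, 1]
    idx = np.linspace(0, len(clip) - 1, n_frames).astype(int)    # temporal subsampling to 64 frames
    return clip[idx][None]                                       # (1, 64, 64, 64) = (c, f, h, w)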
514 |
+
4.1.1. External dataset validation
|
515 |
+
A main interest in this work is to measure the capability to generalize motion
|
516 |
+
patterns from anomaly deep representations. Also, we are interested in mea-
|
517 |
+
suring the capability of embedding descriptors to discriminate PD from other
|
518 |
+
classes, even for videos captured with external protocols. Hence, in this work
|
519 |
+
we only evaluate the proposed approach with a public dataset of walking videos
|
520 |
+
that include knee-osteoarthritis (50 subjects with an average age of 56.7 ± 12.7),
|
521 |
+
parkinson (16 subjects with an average age of 68.6 ± 8.3) and control (30 sub-
|
522 |
+
jects with an average age of 43.7 ± 9.3) patients [21]. The 96 participants were
|
523 |
+
recorded with a static green background, blurred faces and markers on their
|
524 |
+
bodies. Following the same methodology as for our own data, each sequence was
|
525 |
+
spatially resized to 64×64 pixels, and temporally cropped to 64 frames, and
|
526 |
+
finally normalized and subsampled ensuring a complete gait cycle.
|
527 |
+
9
|
528 |
+
|
529 |
+
4.2. Model configuration
|
530 |
+
The introduced strategy has in the generator an autoencoder and encoder
|
531 |
+
net, while the discriminator has an encoder net. The encoders use three layers
|
532 |
+
that include 3D (4×4×4 and stride 2×2×2) convolutions, BatchNormalization
|
533 |
+
(momentum of 0.1 and epsilon of 1 × 10−5) and LeakyRelu (α = 0.2).
|
534 |
+
At
|
535 |
+
each progressive level, the input is reduced to half in spatial and temporal
|
536 |
+
dimensions while the features are increased twice. The decoder network follows
|
537 |
+
a symmetrical configuration against the encoder with same layers as encoder
|
538 |
+
(replacing 3D convolutions by 3D transpose convolutions). The overall structure
|
539 |
+
is summarized in table 1.
|
540 |
+
Table 1: Generator and Discriminator Networks structure summary
|
541 |
+
Module          Network    Levels    Input           Output
Generator       Encoder    5         64×64×64×1      1×1×1×n
                Decoder    5         1×1×1×n         64×64×64×1
Discriminator   Encoder    5         64×64×64×1      1×1×1×1
|
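One encoder level of this configuration could look as follows in PyTorch; this is a sketch of the stated hyperparameters (4×4×4 kernel, stride 2, BatchNorm with momentum 0.1 and eps 1e-5, LeakyReLU with α = 0.2), not the authors' exact code.

import torch.nn as nn


def encoder_level(in_ch, out_ch):
    # halves the spatial and temporal sizes while doubling the feature channels
    return nn.Sequential(
        nn.Conv3d(in_ch, out_ch, kernel_size=4, stride=2, padding=1, bias=False),
        nn.BatchNorm3d(out_ch, eps=1e-5, momentum=0.1),
        nn.LeakyReLU(0.2, inplace=True),
    )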
560 |
+
5. Evaluations and Results
|
561 |
+
The proposed strategy was exhaustively validated with respect to the ca-
|
562 |
+
pability to recognize parkinsonian inputs as abnormal class patterns in archi-
|
563 |
+
tectures trained only with control patterns and under challenging unbalanced
|
564 |
+
and scarce scenarios. Hence, in the first experiment, the proposed strategy was
|
565 |
+
trained only with control samples from our own dataset, following a video recon-
|
566 |
+
struction pretext task. Hence, encoder (∥z_G − z′_G∥_2), contextual (∥x − x′∥_1), and adversarial (∥z_D − z′_D∥_2) embedding errors were recovered as locomotor
|
570 |
+
descriptors of the observed sequences. For classification purposes, these errors
|
571 |
+
were binarized by imposing a threshold value, as: τzG = 1.768 for encoder,
|
572 |
+
τx = 0.147 for contextual, and τzD = 0.429 for adversarial errors. Table 2 sum-
|
573 |
+
marizes the achieved performance of three locomotor descriptors according to
|
574 |
+
standard classification metrics. In general, the proposed strategy reports a re-
|
575 |
+
markable capability to label parkinson patterns as abnormal samples, which are
|
576 |
+
excluded from trained representation. Interestingly, the contextual errors have
|
577 |
+
the highest value among the others to classify between control and parkinson
|
578 |
+
patients, reporting a remarkable 86.9% in AUC, with mistakes in only 64 video
|
579 |
+
clips (approximately 3 patients).
|
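For illustration, the binarization and scoring of the per-video errors could be done as follows; scikit-learn is an assumed choice, and the error and label arrays are hypothetical inputs.

import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score


def evaluate(errors, labels, tau):
    # labels: 1 = parkinson (abnormal), 0 = control; errors: per-video anomaly scores
    preds = (np.asarray(errors) > tau).astype(int)
    return {
        "acc": accuracy_score(labels, preds),
        "pre": precision_score(labels, preds),
        "rec": recall_score(labels, preds),
        "roc_auc": roc_auc_score(labels, errors),   # threshold-free
    }

# e.g. evaluate(encoder_errors, labels, tau=1.768) for the encoder descriptor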
580 |
+
For robustness validation, we are also interested in the distribution out-
|
581 |
+
put of predictions, which may suggest the capability of generalization of the
|
582 |
+
model. For doing so, we also validate locomotion descriptors with respect to
|
583 |
+
10
|
584 |
+
|
585 |
+
Table 2: Model performance for encoder, contextual and adversarial losses using standard
|
586 |
+
metrics when the model trains with control patients. Acc, Pre, Rec, Spe, F1 are for accuracy,
|
587 |
+
precision, recall, specificity and f1 score respectively.
|
588 |
+
Loss          Acc      Pre      Rec      Spe      F1       ROC-AUC
Encoder       53.8%    89.5%    20.4%    96.9%    33.2%    58.7%
Contextual    85.7%    96.6%    77.4%    96.4%    85.7%    86.9%
Adversarial   75.5%    94.3%    60%      95.4%    73.3%    77.7%
|
616 |
+
introduced homoscedasticity and shapeness validation. Table 3 summarizes the
|
617 |
+
results achieved by each locomotion embedding descriptor, contrasting with the
|
618 |
+
reported results from standard metrics. In such case, the validated metrics sug-
|
619 |
+
gest that contextual errors may be overfitted for the trained dataset and the
|
620 |
+
recording conditions, which may be restrictive for generalized architecture in
|
621 |
+
other datasets. In contrast, the encoder descriptor shows evident statistical ro-
|
622 |
+
bustness from variance and shapeness distributions. Furthermore, the encoder
|
623 |
+
losses evidence a clear separation between the control and parkinson distribu-
|
624 |
+
tion in Figure 3, where even the proposed model can separate stages of Hoehn
|
625 |
+
& Yahr, except for the difference between the 2.5 and 3.0 levels, where the ChiSquare test
|
626 |
+
shows that both distributions remain equal, meaning that both stages are
|
627 |
+
difficult to model.
|
628 |
+
Table 3: Model performance for encoder, contextual and adversarial losses using the proposed
|
629 |
+
statistical metrics when the model trains with control patients.
|
630 |
+
Loss          Homoscedasticity    Shapeness
Encoder       80%                 70%
Contextual    50%                 40%
Adversarial   50%                 45%
|
642 |
+
Following one of the main interests in this work, i.e., the generaliza-
|
643 |
+
tion capability, the proposed strategy was validated with an external public
|
644 |
+
dataset (without any extra training) that includes parkinson (16 patients), knee-
|
645 |
+
osteoarthritis (50 patients) and control patients (30 patients) [21]. Table 4 sum-
|
646 |
+
marizes the achieved results to discriminate among the three unseen classes,
|
647 |
+
evidencing a notable performance following encoder embedding representation.
|
648 |
+
It should be noted that the Encoder achieves the highest ROC-AUC, reporting an
|
649 |
+
average of 75%, being the more robust representation, as suggested by statistical
|
650 |
+
11
|
651 |
+
|
652 |
+
Figure 3: Data distribution given by the proposed model for control and parkinson samples
|
653 |
+
by Hoehn & Yahr levels.
|
654 |
+
homoscedasticity and shapeness validation. The contextual and the adversarial
|
655 |
+
losses have better accuracy, precision and recall, but the specificity suggests
|
656 |
+
that there is not any evidence of correctly classifying control subjects. In such
|
657 |
+
sense, the model label all samples as abnormal from trained representation.
|
658 |
+
In contrast, the encoder element in the network (Figure 1-a) capture relevant
|
659 |
+
gait patterns to distinguish between control, parkinson and knee-osteoarthritis
|
660 |
+
patients.
|
661 |
+
Table 4: Model performance for encoder, contextual and adversarial losses using the proposed
|
662 |
+
model without retraining and same thresholds as Table 2. Acc, Pre, Rec, Spe, F1 are for
|
663 |
+
accuracy, precision, recall, specificity and f1 score respectively.
|
664 |
+
Loss          Acc      Pre      Rec      Spe      F1       ROC-AUC
Encoder       62.6%    97.9%    58.1%    91.9%    72.9%    75%
Contextual    86.7%    86.7%    100%     0%       92.9%    50%
Adversarial   87.8%    89.4%    97.4%    24.9%    93.3%    61.2%
|
692 |
+
Along the same line, the external dataset was also validated with respect
|
693 |
+
to homoscedasticity and shapeness metrics. Table 5 summarizes the achieved
|
694 |
+
results from the distribution representation of output probabilities. As expected,
|
695 |
+
the results reinforce the fact that embeddings from the Encoder have much better
|
696 |
+
generalization than the other losses, allowing to discriminate among three
|
697 |
+
different unseen classes. Remarkably, the results suggest that control subjects
|
698 |
+
of the external dataset belong to the trained control set. This fact is relevant
|
699 |
+
because it indicates that the architecture is principally dedicated to coding locomotor
|
700 |
+
patterns without strict restrictions on capture conditions. To complement
|
701 |
+
such results, output probabilities from three classes are summarized in violin
|
702 |
+
plots, as illustrated in Figure 4 which shows the separation between the classes
|
703 |
+
of parkinson and knee-osteoarthritis, and also between levels of the diseases, being
|
704 |
+
remarkable the locomotor affectations produced by the patients diagnosed with
|
705 |
+
knee-Osteoarthritis.
|
706 |
+
12
|
707 |
+
|
708 |
+
[Figure 3 plot: violin distributions of encoder errors for Control and H&Y stages 1.0, 1.5, 2.5 and 3.0, annotated with pairwise p-values.]
Table 5: Model performance for encoder, contextual and adversarial losses using the proposed
|
730 |
+
statistical metrics and model as Table 2.
|
731 |
+
Loss          Homoscedasticity    Shapeness
Encoder       66.7%               66.7%
Contextual    83.4%               0%
Adversarial   16.7%               16.7%
|
743 |
+
Figure 4: Data distribution given by the proposed model for control, parkinson (PD) and
|
744 |
+
knee-osteoarthritis (KOA) samples by levels where EL is early, MD medium and SV severe.
|
745 |
+
Alternatively, in an additional experiment we train using only patients di-
|
746 |
+
agnosed with parkinson to force the architecture to extract these abnormal
|
747 |
+
locomotion patterns. In such cases, the videos from control subjects are associ-
|
748 |
+
ated with abnormal responses from trained architecture. Table 6 summarizes the
|
749 |
+
achieved results from standard and statistical distribution metrics. As expected,
|
750 |
+
from this configuration of the architecture is achieved a lower classification per-
|
751 |
+
formance because the high variability and complexity to code the disease. In
|
752 |
+
fact, parkinson patients may manifest totally different locomotion affectations
|
753 |
+
at the same stage. For such reason, the architecture has major challenges to
|
754 |
+
discriminate control subjects and therefore lower agreement with ground truth
|
755 |
+
labels. The statistical homoscedasticity and shapeness metrics confirm such is-
|
756 |
+
sue achieving scores lower than 50% and indicating that the model, from such
|
757 |
+
configuration, is not generalizable. In this configuration, it would be demanding
|
758 |
+
a larger amount of parkinson patients to deal with disease variability.
|
759 |
+
6. Discussion
|
760 |
+
This work presented a deep generative scheme, designed under the one-class-
|
761 |
+
learning methodology to model gait locomotion patterns in markerless video
|
762 |
+
sequences. The proposed architecture is trained under the video reconstruction
|
763 |
+
pretext task, being crucial to capture kinematic behaviors without the asso-
|
764 |
+
13
|
765 |
+
|
766 |
+
[Figure 4 plot: violin distributions of encoder errors for Control, EL/MD/SV PD and EL/MD/SV KOA groups, annotated with pairwise p-values.]
Table 6: Model performance for encoder, contextual and adversarial losses using standard
|
791 |
+
metrics when the model trains with parkinson patients. Acc, Pre, Rec, Spe, Homo and Shape
|
792 |
+
are for accuracy, precision, recall, specificity, homoscedasticity and shapeness respectively.
|
793 |
+
Loss          Acc      Pre      Rec      Spe      Homo    Shape    ROC-AUC
Encoder       62.5%    55.2%    88.9%    40.9%    45%     50%      64.9%
Contextual    71.5%    93.5%    73.7%    50%      50%     40%      61.9%
Adversarial   68.8%    64.1%    69.4%    68.2%    45%     40%      68.8%
|
825 |
+
ciation of expert diagnosis criteria. From an exhaustive experimental setup, the
|
826 |
+
proposed approach was trained with videos recorded from a control population,
|
827 |
+
and parkinsonian patterns were then associated with anomaly patterns through
|
828 |
+
the design of a discrimination metric that operates from embedding represen-
|
829 |
+
tations. From an owner dataset, the proposed approach achieves an ROC-AUC
|
830 |
+
of 86.9%, while for an external dataset, unseen during training, the
|
831 |
+
proposed approach achieved an average ROC-AUC of 75%.
|
832 |
+
One of the main issues addressed in this work was to train a
|
833 |
+
generative architecture with a sufficient generalization capability to capture
|
834 |
+
kinematic patterns without a bias associated to the capture setups. To carefully
|
835 |
+
select such architectures, this study introduced homoscedasticity and shapeness
|
836 |
+
as complementary statistical rules to validate the models. From these metrics
|
837 |
+
it was evidenced that encoder embeddings bring major capabilities to general-
|
838 |
+
ize models, compared with the contextual and adversarial losses, achieving on average
|
839 |
+
an 80% and 70% for homoscedasticity and shapeness, respectively. Once these
|
840 |
+
metrics defined the best architecture and embedding representation, we confirm
|
841 |
+
the selection by using the external dataset with different capture conditions and
|
842 |
+
even with the study of a new disease class in the population, i.e., knee-
|
843 |
+
osteoarthritis. Remarkably, the proposed approach generates embeddings with
|
844 |
+
sufficient capabilities to discriminate among different unseen populations.
|
845 |
+
In the literature, different efforts have been reported to develop computational
|
846 |
+
strategies to discriminate parkinson from control patterns, following markerless
|
847 |
+
and sensor-based observations [6–9, 22]. For instance, volumetric architectures
|
848 |
+
have been adjusted from discriminatory rules taking minimization rules associ-
|
849 |
+
ated with expert diagnosis annotations [6, 8]. These approaches have reported
|
850 |
+
remarkable results (on average a 95% ROC-AUC with 22 patients). Also, Sun
|
851 |
+
et al. [7] proposed an architecture that takes frontal gait views and, together with
|
852 |
+
volumetric convolution layers, discriminates the level of freeze in the gait for
|
853 |
+
parkinson patients with an accuracy of 79.3%. Likewise, Kour et al. [22] de-
|
854 |
+
velops a sensor-based approach to correlate postural relationships with several
|
855 |
+
annotated disease groups (reports an accuracy = 92.4%, precision = 90.0% with
|
856 |
+
14
|
857 |
+
|
858 |
+
50 knee-osteoarthritis, 16 parkinson and 30 control patients).
|
859 |
+
Nonetheless,
|
860 |
+
such schemes are restricted to a specific recording scenario and pose observa-
|
861 |
+
tional configurations. Besides, the minimization of these representations may be
|
862 |
+
biased by label annotations associated with expert diagnostics. In contrast, the
|
863 |
+
proposed approach adjusts the representation using only control video sequences
|
864 |
+
without any expert label intervention during the architecture tuning. In such
|
865 |
+
case, the architecture has major flexibility to code potential hidden relation-
|
866 |
+
ships associated with locomotor patterns. In fact, the proposed approach was
|
867 |
+
validated with raw video sequences, reported in [22], surpassing precision scores
|
868 |
+
without any additional training to observe such videos. Moreover, the proposed
|
869 |
+
approach uses video sequences instead of key-point representations, which
|
870 |
+
coarsely minimize dynamic complexity during locomotion.
|
871 |
+
The recovered generalization metric scores (homoscedasticity = 80%, shapeness
|
872 |
+
= 70% ) suggest that some patients have different statistical distributions, an
|
873 |
+
expected result from variability in the control population, as well as the variability
|
874 |
+
associated with parkinson disease phenotyping. In this sense, it demands
|
875 |
+
a large set of training data to capture additional locomotion components, to-
|
876 |
+
gether with a sufficient variability spectrum. Nonetheless, the re-training of the
|
877 |
+
architecture should be supervised from output population distributions to avoid
|
878 |
+
overfitting regarding specific training scenarios. The output reconstruction may
|
879 |
+
also be extended as anomaly maps to evidence in the spatial domain the regions
|
880 |
+
with anomalies, which further may represent some association with the disease
|
881 |
+
to help experts in the correct identification of patient prediction.
|
882 |
+
7. Conclusions
|
883 |
+
This work presented a deep generative architecture with the capability of dis-
|
884 |
+
covering anomalous locomotion patterns by convolving entire video sequences in a
|
885 |
+
3D scheme. Interestingly, a parkinson disease population was projected to the
|
886 |
+
architecture, returning not only outlier rejection but coding a new locomotion
|
887 |
+
distribution with separable patterns with respect to the trained control popu-
|
888 |
+
lation. These results evidenced a potential use of this learning and architecture
|
889 |
+
scheme to recover potential digital biomarkers, coded into embedding represen-
|
890 |
+
tations. The proposed approach was validated with standard classification rules
|
891 |
+
but also with statistical measures to validate the capability of generalization.
|
892 |
+
Future works include the validation of proposals among different stages and
|
893 |
+
the use of federated scenarios with different experimental capture setups to test
|
894 |
+
performance on real scenarios.
|
895 |
+
8. Acknowledgements
|
896 |
+
The authors thank Ministry of science, technology and innovation of Colom-
|
897 |
+
bia (MINCIENCIAS) for supporting this research work by the project “Mecan-
|
898 |
+
ismos computacionales de aprendizaje profundo para soportar tareas de local-
|
899 |
+
izaci´on, segmentaci´on y pron´ostico de lesiones asociadas con accidentes cere-
|
900 |
+
brovasculares isqu´emicos.”, with code 91934.
|
901 |
+
15
|
902 |
+
|
903 |
+
References
|
904 |
+
[1] T. Vos, A. A. Abajobir, K. H. Abate, C. Abbafati, K. M. Abbas, F. Abd-
|
905 |
+
Allah, R. S. Abdulkader, A. M. Abdulle, T. A. Abebo, S. F. Abera, et al.,
|
906 |
+
Global, regional, and national incidence, prevalence, and years lived with
|
907 |
+
disability for 328 diseases and injuries for 195 countries, 1990–2016: a sys-
|
908 |
+
tematic analysis for the global burden of disease study 2016, The Lancet
|
909 |
+
390 (10100) (2017) 1211–1259.
|
910 |
+
[2] E. R. Dorsey, B. R. Bloem, The parkinson pandemic—a call to action,
|
911 |
+
JAMA neurology 75 (1) (2018) 9–10.
|
912 |
+
[3] W. H. Organization, Neurological disorders:
|
913 |
+
public health challenges,
|
914 |
+
World Health Organization, 2006.
|
915 |
+
[4] R. Balestrino, A. Schapira, Parkinson disease, European journal of neurol-
|
916 |
+
ogy 27 (1) (2020) 27–42.
|
917 |
+
[5] N. Kour, S. Arora, et al., Computer-vision based diagnosis of parkinson’s
|
918 |
+
disease via gait: a survey, IEEE Access 7 (2019) 156620–156645.
|
919 |
+
[6] L. C. Guayac´an, E. Rangel, F. Mart´ınez, Towards understanding spatio-
|
920 |
+
temporal parkinsonian patterns from salient regions of a 3d convolutional
|
921 |
+
network, in: 2020 42nd Annual International Conference of the IEEE En-
|
922 |
+
gineering in Medicine & Biology Society (EMBC), IEEE, 2020, pp. 3688–
|
923 |
+
3691.
|
924 |
+
[7] R. Sun, Z. Wang, K. E. Martens, S. Lewis, Convolutional 3d attention
|
925 |
+
network for video based freezing of gait recognition, in: 2018 Digital Image
|
926 |
+
Computing: Techniques and Applications (DICTA), IEEE, 2018, pp. 1–7.
|
927 |
+
[8] L. C. Guayac´an, F. Mart´ınez, Visualising and quantifying relevant parkin-
|
928 |
+
sonian gait patterns using 3d convolutional network, Journal of biomedical
|
929 |
+
informatics 123 (2021) 103935.
|
930 |
+
[9] M. H. Li, T. A. Mestre, S. H. Fox, B. Taati, Vision-based assessment of
|
931 |
+
parkinsonism and levodopa-induced dyskinesia with pose estimation, Jour-
|
932 |
+
nal of neuroengineering and rehabilitation 15 (1) (2018) 1–13.
|
933 |
+
[10] G. Litjens, T. Kooi, B. E. Bejnordi, A. A. A. Setio, F. Ciompi, M. Ghafoo-
|
934 |
+
rian, J. A. Van Der Laak, B. Van Ginneken, C. I. S´anchez, A survey on
|
935 |
+
deep learning in medical image analysis, Medical image analysis 42 (2017)
|
936 |
+
60–88.
|
937 |
+
[11] K. Sugandhi, F. F. Wahid, G. Raju, Feature extraction methods for hu-
|
938 |
+
man gait recognition–a survey, in: International Conference on Advances
|
939 |
+
in Computing and Data Sciences, Springer, 2016, pp. 377–385.
|
940 |
+
[12] R. Chalapathy, S. Chawla, Deep learning for anomaly detection: A survey,
|
941 |
+
arXiv preprint arXiv:1901.03407 (2019).
|
942 |
+
16
|
943 |
+
|
944 |
+
[13] L. Schmarje, M. Santarossa, S.-M. Schr¨oder, R. Koch, A survey on semi-
|
945 |
+
, self-and unsupervised learning for image classification, IEEE Access 9
|
946 |
+
(2021) 82146–82168.
|
947 |
+
[14] C.-W. Cho, W.-H. Chao, S.-H. Lin, Y.-Y. Chen, A vision-based analysis
|
948 |
+
system for gait recognition in patients with parkinson’s disease, Expert
|
949 |
+
Systems with applications 36 (3) (2009) 7033–7039.
|
950 |
+
[15] S.-W. Chen, S.-H. Lin, L.-D. Liao, H.-Y. Lai, Y.-C. Pei, T.-S. Kuo, C.-T.
|
951 |
+
Lin, J.-Y. Chang, Y.-Y. Chen, Y.-C. Lo, et al., Quantification and recogni-
|
952 |
+
tion of parkinsonian gait from monocular video imaging using kernel-based
|
953 |
+
principal component analysis, Biomedical engineering online 10 (1) (2011)
|
954 |
+
1–21.
|
955 |
+
[16] S. N˜omm, A. Toomela, M. Vaske, D. Uvarov, P. Taba, An alternative
|
956 |
+
approach to distinguish movements of parkinson disease patients, IFAC-
|
957 |
+
PapersOnLine 49 (19) (2016) 272–276.
|
958 |
+
[17] S. Soltaninejad, A. Rosales-Castellanos, F. Ba, M. A. Ibarra-Manzano,
|
959 |
+
I. Cheng, Body movement monitoring for parkinson’s disease patients us-
|
960 |
+
ing a smart sensor based non-invasive technique, in: 2018 IEEE 20th In-
|
961 |
+
ternational Conference on e-Health Networking, Applications and Services
|
962 |
+
(Healthcom), IEEE, 2018, pp. 1–6.
|
963 |
+
[18] B. R. Kiran, D. M. Thomas, R. Parakkal, An overview of deep learning
|
964 |
+
based methods for unsupervised and semi-supervised anomaly detection in
|
965 |
+
videos, Journal of Imaging 4 (2) (2018) 36.
|
966 |
+
[19] J. Demˇsar, Statistical comparisons of classifiers over multiple data sets,
|
967 |
+
The Journal of Machine Learning Research 7 (2006) 1–30.
|
968 |
+
[20] J. Luengo, S. Garc´ıa, F. Herrera, A study on the use of statistical tests for
|
969 |
+
experimentation with neural networks: Analysis of parametric test condi-
|
970 |
+
tions and non-parametric tests, Expert Systems with Applications 36 (4)
|
971 |
+
(2009) 7798–7808.
|
972 |
+
[21] N. Kour, S. Arora, et al., A vision-based gait dataset for knee osteoarthritis
|
973 |
+
and parkinson’s disease analysis with severity levels, in: International Con-
|
974 |
+
ference on Innovative Computing and Communications, Springer, 2022, pp.
|
975 |
+
303–317.
|
976 |
+
[22] N. Kour, S. Gupta, S. Arora, A vision-based clinical analysis for classifica-
|
977 |
+
tion of knee osteoarthritis, parkinson’s disease and normal gait with severity
|
978 |
+
based on k-nearest neighbour, Expert Systems 39 (6) (2022) e12955.
|
979 |
+
17
|
980 |
+
|
B9FJT4oBgHgl3EQfACzo/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
BNE3T4oBgHgl3EQfTwqq/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ca0a33e4c68b080db9829e8e1c256e34898a4cd9904df9057f3a86cea39e86e9
|
3 |
+
size 166251
|
BNE5T4oBgHgl3EQfTA98/content/2301.05533v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1a17aa5c64af98e8d6f7e17c3b6b2da9364a35b25874e637cdcb131b1915c791
|
3 |
+
size 941568
|
BNE5T4oBgHgl3EQfTA98/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4cf5f4a17a6ca882e032c6fbab19853a8454e63850b5ce7a8a8a1c32eb38d5b5
|
3 |
+
size 852013
|
BNE5T4oBgHgl3EQfTA98/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:cd199afda7527e34448dce5487ac0c80ee7a5dea51a3f903e2f54e7970c06a23
|
3 |
+
size 41563
|
C9E1T4oBgHgl3EQfWATV/content/2301.03110v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7d17a499530c6190a1871f9eb504a8911739a804d0385a2aac125f0441f83ed0
|
3 |
+
size 1377155
|
C9E1T4oBgHgl3EQfWATV/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8a5c488c72bab830d93fac033e963e507323f4155df21120e66027bc39069986
|
3 |
+
size 255239
|
CdFQT4oBgHgl3EQfOTbk/content/2301.13275v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4dde949a07417e038ce8268ecbe26a4319ce68064cddfc82d06003fc715f0d81
|
3 |
+
size 404268
|
CdFQT4oBgHgl3EQfOTbk/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:dfc4c560f82c3e7003ddecbef0bd8547d4585683f467db931523639dca8da8ab
|
3 |
+
size 1966125
|
CdFQT4oBgHgl3EQfOTbk/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1db0d1197b51ac44f4a67dbc256be6b08bf86296f2313d2cfe9eaea3d19ecd6d
|
3 |
+
size 72957
|
DNAzT4oBgHgl3EQfTvz-/content/2301.01257v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fa35c8c396859145d37cd74882f13c06a429e5b69e218ed4b5ee23bc2d22b891
|
3 |
+
size 901869
|
DNAzT4oBgHgl3EQfTvz-/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5fd5d64b5b788e7ab22a5b088be78d2fa3d3156c97fd230c07b63236f2651668
|
3 |
+
size 4587565
|
DNAzT4oBgHgl3EQfTvz-/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:16b5eceed0ddaf7fe53c4417076c602d56d406945d337dc6553dde809bfe881e
|
3 |
+
size 162300
|
ENFRT4oBgHgl3EQfAze2/content/2301.13463v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:76147a92c2990565e70fdeb70a7abcca19991846f18910a252683b4b18b41c44
|
3 |
+
size 226732
|
ENFRT4oBgHgl3EQfAze2/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ca58edb41c3d7a419f2e1cbbcd7eb78b6838c5dd194825f9ddf77a189c542de2
|
3 |
+
size 77172
|
EdE4T4oBgHgl3EQffQ16/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fa2bbdff342c0acfc83a62b878f786638eb75536ae7154a2aede5dcf2e55ff10
|
3 |
+
size 1114157
|
EdE4T4oBgHgl3EQffQ16/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:6424a1c4fc6684c50517b5ec1be24dd18b0bba04fa5586e191cd60a8ab6d5519
|
3 |
+
size 47813
|
FdE3T4oBgHgl3EQfVwo4/content/2301.04462v1.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:abbd093abf47a6a39578390cbef636fbb441d86048ce0caf1265b7650ca5113a
|
3 |
+
size 1613748
|