jackkuo commited on
Commit
42c4267
·
verified ·
1 Parent(s): be51e7e

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. -NE5T4oBgHgl3EQfRg5P/content/tmp_files/load_file.txt +0 -0
  2. .gitattributes +55 -0
  3. 09E4T4oBgHgl3EQfzA3P/content/2301.05271v1.pdf +3 -0
  4. 09E4T4oBgHgl3EQfzA3P/vector_store/index.pkl +3 -0
  5. 2NE4T4oBgHgl3EQfagwY/content/tmp_files/2301.05064v1.pdf.txt +579 -0
  6. 2NE4T4oBgHgl3EQfagwY/content/tmp_files/load_file.txt +497 -0
  7. 2dE1T4oBgHgl3EQfAAKW/content/2301.02834v1.pdf +3 -0
  8. 2dE1T4oBgHgl3EQfAAKW/vector_store/index.faiss +3 -0
  9. 2dE1T4oBgHgl3EQfAAKW/vector_store/index.pkl +3 -0
  10. 3dFQT4oBgHgl3EQf3TZi/content/tmp_files/2301.13427v1.pdf.txt +1617 -0
  11. 3dFQT4oBgHgl3EQf3TZi/content/tmp_files/load_file.txt +0 -0
  12. 4tE4T4oBgHgl3EQf1A0X/content/tmp_files/2301.05286v1.pdf.txt +810 -0
  13. 4tE4T4oBgHgl3EQf1A0X/content/tmp_files/load_file.txt +0 -0
  14. 5tAzT4oBgHgl3EQfEfox/content/2301.00993v1.pdf +3 -0
  15. 5tAzT4oBgHgl3EQfEfox/vector_store/index.faiss +3 -0
  16. 5tAzT4oBgHgl3EQfEfox/vector_store/index.pkl +3 -0
  17. 79A0T4oBgHgl3EQfOf-b/content/tmp_files/2301.02162v1.pdf.txt +1504 -0
  18. 79A0T4oBgHgl3EQfOf-b/content/tmp_files/load_file.txt +0 -0
  19. 79AyT4oBgHgl3EQf2_lP/content/2301.00760v1.pdf +3 -0
  20. 79AyT4oBgHgl3EQf2_lP/vector_store/index.pkl +3 -0
  21. 7dE5T4oBgHgl3EQfQA7X/vector_store/index.pkl +3 -0
  22. 7tE1T4oBgHgl3EQfBwI8/content/tmp_files/2301.02855v1.pdf.txt +2274 -0
  23. 7tE1T4oBgHgl3EQfBwI8/content/tmp_files/load_file.txt +0 -0
  24. 8dAzT4oBgHgl3EQfgfzo/content/tmp_files/2301.01471v1.pdf.txt +726 -0
  25. 8dAzT4oBgHgl3EQfgfzo/content/tmp_files/load_file.txt +0 -0
  26. 8tFRT4oBgHgl3EQfpzcC/content/tmp_files/2301.13614v1.pdf.txt +1117 -0
  27. 8tFRT4oBgHgl3EQfpzcC/content/tmp_files/load_file.txt +0 -0
  28. CdFAT4oBgHgl3EQftB4M/content/tmp_files/2301.08661v1.pdf.txt +1412 -0
  29. CdFAT4oBgHgl3EQftB4M/content/tmp_files/load_file.txt +0 -0
  30. CtE1T4oBgHgl3EQf9wbT/content/tmp_files/2301.03561v1.pdf.txt +2536 -0
  31. CtE1T4oBgHgl3EQf9wbT/content/tmp_files/load_file.txt +0 -0
  32. CtE2T4oBgHgl3EQf9Ant/content/2301.04225v1.pdf +3 -0
  33. CtFJT4oBgHgl3EQftS25/vector_store/index.faiss +3 -0
  34. CtFJT4oBgHgl3EQftS25/vector_store/index.pkl +3 -0
  35. E9E0T4oBgHgl3EQfhAEU/content/2301.02424v1.pdf +3 -0
  36. E9E4T4oBgHgl3EQf6w7l/content/2301.05335v1.pdf +3 -0
  37. E9E4T4oBgHgl3EQf6w7l/vector_store/index.faiss +3 -0
  38. E9E4T4oBgHgl3EQf6w7l/vector_store/index.pkl +3 -0
  39. FNE3T4oBgHgl3EQfVgrM/vector_store/index.faiss +3 -0
  40. FNE3T4oBgHgl3EQfVgrM/vector_store/index.pkl +3 -0
  41. J9FRT4oBgHgl3EQf0ThP/content/2301.13652v1.pdf +3 -0
  42. J9FRT4oBgHgl3EQf0ThP/vector_store/index.faiss +3 -0
  43. J9FRT4oBgHgl3EQf0ThP/vector_store/index.pkl +3 -0
  44. JNFIT4oBgHgl3EQfZCuh/content/tmp_files/2301.11251v1.pdf.txt +907 -0
  45. JNFIT4oBgHgl3EQfZCuh/content/tmp_files/load_file.txt +413 -0
  46. JdE2T4oBgHgl3EQfUgew/content/tmp_files/2301.03814v1.pdf.txt +367 -0
  47. JdE2T4oBgHgl3EQfUgew/content/tmp_files/load_file.txt +176 -0
  48. KdFOT4oBgHgl3EQfzDRI/content/tmp_files/2301.12930v1.pdf.txt +3349 -0
  49. KdFOT4oBgHgl3EQfzDRI/content/tmp_files/load_file.txt +0 -0
  50. L9E3T4oBgHgl3EQfwAue/content/2301.04699v1.pdf +3 -0
-NE5T4oBgHgl3EQfRg5P/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
.gitattributes CHANGED
@@ -3447,3 +3447,58 @@ otFQT4oBgHgl3EQfrTZB/content/2301.13383v1.pdf filter=lfs diff=lfs merge=lfs -tex
3447
  VdE5T4oBgHgl3EQfBg5L/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3448
  rtAzT4oBgHgl3EQfPPsG/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3449
  jNE0T4oBgHgl3EQfYQBW/content/2301.02304v1.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3447
  VdE5T4oBgHgl3EQfBg5L/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3448
  rtAzT4oBgHgl3EQfPPsG/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3449
  jNE0T4oBgHgl3EQfYQBW/content/2301.02304v1.pdf filter=lfs diff=lfs merge=lfs -text
3450
+ ltE4T4oBgHgl3EQfUAze/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3451
+ 79AyT4oBgHgl3EQf2_lP/content/2301.00760v1.pdf filter=lfs diff=lfs merge=lfs -text
3452
+ b9A0T4oBgHgl3EQfGf_e/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3453
+ f9FJT4oBgHgl3EQfUSzI/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3454
+ jNE3T4oBgHgl3EQfJAm8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3455
+ jNE0T4oBgHgl3EQfYQBW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3456
+ LtAzT4oBgHgl3EQfVvzk/content/2301.01291v1.pdf filter=lfs diff=lfs merge=lfs -text
3457
+ J9FRT4oBgHgl3EQf0ThP/content/2301.13652v1.pdf filter=lfs diff=lfs merge=lfs -text
3458
+ E9E4T4oBgHgl3EQf6w7l/content/2301.05335v1.pdf filter=lfs diff=lfs merge=lfs -text
3459
+ CtE2T4oBgHgl3EQf9Ant/content/2301.04225v1.pdf filter=lfs diff=lfs merge=lfs -text
3460
+ b9AzT4oBgHgl3EQfZvxg/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3461
+ LtAzT4oBgHgl3EQfVvzk/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3462
+ CtFJT4oBgHgl3EQftS25/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3463
+ rtAzT4oBgHgl3EQfPPsG/content/2301.01178v1.pdf filter=lfs diff=lfs merge=lfs -text
3464
+ RdAzT4oBgHgl3EQfXPxJ/content/2301.01314v1.pdf filter=lfs diff=lfs merge=lfs -text
3465
+ E9E0T4oBgHgl3EQfhAEU/content/2301.02424v1.pdf filter=lfs diff=lfs merge=lfs -text
3466
+ UdA0T4oBgHgl3EQfEf-Z/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3467
+ LNE1T4oBgHgl3EQfswVe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3468
+ U9E1T4oBgHgl3EQfuwXx/content/2301.03393v1.pdf filter=lfs diff=lfs merge=lfs -text
3469
+ 2dE1T4oBgHgl3EQfAAKW/content/2301.02834v1.pdf filter=lfs diff=lfs merge=lfs -text
3470
+ w9E0T4oBgHgl3EQf-QKR/content/2301.02812v1.pdf filter=lfs diff=lfs merge=lfs -text
3471
+ J9FRT4oBgHgl3EQf0ThP/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3472
+ ddE0T4oBgHgl3EQfWwAE/content/2301.02281v1.pdf filter=lfs diff=lfs merge=lfs -text
3473
+ W9AyT4oBgHgl3EQf9Pog/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3474
+ UdA0T4oBgHgl3EQfEf-Z/content/2301.02019v1.pdf filter=lfs diff=lfs merge=lfs -text
3475
+ L9E3T4oBgHgl3EQfwAue/content/2301.04699v1.pdf filter=lfs diff=lfs merge=lfs -text
3476
+ ddE0T4oBgHgl3EQfWwAE/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3477
+ w9E0T4oBgHgl3EQf-QKR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3478
+ q9E5T4oBgHgl3EQfJg7h/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3479
+ ZtFRT4oBgHgl3EQfPze-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3480
+ L9E3T4oBgHgl3EQfwAue/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3481
+ mNFLT4oBgHgl3EQfei-P/content/2301.12091v1.pdf filter=lfs diff=lfs merge=lfs -text
3482
+ q9E5T4oBgHgl3EQfJg7h/content/2301.05459v1.pdf filter=lfs diff=lfs merge=lfs -text
3483
+ rtA0T4oBgHgl3EQfK_-E/content/2301.02112v1.pdf filter=lfs diff=lfs merge=lfs -text
3484
+ YdE2T4oBgHgl3EQfvAhk/content/2301.04086v1.pdf filter=lfs diff=lfs merge=lfs -text
3485
+ nNFLT4oBgHgl3EQffS-6/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3486
+ 5tAzT4oBgHgl3EQfEfox/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3487
+ RtE4T4oBgHgl3EQfKwzR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3488
+ nNFLT4oBgHgl3EQffS-6/content/2301.12095v1.pdf filter=lfs diff=lfs merge=lfs -text
3489
+ 5tAzT4oBgHgl3EQfEfox/content/2301.00993v1.pdf filter=lfs diff=lfs merge=lfs -text
3490
+ dtFPT4oBgHgl3EQfyzWo/content/2301.13173v1.pdf filter=lfs diff=lfs merge=lfs -text
3491
+ cNFST4oBgHgl3EQfDTjC/content/2301.13710v1.pdf filter=lfs diff=lfs merge=lfs -text
3492
+ PdFAT4oBgHgl3EQfzh4f/content/2301.08698v1.pdf filter=lfs diff=lfs merge=lfs -text
3493
+ otA0T4oBgHgl3EQfKP8P/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3494
+ N9E3T4oBgHgl3EQfxAtK/content/2301.04707v1.pdf filter=lfs diff=lfs merge=lfs -text
3495
+ fNE2T4oBgHgl3EQfxwgj/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3496
+ fNE2T4oBgHgl3EQfxwgj/content/2301.04113v1.pdf filter=lfs diff=lfs merge=lfs -text
3497
+ 09E4T4oBgHgl3EQfzA3P/content/2301.05271v1.pdf filter=lfs diff=lfs merge=lfs -text
3498
+ otFQT4oBgHgl3EQfrTZB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3499
+ cNFLT4oBgHgl3EQfYi_0/content/2301.12066v1.pdf filter=lfs diff=lfs merge=lfs -text
3500
+ WNAyT4oBgHgl3EQfu_m7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3501
+ i9A0T4oBgHgl3EQfIf_T/content/2301.02077v1.pdf filter=lfs diff=lfs merge=lfs -text
3502
+ 2dE1T4oBgHgl3EQfAAKW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3503
+ FNE3T4oBgHgl3EQfVgrM/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3504
+ E9E4T4oBgHgl3EQf6w7l/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
09E4T4oBgHgl3EQfzA3P/content/2301.05271v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e82a2962bbcb29c80ab08143c8f2e64c9d14f2e55dc285954c27050a78c93eab
3
+ size 1404946
09E4T4oBgHgl3EQfzA3P/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bfdbb76a100af8e6bb8ee4e7c75d8be991ecc353a94d43344edb8cb575d69c93
3
+ size 164408
2NE4T4oBgHgl3EQfagwY/content/tmp_files/2301.05064v1.pdf.txt ADDED
@@ -0,0 +1,579 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Journal of the Physical Society of Japan
2
+ LETTERS
3
+ Inelastic Neutron Scattering Study of the Spin Dynamics
4
+ in the Breathing Pyrochlore System LiGa0.95In0.05Cr4O8
5
+ Yu Tanaka1 *, Rafal Wawrzy´nczak2, Manh Duc Le3, Tatiana Guidi3, Yoshihiko Okamoto4, Takeshi Yajima1,
6
+ Zenji Hiroi1, Masashi Takigawa1, and Gøran J. Nilsen3
7
+ 1Institute for Solid State Physics, University of Tokyo, Kashiwa, Chiba 277-8581, Japan
8
+ 2Institut Laue-Langevin, CS 20156, Cedex 9, 38042 Grenoble, France
9
+ 3ISIS Facility, Rutherford Appleton Laboratory-STFC, Chilton, Didcot OX11 0QX, United Kingdom
10
+ 4Department of Applied Physics, Nagoya University, Nagoya 464-8603, Japan
11
+ The A-site ordered chromate spinels LiGa1−xInxCr4O8 host a network of size-alternating spin-3/2 Cr3+ tetrahedra
12
+ known as a “breathing” pyrochlore lattice. For the x = 0.05 composition, the complex magneto-structural ordering
13
+ observed in the parent x = 0 material is replaced by a single transition at T f = 11 K, ascribed to the collinear nematic
14
+ order caused by strong spin-lattice coupling. We present here an inelastic neutron scattering study of the spin dynamics
15
+ in this composition. Above T f , the dynamical scattering function S (Q, E) is ungapped and quasi-elastic, similar to
16
+ undoped LiGaCr4O8. Below T f , the spectral weight splits between a broad inelastic feature at 5.8 meV and the
17
+ elastic line. The former feature can be ascribed to spin precessions within antiferromagnetic loops, lifted to finite energy
18
+ by the effective biquadratic spin-lattice term in the spin Hamiltonian.
19
+ When magnetic frustration is combined with strong spin-
20
+ lattice coupling, a range of possible magneto-structural be-
21
+ haviors result,1) including nematic transitions,2) magnetiza-
22
+ tion plateaus, and localized spin excitations.3,4) An ideal
23
+ arena for exploring this interplay is provided by the chro-
24
+ mate spinels, A2+Cr3+
25
+ 2 O4. Here, the frustration originates from
26
+ the corner-sharing pyrochlore network of Cr3+ (S = 3/2)
27
+ tetrahedra on the B-site, while the strong spin-lattice cou-
28
+ pling arises from the direct overlap of Cr3+ d-orbitals on ad-
29
+ jacent sites. A common starting point to understand the low-
30
+ temperature physics of the chromate spinels is the so-called
31
+ bilinear-biquadratic model,5,6)
32
+ $H = J \sum_{\langle i,j \rangle} \mathbf{S}_i \cdot \mathbf{S}_j + b \sum_{\langle i,j \rangle} (\mathbf{S}_i \cdot \mathbf{S}_j)^2 \qquad (1)$
40
+ where the classical nearest-neighbor Heisenberg Hamiltonian
41
+ is extended with an effective biquadratic term, b(Si · Sj)2
42
+ (where b is a coupling constant, and Si,j are classical spins).
43
+ This term, generated by spin-lattice coupling to local distor-
44
+ tions, lifts some of the degeneracy of the ground state mani-
45
+ fold by selecting collinear or coplanar spin configurations. Al-
46
+ though the bilinear-biquadratic model ignores the long-range
47
+ interactions which eventually cause magneto-structural order
48
+ in many chromate spinels, it is able to successfully describe
49
+ both the short-range spin correlations and the magnetic phase
50
+ diagram of virtually every member of the family. It has not,
51
+ however, yet been applied to the unusual magnetic excita-
52
+ tion spectra in the ordered phases of MgCr2O4,7) HgCr2O4,4)
53
+ and ZnCr2O4;8) these are characterized by weak spin-wave
54
+ branches and sharp, non-dispersive inelastic bands, with wave
55
+ vector dependences characteristic of small spin clusters.4,9)
56
+ While bands assigned to both hexamer and heptamer clus-
57
+ ters are observed in MgCr2O4 and HgCr2O4, only the for-
58
+ mer are seen in ZnCr2O4. Due to the complexity of the low-
59
+ temperature magneto-structural orders in these materials, the
60
+ connection between the structure and the apparent spin cluster
61
+ excitations is unclear.
62
+ This link is more evident, at least at high temperature, in
63
+ the so-called “breathing” pyrochlore spinels A+A′3+Cr4O8,
64
+ where the A-site is now populated by an ordered arrangement
65
+ of mono- and trivalent cations. Here, the term “breathing”
66
+ refers to the alternation of Cr3+-Cr3+ distances and, hence,
67
+ magnetic exchanges J and J′ between adjacent Cr3+
68
+ 4 tetrahe-
69
+ dra as a consequence of the order on the A-site. The degree
70
+ of magnetic alternation is quantified by the breathing factor
71
+ Bf = J′/J, where Bf → 0 corresponds to isolated tetrahedra
72
+ and Bf → 1 to the isotropic pyrochlore lattice. Starting from
73
+ the former limit, the excitations at T ∼ J are localized non-
74
+ dispersive triplets (and higher multiplets) separated by a spin
75
+ gap ∆ from the singlet ground state. When Bf is increased, ∆
76
+ is suppressed, and the excitations become qualitatively simi-
77
+ lar to the isotropic case beyond Bf ∼ 0.25. This simple picture
78
+ is again complicated by the influence of the spin-lattice cou-
79
+ pling and long-ranged terms, which are responsible for the
80
+ collective excitations in the low-temperature ordered phases.
81
+ LiGaCr4O8 and LiInCr4O8 were rediscovered by Okamoto
82
+ et al.10) as breathing pyrochlore systems, which have A+ =
83
+ Li+ and A′3+ = Ga3+/In3+. The nearest-neighbor magnetic in-
84
+ teractions on the small and large tetrahedra are estimated to
85
+ be J ∼ 50 K and J′ ∼ 30 K (Bf ∼ 0.6) for LiGaCr4O8,
86
+ and J ∼ 60 K and J′ ∼ 6 K (Bf ∼ 0.1) for LiInCr4O8.10)
87
+ For LiGaCr4O8, the upper magneto-structural transition at
88
+ Tu ∼ 20 K results in phase separation into cubic paramag-
89
+ netic and tetragonal collinear phases. The cubic phase then
90
+ undergoes another transition at Tl = 13.8 K, into a second
91
+ tetragonal phase, the structure of which has not yet been deter-
92
+ mined. As in other chromate spinels, both transitions are first-
93
+ order. However, the paramagnetic component shows a diver-
94
+ gence in the nuclear spin-lattice relaxation rate 1/T1 extracted
95
+ from 7Li-NMR, implying proximity to a tricritical point or to
96
+ a second-order transition to another phase.11)
97
+ In this letter, we describe inelastic neutron scattering mea-
98
+ surements of the spin excitation spectrum of the “breath-
99
+ 1
100
+ arXiv:2301.05064v1 [cond-mat.str-el] 12 Jan 2023
101
+
102
+ J. Phys. Soc. Jpn.
103
+ LETTERS
104
+ 152 K
105
+ 50.4 K
106
+ (a)
107
+ (b)
108
+ (c)
109
+ (d)
110
+ 12.4 K
111
+ 5.2 K
112
+ S (Q, E) (arb. units)
113
+ Fig. 1.
114
+ (Color online) Temperature dependence of S (Q, E) for LiGa0.95In0.05Cr4O8 recorded with 16 meV incident energy. (a)-(c) are taken at T > T f and
115
+ (d) below T f . Blank patches are due to gaps between detectors.
116
+ ing” pyrochlore chromate spinel, LiGa0.95In0.05Cr4O8, where
117
+ Bf ∼ 0.6. Previous diffraction measurements indicate that
118
+ LiGa0.95In0.05Cr4O8 undergoes a possible second-order tran-
119
+ sition to a nematic collinear ground state at T f = 11.1 K,
120
+ in accordance with predictions from the bilinear-biquadratic
121
+ model.2) The excitations at T > T f are gapless and Lorentzian
122
+ in form, as is also the case for MgCr2O4, HgCr2O4, and
123
+ ZnCr2O4. Below T f , the spectral weight shifts to the elastic
124
+ line and an inelastic feature at ∼5.8 meV. We identify the lat-
125
+ ter with spin precession within antiferromagnetic hexagonal
126
+ spin clusters created by the nematic order, and lifted to finite
127
+ energy by the biquadratic term. We thus provide, for the first
128
+ time, a plausible link between the magnetic excitations and
129
+ magnetic structure. The remaining spectral weight appears to
130
+ be consistent with collective spin-wave-like excitations.
131
+ The powder sample of LiGa0.95In0.05Cr4O8 was prepared
132
+ by sintering a stoichiometric mixture of the two end-member
133
+ compounds LiGaCr4O8 and LiInCr4O8.12) These were in turn
134
+ prepared by the standard solid-state route,10) using starting
135
+ materials enriched with 7Li to reduce neutron absorption. For
136
+ our inelastic neutron scattering measurements, 8.1 g of pow-
137
+ der was packed in an Al sachet, which was rolled into an annu-
138
+ lus and loaded into an Al can with � = 45 mm. The measure-
139
+ ments were performed on the MARI direct-geometry time-of-
140
+ flight chopper spectrometer at the ISIS facility, UK, using in-
141
+ cident energies Ei = 10, 16, 25, and 35 meV. For all values of
142
+ Ei, the elastic energy resolution was close to ∆E/E ∼ 4.5%.
143
+ Temperatures between 5 and 300 K were accessed using a
144
+ closed-cycle refrigerator. The diffraction measurements re-
145
+ ported in Ref. 2 were performed on the same sample.
146
+ The dynamical structure factors S (Q, E = Ei − E f ) mea-
147
+ sured at four selected temperatures between 5.2 K and 152 K
148
+ are shown in Fig. 1. We begin with an analysis of the data
149
+ taken above T f : at T = 152 K≫ T f , a quasi-elastic rod
150
+ of scattering extending up to ∼15 meV is observed. This
151
+ is characteristic of diffusive spin excitations in the corre-
152
+ lated paramagnetic state,13) and resembles the S (Q, E) of both
153
+ LiInCr4O814) and LiGaCr4O815) at similar temperatures. The
154
+ intensity of the rod is centered around 1.6 Å−1, corresponding
155
+ approximately to the reciprocal space position of the Cr-Cr
156
+ nearest-neighbor distance. Upon cooling to 50.4 K, intensity
157
+ builds up near the elastic line, again as in LiGaCr4O8, but in
158
+ contrast to LiInCr4O8, where the scattering becomes inelas-
159
+ tic.14) The modulation of the quasi-elastic scattering is also
160
+ enhanced, indicating the development of longer-ranged spin-
161
+ spin correlations ⟨S (0) · S (r)⟩, as may be seen in Fig. 2.
162
+ To determine the spatial extent of the correlations, we
163
+ fit the Q-dependence of the scattering integrated between 2
164
+ and 7 meV (≃ S (Q) at high temperature) to a shell model
165
+ [Fig. 2(a)]:
166
+ $S(Q) = f(Q)^2 \sum_i \langle \mathbf{S}(0) \cdot \mathbf{S}(r_i) \rangle \, N_i \, \frac{\sin(Q r_i)}{Q r_i}, \qquad (2)$
173
+ where f(Q) is the magnetic form factor for Cr3+, and Ni is
174
+ the coordination number of the ith shell at radial distance ri.
175
+ For simplicity, r1 is approximated as the mean of the r1 and r′
176
+ 1
177
+ distances.
178
+ The summation in the fitting function was extended to the
179
+ third neighboring shell, at which point the fit quality did
180
+ not increase. The extracted parameters reveal antiferromag-
181
+ 2
182
+
183
+ MAR20556Reduced SQW
184
+ T= 50.4 K
185
+ Energy transfer (me V)
186
+ 10
187
+ 0
188
+ 1
189
+ 2
190
+ 3
191
+ 4
192
+ 5
193
+ Q(A-1)MAR20554Reduced SQW
194
+ T= 12.4 K
195
+ Energy transfer (me V)
196
+ 10
197
+ 0
198
+ 1
199
+ 2
200
+ 3
201
+ 5
202
+ Q(A-1)MAR20551Reduced SQW
203
+ T= 5.2 K
204
+ Energy transfer (me V)
205
+ 10
206
+ 0
207
+ 1
208
+ 2
209
+ 3
210
+ 4
211
+ Q(A-1)MAR20560Reduced SQW
212
+ T= 152 K
213
+ Energy transfer (me V)
214
+ .10
215
+ 0
216
+ 1
217
+ 2
218
+ 3
219
+ 4
220
+ Q(A-1)J. Phys. Soc. Jpn.
221
+ LETTERS
222
+ 1
223
+ 2
224
+ 3
225
+ 4
226
+ Q ( ˚A−1)
227
+ S(Q) (a.u.)
228
+ 152 K
229
+ 75 K
230
+ 50 K
231
+ 24 K
232
+ 18 K
233
+ 14 K
234
+ 12.4 K
235
+ 5.2 K
236
+ (a)
237
+ E = 2−7 meV
238
+ 0
239
+ 2
240
+ 4
241
+ 6
242
+ 8
243
+ 10
244
+ 12
245
+ r ( ˚A)
246
+ −0.3
247
+ −0.2
248
+ −0.1
249
+ 0.0
250
+ 0.1
251
+ 0.2
252
+ ⟨S(0)·S(r)⟩/S(S+1)
253
+ (b)
254
+ RMC
255
+ 1.5 K
256
+ 30 K
257
+ 5.2 K
258
+ 24 K
259
+ 152
260
+ Hexamer cluster
261
+ Hexamer cluster
262
+ Fig. 2.
263
+ (Color online) (a) Q dependence of the magnetic scattering I(Q),
264
+ integrated over the energy range 2-7 meV at different temperatures. Dashed
265
+ lines are fitting curves calculated from the shell model (Eq. (2)) with the first
266
+ three nearest neighbors and flat backgrounds. Solid red lines show the results
267
+ of structure factor calculations for hexagonal chromium rings at T < 20 K.
268
+ (b) Real space spin-spin correlation functions ⟨S (0) · S (ri)⟩ versus r. Solid
269
+ circles are obtained from the fits to the shell model in Fig. 2(a), and the open
270
+ triangles are obtained by the reverse Monte Carlo (RMC) simulation on the
271
+ magnetic diffuse scattering observed in the elastic ND measurement.2) Green
272
+ stars mark the correlations for an isolated hexagonal antiferromagnetic loop
273
+ (Fig. 4).
274
+ netic nearest-neighbor spin-spin correlations, with progres-
275
+ sively weaker alternating ferro- and antiferromagnetic cor-
276
+ relations for the second and third nearest neighbors, respec-
277
+ tively [Fig. 2(b)]. The extracted correlations are thus con-
278
+ sistent with the reverse Monte Carlo results presented in
279
+ Ref. 2, where energy-integrated data from a diffractometer
280
+ were used; i.e., the true S (Q) was reflected. The temperature
281
+ dependence of the parameters indicates smooth growth of the
282
+ spin-spin correlations in the entire temperature range, as ex-
283
+ pected.
284
+ To analyze the temperature dependence of the quasi-elastic
285
+ feature further, the imaginary part of the magnetic dynamic
286
+ susceptibility χ′′ was calculated by applying the fluctuation
287
+ dissipation theorem16) χ′′(Q, E) = π(1 − e−
288
+ E
289
+ kBT )S (Q, E) to the
290
+ E dependence of the intensity integrated over the Q range
291
+ 1.1 − 1.9 Å−1 [Fig. 3(a)]. At T > T f , the contribution of elas-
292
+ tic scattering is subtracted by approximating it with a sharp
293
+ Gaussian centered around E = 0. The obtained χ′′(ω) are well
294
+ fit by a quasi-elastic Lorentzian $\chi''(\omega) = \chi' \omega \Gamma / (\omega^2 + \Gamma^2)$,
295
+ 20
296
+ 15
297
+ 10
298
+ 5
299
+ 0
300
+ χ" (arb. unit)
301
+ 10
302
+ 8
303
+ 6
304
+ 4
305
+ 2
306
+ 0
307
+ E transfer (meV)
308
+ 5.2 K
309
+ 12.4 K
310
+ 14 K
311
+ 18 K
312
+ 24 K
313
+ 50 K
314
+ 75 K
315
+ 152 K
316
+ 12
317
+ 10
318
+ 8
319
+ 6
320
+ 4
321
+ 2
322
+ 0
323
+ Γ (meV)
324
+ 160
325
+ 120
326
+ 80
327
+ 40
328
+ 0
329
+ T (K)
330
+ 30
331
+ 20
332
+ 10
333
+ 0
334
+ χ' (arb. units)
335
+ (a)
336
+ (b)
337
+ Fig. 3.
338
+ (Color online) (a) Energy dependence of the dynamic susceptibil-
339
+ ity χ′′(ω), integrated over the Q range 1.1-1.9 Å−1 for all measured tem-
340
+ peratures. The elastic line was subtracted from each dataset. The solid lines
341
+ are resolution-broadened quasi-elastic Lorentzian fits. (b) Temperature de-
342
+ pendence of the inverse relaxation rate Γ and the static susceptibility χ’ as
343
+ determined by quasi-elastic Lorentzian fitting. The solid and dotted lines are
344
+ a linear and power-law curve fits to Γ.
345
+ which is the time-Fourier transform of an exponential de-
346
+ cay exp(−t/τ), with τ ∝ 1/Γ and χ′ the static susceptibil-
347
+ ity. On cooling below 18 K, the Lorentzian fits become poor
348
+ at E < 2 meV, indicating that the scattering is no longer de-
349
+ scribed by a single relaxation process. This coincides with the
350
+ appearance of a stretching exponent β < 1 in fits of the T1 re-
351
+ laxation process,17) and thus is likely connected with the onset
352
+ of critical fluctuations above T f .
353
+ Figure 3(b) shows the temperature dependence of the in-
354
+ verse relaxation time Γ and the static susceptibility χ′ ex-
355
+ tracted from the fits described above. Γ decreases smoothly
356
+ in the temperature range above 18 K, and is well described by a
357
+ power law Γ ∝ T γ with γ = 0.66 (dashed line). For Heisen-
358
+ berg spins on the isotropic pyrochlore lattice, theory predicts
359
+ Γ ∝ T (γ = 1);13,18,19) however, a linear fit to the data (solid
360
+ line) is poor at high temperature, even permitting a nonzero
361
+ intercept Γ0 = 1.09 meV. Although a similar reduction of γ
362
+ has also been observed in ZnCr2O4 (γ = 0.81), the cause re-
363
+ mains unclear.3) Aside from this, χ′ is consistent with the bulk
364
+ susceptibility.
365
+ Turning now to the form of S (Q, E) below T f shown in
366
+ Fig. 1(d), most of the high-temperature quasi-elastic scatter-
367
+ ing shifts either towards the elastic line or to an inelastic fea-
368
+ ture centered around 5.8 meV. The latter is similar to the “res-
369
+ onance” observed in LiGaCr4O815) and other spinels, but is
370
+ considerably broader in energy. Like the resonance, however,
371
+ its structure factor suggests local modes on small antiferro-
372
+ magnetic spin loops. An analysis of the reverse Monte Carlo
373
+ spin configurations derived from fits to S (Q) in our previ-
374
+ ous publication2) identifies these with a large number of six-
375
+ membered hexagonal antiferromagnetic spin loops, as well
376
+ as a few with eight or more members. Indeed, the calculated
377
+ structure factor for the hexagonal rings (Fig. 4) agrees almost
378
+ perfectly with that of the energy-integrated data in Figure
379
+ 2(a), also accounting for the variation of ⟨S (0) · S (ri)⟩ versus
380
+ r from the model-independent fits above. As shown in Fig. 4,
381
+ hexagonal antiferromagnetic spin loops are only possible in
382
+ the presence of three types (colors) of collinear state on the
383
+ Cr3+ tetrahedra.5)
384
+ By analogy with the coplanar nematic state in the kagome
385
+ lattice antiferromagnet,20–22) collinear nematic states on the
386
+ 3
387
+
388
+ J. Phys. Soc. Jpn.
389
+ LETTERS
390
+ Fig. 4.
391
+ (Color online) Hexamer loop determined within the breathing py-
392
+ rochlore lattice (cyan bonds). Spheres represent Cr3+ ions. RBG coloring of
393
+ the bonds and vertices of the tetrahedra corresponds to the bond ordering de-
394
+ scribed in Refs. 2 and 5. Cyan arrows represent antiferromagnetically coupled
395
+ spins on the nodes of hexagonal cluster precessing around the easy direction
396
+ of nematic phase (dashed black lines).
397
+ pyrochlore lattice support two types of loop excitations: (i)
398
+ loop flips, which invert the moment directions around the
399
+ loop, hence transforming one nematic ground state configura-
400
+ tion to another, and (ii) “weathervane” modes, small displace-
401
+ ments of the moment direction about the equilibrium direction
402
+ [Fig. 4]. The former, related to the diffusive high-temperature
403
+ excitations, is expected to produce a quasi-elastic signal with
404
+ a temperature-dependent width, and thus cannot account for
405
+ the inelastic feature. As such, we tentatively assign the fea-
406
+ ture to weathervane modes on the hexagonal loops. Consid-
407
+ ering only the bilinear term, the ground state criterion of two
408
+ spins up and two down on each tetrahedron results in a zero
409
+ net exchange field for the spins around the hexagon, and the
410
+ weathervane modes therefore carry no energy cost. When the
411
+ biquadratic (magneto-elastic) and other long-ranged terms are
412
+ included, however, they are lifted to finite energy. In particu-
413
+ lar, inserting the bilinear-biquadratic Hamiltonian (1) into the
414
+ classical equation of motion
415
+ $\frac{d\mathbf{S}_i(t)}{dt} = -\frac{1}{\hbar}\, \mathbf{S}_i(t) \times \nabla_{\mathbf{S}_i(t)} H \qquad (3)$
420
+ results in an energy gap ∆E ≃ 8bavS 3, where bav is the aver-
421
+ age bilinear-biquadratic coupling constant between the small
422
+ and large tetrahedra. In deriving this expression, we assumed
423
+ that there is no coupling between the loops and S z
424
+ i(t) ≃ S ; i.e.,
425
+ the spin displacements are small. From Jav = (J + J′)/2 =
426
+ 45 K estimated from the magnetic susceptibility and the ex-
427
+ perimental excitation energy, we obtain bav ∼ 0.05Jav, which
428
+ is close to the b reported for related materials.23) In addi-
429
+ tion, using T f ≃ bS 4 for the isotropic pyrochlore lattice,24)
430
+ bav ∼ 0.05Jav yields T f ∼ 12 K, which is in excellent agree-
431
+ ment with experiment.
432
+ Now we address the large width of the feature relative to the
433
+ much sharper features observed in other spinels: could this
434
+ be due to the disorder inherent to the nematic state? Below
435
+ T f , the Cr-Cr bond lengths, and hence the biquadratic bond
436
+ energies, are expected to follow a Gaussian distribution (as is
437
+ indeed found for the d-spacings in [2]). The resulting spec-
438
+ trum is then broadened by σ(bav), the FWHM of the Gaus-
439
+ sian. The experimental feature at 5.8 meV is approximately
440
+ Gaussian, with an FWHM of ∼2 meV. To reproduce this, the
441
+ distribution of mean Cr-Cr bond lengths around a spin loop
442
+ is required to be ∼0.1 Å wide, assuming a linear relationship
443
+ between the exchange and the Cr-Cr distance. This is larger
444
+ by approximately a factor of 4 than the distribution estimated
445
+ from Rietveld refinements, which, however, ignore any local
446
+ structure.
447
+ The significant amount of inelastic and quasi-elastic spec-
448
+ tral weight at energies above and below the 5.8 meV feature,
449
+ may be associated with other excitations (also observed on
450
+ the kagome lattice), including the loop flips mentioned above
451
+ and longer-ranged spin-wave-like excitations (which may ex-
452
+ tend to much higher energies), perhaps belonging to the short-
453
+ range magnetic order superimposed on the nematic state. The
454
+ long high-energy tail of the inelastic scattering, extending to
455
+ ∼15 meV, is certainly compatible with the latter. Loop flips,
456
+ on the other hand, are expected to give a quasi-elastic signal of
457
+ width ∝ 1/ exp(−b/T). Ultimately, single-crystal studies and
458
+ spin dynamics simulations of the bilinear-biquadratic model
459
+ with disorder on the present lattice will be required to disen-
460
+ tangle all the contributions to the excitation spectrum in the
461
+ nematic phase.
462
+ Looking beyond the breathing pyrochlores, many features
463
+ of the LiGa1−xInxCr4O8 series are shared with the undistorted
464
+ ZnxCd1−xCr2O4 family.8,25) Starting with the x − T phase di-
465
+ agrams, the introduction of bond disorder by even vestigial
466
+ doping is found to lead to the suppression of the Néel phase
467
+ and adoption of a disordered frozen state at small x in both
468
+ cases, as also observed in Monte Carlo simulations.24) The
469
+ persistence of a sharp phase transition in the specific heat,
470
+ despite glassy behavior in the magnetic susceptibility, is also
471
+ common to both systems. These commonalities suggest the
472
+ intriguing possibility that ZnxCd1−xCr2O4 with x < 0.1 and
473
+ other similar systems also exhibit nematic transitions.24)
474
+ Comparing the x = 0.05 compositions of both families,
475
+ In0.05 and Cd0.05, the form of the scattering is at first glance
476
+ nearly identical above and below the transitions at T f . How-
477
+ ever, the dynamic susceptibility χ′′(E) of In0.05 is describable
478
+ using only one relaxation rate down to 18 K ∼ 1.6T f , while
479
+ that of Cd0.05 requires a distribution of relaxation rates already
480
+ below 4T f .8) This is indicative of a stronger doping effect in
481
+ the latter case. In regard to the gap in S (Q, E) at T < T f , ∆E is
482
+ 4.5 meV in Cd0.05 versus 5.8 meV in In0.05, giving a ratio close
483
+ to that of the exchange couplings in the two systems. Given
484
+ the similar b/J, this could point to a similar physical origin for
485
+ the gap. On the other hand, non-collinearity or strong further
486
+ neighbor couplings could also generate a nonzero exchange
487
+ field around a hexagon, and the former is thought to be fa-
488
+ vored by bond disorder.26) Indeed, flat features in the inelastic
489
+ scattering are also observed in Y2Ru2O7 and ZnCr2O4, where
490
+ non-collinear orders have been proposed.
491
+ We finally note that although inelastic resonances have
492
+ been interpreted as quantum two-level excitations in the
493
+ past,4) they should not be considered as such in the present
494
+ case. This is because the singlet-triplet gap is rapidly sup-
495
+ pressed by both further neighbor couplings and a negative bi-
496
+ quadratic exchange. In addition to this, none of the expected
497
+ higher multiplets are observed at any temperature.
498
+ We have presented an inelastic neutron scattering study
499
+ of the spin dynamics in the classical spin nematic mate-
500
+ rial LiGa0.95In0.05Cr4O8. The high-temperature dynamics are
501
+ 4
502
+
503
+ J. Phys. Soc. Jpn.
504
+ LETTERS
505
+ quasi-elastic and resemble those observed in other pyrochlore
506
+ systems, while the excitation spectrum below the transition at
507
+ T f = 11 K is dominated by a broad, non-dispersive inelastic
508
+ feature at 5.8 meV. A plausible origin for this feature is
509
+ the so-called weathervane modes on hexagonal antiferromag-
510
+ netic loops (abundant in the nematic state), which are lifted
511
+ to finite energy by the biquadratic term that induces the ne-
512
+ matic order. Possible collective excitations with a bandwidth
513
+ of 15 meV are also observed. In order to verify this interpreta-
514
+ tion, more detailed spin dynamics simulations of the bilinear-
515
+ biquadratic model on the breathing pyrochlore lattice will be
516
+ required.
517
+ Acknowledgments
518
+ We thank Y. Motome, H. Shinaoka and M. Gingras
519
+ for fruitful discussions. This work was supported by JSPS KAKENHI (Grant
520
+ Nos. 25287083 and 16J01077). Y.T. was supported by the JSPS through the
521
+ Program for Leading Graduate Schools (MERIT).
522
+ 1) R. Moessner: Can. J. Phys. 79 (2001) 1283.
523
+ 2) R. Wawrzyńczak, Y. Tanaka, M. Yoshida, Y. Okamoto, P. Manuel,
524
+ N. Casati, Z. Hiroi, M. Takigawa, and G. J. Nilsen: Phys. Rev. Lett.
525
+ 119 (2017) 087201.
526
+ 3) S.-H. Lee, C. Broholm, T. H. Kim, W. Ratcliff, and S.-W. Cheong: Phys.
527
+ Rev. Lett. 84 (2000) 3718.
528
+ 4) K. Tomiyasu, H. Ueda, M. Matsuda, M. Yokoyama, K. Iwasa, and
529
+ K. Yamada: Phys. Rev. B 84 (2011) 035115.
530
+ 5) O. Tchernyshyov, R. Moessner, and S. Sondhi: Phys. Rev. B 66 (2002)
531
+ 064403.
532
+ 6) N. Shannon, K. Penc, and Y. Motome: Phys. Rev. B 81 (2010) 184409.
533
+ 7) K. Tomiyasu, T. Yokobori, Y. Kousaka, R. I. Bewley, T. Guidi,
534
+ T. Watanabe, J. Akimitsu, and K. Yamada: Phys. Rev. Lett. 110 (2013)
535
+ 077205.
536
+ 8) W. Ratcliff, S.-H. Lee, C. Broholm, S.-W. Cheong, and Q. Huang: Phys.
537
+ Rev. B 65 (2002) 220406.
538
+ 9) K. Tomiyasu, H. Suzuki, M. Toki, S. Itoh, M. Matsuura, N. Aso, and
539
+ K. Yamada: Phys. Rev. Lett. 101 (2008) 177401.
540
+ 10) Y. Okamoto, G. J. Nilsen, J. P. Attfield, and Z. Hiroi: Phys. Rev. Lett.
541
+ 110 (2013) 097203.
542
+ 11) Y. Tanaka, M. Yoshida, M. Takigawa, Y. Okamoto, and Z. Hiroi: Phys.
543
+ Rev. Lett. 113 (2014) 227204.
544
+ 12) Y. Okamoto, G. J. Nilsen, T. Nakazano, and Z. Hiroi: J. Phys. Soc. Jpn.
545
+ 84 (2015) 043707.
546
+ 13) P. H. Conlon and J. T. Chalker: Phys. Rev. Lett. 102 (2009) 237206.
547
+ 14) G. J. Nilsen, Y. Okamoto, T. Masuda, J. Rodriguez-Carvajal, H. Mutka,
548
+ T. Hansen, and Z. Hiroi: Phys. Rev. B 91 (2015) 174435.
549
+ 15) G. J. Nilsen, Y. Okamoto, C. Tassel, T. Masuda, H. Mutka, and
550
+ Z. Hiroi,
551
+ ILL
552
+ Experimental
553
+ Report
554
+ 5-31-2275,
555
+ Available
556
+ at:
557
+ https://userclub.ill.eu.
558
+ 16) S. W. Lovesey: Theory of neutron scattering from condensed matter
559
+ (Clarendon Press, 1984).
560
+ 17) See the supplementary materials of Ref. 2.
561
+ 18) R. Moessner and J. Chalker: Phys. Rev. Lett. 80 (1998) 2929.
562
+ 19) J. N. Reimers, J. E. Greedan, and M. Björgvinsson: Phys. Rev. B 45
563
+ (1992) 7295.
564
+ 20) J. von Delft and C. L. Henley: Phys. Rev. B 48 (1993) 965.
565
+ 21) M. Taillefumier, J. Robert, C. L. Henley, R. Moessner, and B. Canals:
566
+ Phys. Rev. B 90 (2014) 064419.
567
+ 22) Y. Wan and M. J. P. Gingras: Phys. Rev. B 94 (2016) 174417.
568
+ 23) A. Miyata, H. Ueda, Y. Ueda, Y. Motome, N. Shannon, K. Penc, and
569
+ S. Takeyama: J. Phys. Soc. Jpn. 80 (2011) 074709.
570
+ 24) H. Shinaoka, Y. Tomita, and Y. Motome: Phys. Rev. B 90 (2014)
571
+ 165119.
572
+ 25) H. Martinho, N. O. Moreno, J. A. Sanjurjo, C. Rettori, A. J. García-
573
+ Adeva, D. L. Huber, S. B. Oseroff, W. Ratcliff, S.-W. Cheong, P. G.
574
+ Pagliuso, J. L. Sarrao, and G. B. Martins: Phys. Rev. B 64 (2001)
575
+ 024408.
576
+ 26) L. Bellier-Castella, M. J. Gingras, P. C. Holdsworth, and R. Moessner:
577
+ Canadian Journal of Physics 79 (2001) 1365.
578
+ 5
579
+
2NE4T4oBgHgl3EQfagwY/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,497 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf,len=496
2
+ page_content='Journal of the Physical Society of Japan LETTERS Inelastic Neutron Scattering Study of the Spin Dynamics in the Breathing Pyrochlore System LiGa0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
3
+ page_content='95In0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
4
+ page_content='05Cr4O8 Yu Tanaka1 *, Rafal Wawrzy´nczak2, Manh Duc Le3, Tatiana Guidi3, Yoshihiko Okamoto4, Takeshi Yajima1, Zenji Hiroi1, Masashi Takigawa1, and Gøran J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
5
+ page_content=' Nilsen3 1Institute for Solid State Physics, University of Tokyo, Kashiwa, Chiba 277-8581, Japan 2Institut Laue-Langevin, CS 20156, Cedex 9, 38042 Grenoble, France 3ISIS Facility, Rutherford Appleton Laboratory-STFC, Chilton, Didcot OX11 0QX, United Kingdom 4Department of Applied Physics, Nagoya University, Nagoya 464-8603, Japan The A-site ordered chromate spinels LiGa1−xInxCr4O8 host a network of size-alternating spin-3/2 Cr3+ tetrahedra known as a “breathing” pyrochlore lattice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
6
+ page_content=' For the x = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
7
+ page_content='05 composition, the complex magneto-structural ordering observed in the parent x = 0 material is replaced by a single transition at T f = 11 K, ascribed to the collinear nematic order caused by strong spin-lattice coupling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
8
+ page_content=' We present here an inelastic neutron scattering study of the spin dynamics in this composition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
9
+ page_content=' Above T f , the dynamical scattering function S (Q, E) is ungapped and quasi-elastic, similar to undoped LiGaCr4O8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
10
+ page_content=' Below T f , the spectral weight splits between a broad inelastic feature at 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
11
+ page_content='8 meV and toward the elastic line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
12
+ page_content=' The former feature can be ascribed to spin precessions within antiferromagnetic loops, lifted to finite energy by the effective biquadratic spin-lattice term in the spin Hamiltonian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
13
+ page_content=' When magnetic frustration is combined with strong spin- lattice coupling, a range of possible magneto-structural be- haviors result,1) including nematic transitions,2) magnetiza- tion plateaus, and localized spin excitations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
14
+ page_content='3,4) An ideal arena for exploring this interplay is provided by the chro- mate spinels, A2+Cr3+ 2 O4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
15
+ page_content=' Here, the frustration originates from the corner-sharing pyrochlore network of Cr3+ (S = 3/2) tetrahedra on the B-site, while the strong spin-lattice cou- pling arises from the direct overlap of Cr3+ d-orbitals on ad- jacent sites.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
16
+ page_content=' A common starting point to understand the low- temperature physics of the chromate spinels is the so-called bilinear-biquadratic model,5,6) H = J � i,j Si · Sj + b � i,j (Si · Sj)2 (1) where the classical nearest-neighbor Heisenberg Hamiltonian is extended with an effective biquadratic term, b(Si · Sj)2 (where b is a coupling constant, and Si,j are classical spins).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
17
+ page_content=' This term, generated by spin-lattice coupling to local distor- tions, lifts some of the degeneracy of the ground state mani- fold by selecting collinear or coplanar spin configurations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
18
+ page_content=' Al- though the bilinear-biquadratic model ignores the long-range interactions which eventually cause magneto-structural order in many chromate spinels, it is able to successfully describe both the short-range spin correlations and the magnetic phase diagram of virtually every member of the family.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
19
+ page_content=' It has not, however, yet been applied to the unusual magnetic excita- tion spectra in the ordered phases of MgCr2O4,7) HgCr2O4,4) and ZnCr2O4;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
20
+ page_content='8) these are characterized by weak spin-wave branches and sharp, non-dispersive inelastic bands, with wave vector dependences characteristic of small spin clusters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
21
+ page_content='4,9) While bands assigned to both hexamer and heptamer clus- ters are observed in MgCr2O4 and HgCr2O4, only the for- mer are seen in ZnCr2O4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
22
+ page_content=' Due to the complexity of the low- temperature magneto-structural orders in these materials, the connection between the structure and the apparent spin cluster excitations is unclear.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
23
+ page_content=' This link is more evident, at least at high temperature, in the so-called “breathing” pyrochlore spinels A+A′3+Cr4O8, where the A-site is now populated by an ordered arrangement of mono- and trivalent cations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
24
+ page_content=' Here, the term “breathing” refers to the alternation of Cr3+-Cr3+ distances and, hence, magnetic exchanges J and J′ between adjacent Cr3+ 4 tetrahe- dra as a consequence of the order on the A-site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
25
+ page_content=' The degree of magnetic alternation is quantified by the breathing factor Bf = J′/J, where Bf → 0 corresponds to isolated tetrahedra and Bf → 1 to the isotropic pyrochlore lattice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
26
+ page_content=' Starting from the former limit, the excitations at T ∼ J are localized non- dispersive triplets (and higher multiplets) separated by a spin gap ∆ from the singlet ground state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
27
+ page_content=' When Bf is increased, ∆ is suppressed, and the excitations become qualitatively simi- lar to the isotropic case beyond Bf ∼ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
28
+ page_content='25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
29
+ page_content=' This simple picture is again complicated by the influence of the spin-lattice cou- pling and long-ranged terms, which are responsible for the collective excitations in the low-temperature ordered phases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
30
+ page_content=' LiGaCr4O8 and LiInCr4O8 were rediscovered by Okamoto et.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
31
+ page_content=' al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
32
+ page_content='10) as breathing pyrochlore systems, which have A+ = Li+ and A′3+ = Ga3+/In3+.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
33
+ page_content=' The nearest-neighbor magnetic in- teractions on the small and large tetrahedra are estimated to be J ∼ 50 K and J′ ∼ 30 K (Bf ∼ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
34
+ page_content='6) for LiGaCr4O8, and J ∼ 60 K and J′ ∼ 6 K (Bf ∼ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
35
+ page_content='1) for LiInCr4O8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
36
+ page_content='10) For LiGaCr4O8, the upper magneto-structural transition at Tu ∼ 20 K results in phase separation into cubic paramag- netic and tetragonal collinear phases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
37
+ page_content=' The cubic phase then undergoes another transition at Tl = 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
38
+ page_content='8 K, into a second tetragonal phase, the structure of which has not yet been deter- mined.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
39
+ page_content=' As in other chromate spinels, both transitions are first- order.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
40
+ page_content=' However, the paramagnetic component shows a diver- gence in the nuclear spin-lattice relaxation rate 1/T1 extracted from 7Li-NMR, implying proximity to a tricritical point or to a second-order transition to another phase.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
41
+ page_content='11) In this letter, we describe inelastic neutron scattering mea- surements of the spin excitation spectrum of the “breath- 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
42
+ page_content='05064v1 [cond-mat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
43
+ page_content='str-el] 12 Jan 2023 J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
44
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
45
+ page_content=' Soc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
46
+ page_content=' Jpn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
47
+ page_content=' LETTERS 152 K 50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
48
+ page_content='4 K (a) (b) (c) (d) 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
49
+ page_content='4 K 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
50
+ page_content='2 K S (Q, E) (arb.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
51
+ page_content=' units) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
52
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
53
+ page_content=' (Color online) Temperature dependence of S (Q, E) for LiGa0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
54
+ page_content='95In0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
55
+ page_content='05Cr4O8 recorded with 16 meV incident energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
56
+ page_content=' (a)-(c) are taken at T > T f and (d) below T f .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
57
+ page_content=' Blank patches are due to gaps between detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
58
+ page_content=' ing” pyrochlore chromate spinel, LiGa0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
59
+ page_content='95In0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
60
+ page_content='05Cr4O8, where Bf ∼ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
61
+ page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
62
+ page_content=' Previous diffraction measurements indicate that LiGa0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
63
+ page_content='95In0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
64
+ page_content='05Cr4O8 undergoes a possible second-order tran- sition to a nematic collinear ground state at T f = 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
65
+ page_content='1 K, in accordance with predictions from the bilinear-biquadratic model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
66
+ page_content='2) The excitations at T > T f are gapless and Lorentzian in form, as is also the case for MgCr2O4, HgCr2O4, and ZnCr2O4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
67
+ page_content=' Below T f , the spectral weight shifts to the elastic line and an inelastic feature at ∼5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
68
+ page_content='8 meV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
69
+ page_content=' We identify the lat- ter with spin precession within antiferromagnetic hexagonal spin clusters created by the nematic order, and lifted to finite energy by the biquadratic term.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
70
+ page_content=' We thus provide, for the first time, a plausible link between the magnetic excitations and magnetic structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
71
+ page_content=' The remaining spectral weight appears to be consistent with collective spin-wave-like excitations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
72
+ page_content=' The powder sample of LiGa0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
73
+ page_content='95In0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
74
+ page_content='05Cr4O8 was prepared by sintering a stoichiometric mixture of the two end-member compounds LiGaCr4O8 and LiInCr4O8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
75
+ page_content='12) These were in turn prepared by the standard solid-state route,10) using starting materials enriched with 7Li to reduce neutron absorption.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
76
+ page_content=' For our inelastic neutron scattering measurements, 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
77
+ page_content='1 g of pow- der was packed in an Al sachet, which was rolled into an annu- lus and loaded into an Al can with � = 45 mm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
78
+ page_content=' The measure- ments were performed on the MARI direct-geometry time-of- flight chopper spectrometer at the ISIS facility, UK, using in- cident energies Ei = 10, 16, 25, and 35 meV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
79
+ page_content=' For all values of Ei, the elastic energy resolution was close to ∆E/E ∼ 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
80
+ page_content='5%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
81
+ page_content=' Temperatures between 5 and 300 K were accessed using a closed-cycle refrigerator.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
82
+ page_content=' The diffraction measurements re- ported in Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
83
+ page_content=' 2 were performed on the same sample.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
84
+ page_content=' The dynamical structure factors S (Q, E = Ei − E f ) mea- sured at four selected temperatures between 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
85
+ page_content='2 K and 152 K are shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
86
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
87
+ page_content=' We begin with an analysis of the data taken above T f : at T = 152 K≫ T f , a quasi-elastic rod of scattering extending up to ∼15 meV is observed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
88
+ page_content=' This is characteristic of diffusive spin excitations in the corre- lated paramagnetic state,13) and resembles the S (Q, E) of both LiInCr4O814) and LiGaCr4O815) at similar temperatures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
89
+ page_content=' The intensity of the rod is centered around 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
90
+ page_content='6 Å−1, corresponding approximately to the reciprocal space position of the Cr-Cr nearest-neighbor distance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
91
+ page_content=' Upon cooling to 50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
92
+ page_content='4 K, intensity builds up near the elastic line, again as in LiGaCr4O8, but in contrast to LiInCr4O8, where the scattering becomes inelas- tic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
93
+ page_content='14) The modulation of the quasi-elastic scattering is also enhanced, indicating the development of longer-ranged spin- spin correlations ⟨S (0) · S (r)⟩, as may be seen in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
94
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
95
+ page_content=' To determine the spatial extent of the correlations, we fit the Q-dependence of the scattering integrated between 2 and 7 meV (≃ S (Q) at high temperature) to a shell model [Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
96
+ page_content=' 2(a)]: S (Q) = f(Q)2 � i ⟨S (0) · S (ri)⟩ Ni sin(Qri) Qri , (2) where f(Q) is the magnetic form factor for Cr3+, and Ni is the coordination number of the ith shell at radial distance ri.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
97
+ page_content=' For simplicity, r1 is approximated as the mean of the r1 and r′ 1 distances.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
98
+ page_content=' The summation in the fitting function was extended to the third neighboring shell, at which point the fit quality did not increase.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
99
+ page_content=' The extracted parameters reveal antiferromag- 2 MAR20556Reduced SQW T= 50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
100
+ page_content='4 K Energy transfer (me V) 10 0 1 2 3 4 5 Q(A-1)MAR20554Reduced SQW T= 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
101
+ page_content='4 K Energy transfer (me V) 10 0 1 2 3 5 Q(A-1)MAR20551Reduced SQW T= 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
102
+ page_content='2 K Energy transfer (me V) 10 0 1 2 3 4 Q(A-1)MAR20560Reduced SQW T= 152 K Energy transfer (me V) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
103
+ page_content='10 0 1 2 3 4 Q(A-1)J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
104
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
105
+ page_content=' Soc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
106
+ page_content=' Jpn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
107
+ page_content=' LETTERS 1 2 3 4 Q ( ˚A−1) S(Q) (a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
108
+ page_content='u.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
109
+ page_content=') 152 K 75 K 50 K 24 K 18 K 14 K 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
110
+ page_content='4 K 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
111
+ page_content='2 K (a) E = 2−7 meV 0 2 4 6 8 10 12 r ( ˚A) −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
112
+ page_content='3 −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
113
+ page_content='2 −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
114
+ page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
115
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
116
+ page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
117
+ page_content='2 ⟨S(0)·S(r)⟩/S(S+1) (b) RMC 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
118
+ page_content='5 K 30 K 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
119
+ page_content='2 K 24 K 152 Hexamer cluster Hexamer cluster Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
120
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
121
+ page_content=' (Color online) (a) Q dependence of the magnetic scattering I(Q), integrated over the energy range 2-7 meV at different temperatures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
122
+ page_content=' Dashed lines are fitting curves calculated from the shell model (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
123
+ page_content=' (2)) with the first three nearest neighbors and flat backgrounds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
124
+ page_content=' Solid red lines show the results of structure factor calculations for hexagonal chromium rings at T < 20 K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
125
+ page_content=' (b) Real space spin-spin correlation functions ⟨S (0) · S (ri)⟩ versus r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
126
+ page_content=' Solid circles are obtained from the fits to the shell model in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
127
+ page_content=' 2(a), and the open triangles are obtained by the reverse Monte Carlo (RMC) simulation on the magnetic diffuse scattering observed in the elastic ND measurement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
128
+ page_content='2) Green stars mark the correlations for an isolated hexagonal antiferromagnetic loop (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
129
+ page_content=' 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
130
+ page_content=' netic nearest-neighbor spin-spin correlations, with progres- sively weaker alternating ferro- and antiferromagnetic cor- relations for the second and third nearest neighbors, respec- tively [Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
131
+ page_content=' 2(b)].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
132
+ page_content=' The extracted correlations are thus con- sistent with the reverse Monte Carlo results presented in Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
133
+ page_content=' 2, where energy-integrated data from a diffractometer were used;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
134
+ page_content=' i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
135
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
136
+ page_content=', the true S (Q) was reflected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
137
+ page_content=' The temperature dependence of the parameters indicates smooth growth of the spin-spin correlations in the entire temperature range, as ex- pected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
138
+ page_content=' To analyze the temperature dependence of the quasi-elastic feature further, the imaginary part of the magnetic dynamic susceptibility χ′′ was calculated by applying the fluctuation dissipation theorem16) χ′′(Q, E) = π(1 − e− E kBT )S (Q, E) to the E dependence of the intensity integrated over the Q range 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
139
+ page_content='1 − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
140
+ page_content='9 Å−1 [Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
141
+ page_content=' 3(a)].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
142
+ page_content=' At T > T f , the contribution of elas- tic scattering is subtracted by approximating it with a sharp Gaussian centered around E = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
143
+ page_content=' The obtained χ′′(ω) are well fit by a quasi-elastic Lorentzian χ′′(ω) = χ′ωΓ/(ω2 + Γ2), 20 15 10 5 0 χ" (arb.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
144
+ page_content=' unit) 10 8 6 4 2 0 E transfer (meV) 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
145
+ page_content='2 K 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
146
+ page_content="4 K 14 K 18 K 24 K 50 K 75 K 152 K 12 10 8 6 4 2 0 Γ (meV) 160 120 80 40 0 T (K) 30 20 10 0 χ' (arb." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
147
+ page_content=' units) (a) (b) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
148
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
149
+ page_content=' (Color online) (a) Energy dependence of the dynamic susceptibil- ity χ′′(ω), integrated over the Q range 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
150
+ page_content='1-1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
151
+ page_content='9 Å−1 for all measured tem- peratures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
152
+ page_content=' The elastic line was subtracted from each dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
153
+ page_content=' The solid lines are resolution-broadened quasi-elastic Lorentzian fits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
154
+ page_content=' (b) Temperature de- pendence of the inverse relaxation rate Γ and the static susceptibility χ’ as determined by quasi-elastic Lorentzian fitting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
155
+ page_content=' The solid and dotted lines are a linear and power-law curve fits to Γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
156
+ page_content=' which is the time-Fourier transform of an exponential de- cay exp(−t/τ), with τ ∝ 1/Γ and χ′ the static susceptibil- ity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
157
+ page_content=' On cooling below 18 K, the Lorentzian fits become poor at E < 2 meV, indicating that the scattering is no longer de- scribed by a single relaxation process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
158
+ page_content=' This coincides with the appearance of a stretching exponent β < 1 in fits of the T1 re- laxation process,17) and thus is likely connected with the onset of critical fluctuations above T f .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
159
+ page_content=' Figure 3(b) shows the temperature dependence of the in- verse relaxation time Γ and the static susceptibility χ′ ex- tracted from the fits described above.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
160
+ page_content=' Γ decreases smoothly in the temperature range 18 K, and is well described by a power law Γ ∝ T γ with γ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
161
+ page_content='66 (dashed line).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
162
+ page_content=' For Heisen- berg spins on the isotropic pyrochlore lattice, theory predicts Γ ∝ T (γ = 1);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
163
+ page_content='13,18,19) however, a linear fit to the data (solid line) is poor at high temperature, even permitting a nonzero intercept Γ0 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
164
+ page_content='09 meV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
165
+ page_content=' Although a similar reduction of γ has also been observed in ZnCr2O4 (γ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
166
+ page_content='81), the cause re- mains unclear.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
167
+ page_content='3) Aside from this, χ′ is consistent with the bulk susceptibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
168
+ page_content=' Turning now to the form of S (Q, E) below T f shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
169
+ page_content=' 1(d), most of the high-temperature quasi-elastic scatter- ing shifts either towards the elastic line or to an inelastic fea- ture centered around 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
170
+ page_content='8 meV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
171
+ page_content=' The latter is similar to the “res- onance” observed in LiGaCr4O815) and other spinels, but is considerably broader in energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
172
+ page_content=' Like the resonance, however, its structure factor suggests local modes on small antiferro- magnetic spin loops.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
173
+ page_content=' An analysis of the reverse Monte Carlo spin configurations derived from fits to S (Q) in our previ- ous publication2) identifies these with a large number of six- membered hexagonal antiferromagnetic spin loops, as well as a few with eight or more members.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
174
+ page_content=' Indeed, the calculated structure factor for the hexagonal rings (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
175
+ page_content=' 4) agrees almost perfectly with that of the energy-integrated data in Figure 2(a), also accounting for the variation of ⟨S (0) · S (ri)⟩ versus r from the model-independent fits above.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
176
+ page_content=' As shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
177
+ page_content=' 4, hexagonal antiferromagnetic spin loops are only possible in the presence of three types (colors) of collinear state on the Cr3+ tetrahedra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
178
+ page_content='5) By analogy with the coplanar nematic state in the kagome lattice antiferromagnet,20–22) collinear nematic states on the 3 J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
179
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
180
+ page_content=' Soc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
181
+ page_content=' Jpn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
182
+ page_content=' LETTERS Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
183
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
184
+ page_content=' (Color online) Hexamer loop determined within the breathing py- rochlore lattice (cyan bonds).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
185
+ page_content=' Spheres represent Cr3+ ions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
186
+ page_content=' RBG coloring of the bonds and vertices of the tetrahedra corresponds to the bond ordering de- scribed in Refs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
187
+ page_content=' 2 and 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
188
+ page_content=' Cyan arrows represent antiferromagnetically coupled spins on the nodes of hexagonal cluster precessing around the easy direction of nematic phase (dashed black lines).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
189
+ page_content=' pyrochlore lattice support two types of loop excitations: (i) loop flips, which invert the moment directions around the loop, hence transforming one nematic ground state configura- tion to another, and (ii) “weathervane” modes, small displace- ments of the moment direction about the equilibrium direction [Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
190
+ page_content=' 4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
191
+ page_content=' The former, related to the diffusive high-temperature excitations, is expected to produce a quasi-elastic signal with a temperature-dependent width, and thus cannot account for the inelastic feature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
192
+ page_content=' As such, we tentatively assign the fea- ture to weathervane modes on the hexagonal loops.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
193
+ page_content=' Consid- ering only the bilinear term, the ground state criterion of two spins up and two down on each tetrahedron results in a zero net exchange field for the spins around the hexagon, and the weathervane modes therefore carry no energy cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
194
+ page_content=' When the biquadratic (magneto-elastic) and other long-ranged terms are included, however, they are lifted to finite energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
195
+ page_content=' In particu- lar, inserting the bilinear-biquadratic Hamiltonian (1) into the classical equation of motion dSi(t) dt = −1 ℏSi(t) × ∇Si(t)H (3) results in an energy gap ∆E ≃ 8bavS 3, where bav is the aver- age bilinear-biquadratic coupling constant between the small and large tetrahedra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
196
+ page_content=' In deriving this expression, we assumed that there is no coupling between the loops and S z i(t) ≃ S ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
197
+ page_content=' i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
198
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
199
+ page_content=', the spin displacements are small.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
200
+ page_content=' From Jav = (J + J′)/2 = 45 K estimated from the magnetic susceptibility and the ex- perimental excitation energy, we obtain bav ∼ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
201
+ page_content='05Jav, which is close to the b reported for related materials.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
202
+ page_content='23) In addi- tion, using T f ≃ bS 4 for the isotropic pyrochlore lattice,24) bav ∼ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
203
+ page_content='05Jav yields T f ∼ 12 K, which is in excellent agree- ment with experiment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
204
+ page_content=' Now we address the large width of the feature relative to the much sharper features observed in other spinels: could this be due to the disorder inherent to the nematic state?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
205
+ page_content=' Below T f , the Cr-Cr bond lengths, and hence the biquadratic bond energies, are expected to follow a Gaussian distribution (is indeed found for the d-spacings in [2]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
206
+ page_content=' The resulting spec- trum is then broadened by σ(bav), the FWHM of the Gaus- sian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
207
+ page_content=' The experimental feature at 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
208
+ page_content='8 meV is approximately Gaussian, with an FWHM of ∼2 meV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
209
+ page_content=' To reproduce this, the distribution of mean Cr-Cr bond lengths around a spin loop is required to be ∼0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
210
+ page_content='1 Å wide, assuming a linear relationship between the exchange and the Cr-Cr distance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
211
+ page_content=' This is larger by approximately a factor of 4 than the distribution estimated from Rietveld refinements, which, however, ignore any local structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
212
+ page_content=' The significant amount of inelastic and quasi-elastic spec- tral weight at energies above and below the 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
213
+ page_content='8 meV feature, may be associated with other excitations (also observed on the kagome lattice), including the loop flips mentioned above and longer-ranged spin-wave-like excitations (which may ex- tend to much higher energies), perhaps belonging to the short- range magnetic order superimposed on the nematic state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
214
+ page_content=' The long high-energy tail of the inelastic scattering, extending to ∼15 meV, is certainly compatible with the latter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
215
+ page_content=' Loop flips, on the other hand, are expected to give a quasi-elastic signal of width ∝ 1/ exp(−b/T).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
216
+ page_content=' Ultimately, single-crystal studies and spin dynamics simulations of the bilinear-biquadratic model with disorder on the present lattice will be required to disen- tangle all the contributions to the excitation spectrum in the nematic phase.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
217
+ page_content=' Looking beyond the breathing pyrochlores, many features of the LiGa1−xInxCr4O8 series are shared with the undistorted ZnxCd1−xCr2O4 family.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
218
+ page_content='8,25) Starting with the x − T phase di- agrams, the introduction of bond disorder by even vestigial doping is found to lead to the suppression of the N´eel phase and adoption of a disordered frozen state at small x in both cases, as also observed in Monte Carlo simulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
219
+ page_content='24) The persistence of a sharp phase transition in the specific heat, despite glassy behavior in the magnetic susceptibility, is also common to both systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
220
+ page_content=' These commonalities suggest the intriguing possibility that ZnxCd1−xCr2O4 with x < 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
221
+ page_content='1 and other similar systems also exhibit nematic transitions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
222
+ page_content='24) Comparing the x = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
223
+ page_content='05 compositions of both families, In0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
224
+ page_content='05 and Cd0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
225
+ page_content='05, the form of the scattering is at first glance nearly identical above and below the transitions at T f .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
226
+ page_content=' How- ever, the dynamic susceptibility χ′′(E) of In0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
227
+ page_content='05 is describable using only one relaxation rate down to 18 K ∼ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
228
+ page_content='6T f , while that of Cd0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
229
+ page_content='05 requires a distribution of relaxation rates already below 4T f .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
230
+ page_content='8) This is indicative of a stronger doping effect in the latter case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
231
+ page_content=' In regard to the gap in S (Q, E) at T < T f , ∆E is 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
232
+ page_content='5 meV in Cd0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
233
+ page_content='05 versus 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
234
+ page_content='8 meV in In0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
235
+ page_content='05, giving a ratio close to that of the exchange couplings in the two systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
236
+ page_content=' Given the similar b/J, this could point to a similar physical origin for the gap.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
237
+ page_content=' On the other hand, non-collinearity or strong further neighbor couplings could also generate a nonzero exchange field around a hexagon, and the former is thought to be fa- vored by bond disorder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
238
+ page_content='26) Indeed, flat features in the inelastic scattering are also observed in Y2Ru2O7 and ZnCr2O4, where non-collinear orders have been proposed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
239
+ page_content=' We finally note that although inelastic resonances have been interpreted as quantum two-level excitations in the past,4) they should not be considered as such in the present case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
240
+ page_content=' This is because the singlet-triplet gap is rapidly sup- pressed by both further neighbor couplings and a negative bi- quadratic exchange.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
241
+ page_content=' In addition to this, none of the expected higher multiplets are observed at any temperature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
242
+ page_content=' We have presented an inelastic neutron scattering study of the spin dynamics in the classical spin nematic mate- rial LiGa0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
243
+ page_content='95In0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
244
+ page_content='05Cr4O8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
245
+ page_content=' The high-temperature dynamics are 4 J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
246
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
247
+ page_content=' Soc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
248
+ page_content=' Jpn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
249
+ page_content=' LETTERS quasi-elastic and resemble those observed in other pyrochlore systems, while the excitation spectrum below the transition at T f = 11 K is dominated by a broad, non-dispersive inelastic feature at 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
250
+ page_content='8 meV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
251
+ page_content=' A plausible origin for this feature mode is the so-called weathervane modes on hexagonal antiferromag- netic loops (abundant in the nematic state), which are lifted to finite energy by the biquadratic term that induces the ne- matic order.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
252
+ page_content=' Possible collective excitations with a bandwidth of 15 meV are also observed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
253
+ page_content=' In order to verify this interpreta- tion, more detailed spin dynamics simulations of the bilinear- biquadratic model on the breathing pyrochlore lattice will be required.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
254
+ page_content=' Acknowledgments We thank Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
255
+ page_content=' Motome, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
256
+ page_content=' Shinaoka and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
257
+ page_content=' Gingras for fruitful discussions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
258
+ page_content=' This work was supported by JSPS KAKENHI (Grant Nos.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
259
+ page_content=' 25287083 and 16J01077).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
260
+ page_content=' Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
261
+ page_content='T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
262
+ page_content=' was supported by the JSPS through the Program for Leading Graduate Schools (MERIT).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
263
+ page_content=' 1) R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
264
+ page_content=' Moessner: Can.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
265
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
266
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
267
+ page_content=' 79 (2001) 1283.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
268
+ page_content=' 2) R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
269
+ page_content=' Wawrzy´nczak, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
270
+ page_content=' Tanaka, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
271
+ page_content=' Yoshida, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
272
+ page_content=' Okamoto, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
273
+ page_content=' Manuel, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
274
+ page_content=' Casati, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
275
+ page_content=' Hiroi, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
276
+ page_content=' Takigawa, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
277
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
278
+ page_content=' Nilsen: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
279
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
280
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
281
+ page_content=' 119 (2017) 087201.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
282
+ page_content=' 3) S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
283
+ page_content='-H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
284
+ page_content=' Lee, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
285
+ page_content=' Broholm, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
286
+ page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
287
+ page_content=' Kim, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
288
+ page_content=' Ratcliff, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
289
+ page_content='-W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
290
+ page_content=' Cheong: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
291
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
292
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
293
+ page_content=' 84 (2000) 3718.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
294
+ page_content=' 4) K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
295
+ page_content=' Tomiyasu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
296
+ page_content=' Ueda, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
297
+ page_content=' Matsuda, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
298
+ page_content=' Yokoyama, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
299
+ page_content=' Iwasa, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
300
+ page_content=' Yamada: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
301
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
302
+ page_content=' B 84 (2011) 035115.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
303
+ page_content=' 5) O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
304
+ page_content=' Tchernyshyov, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
305
+ page_content=' Moessner, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
306
+ page_content=' Sondhi: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
307
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
308
+ page_content=' B 66 (2002) 064403.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
309
+ page_content=' 6) N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
310
+ page_content=' Shannon, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
311
+ page_content=' Penc, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
312
+ page_content=' Motome: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
313
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
314
+ page_content=' B 81 (2010) 184409.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
315
+ page_content=' 7) K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
316
+ page_content=' Tomiyasu, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
317
+ page_content=' Yokobori, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
318
+ page_content=' Kousaka, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
319
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
320
+ page_content=' Bewley, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
321
+ page_content=' Guidi, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
322
+ page_content=' Watanabe, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
323
+ page_content=' Akimitsu, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
324
+ page_content=' Yamada: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
325
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
326
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
327
+ page_content=' 110 (2013) 077205.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
328
+ page_content=' 8) W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
329
+ page_content=' Ratcliff, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
330
+ page_content='-H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
331
+ page_content=' Lee, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
332
+ page_content=' Broholm, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
333
+ page_content='-W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
334
+ page_content=' Cheong, and Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
335
+ page_content=' Huang: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
336
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
337
+ page_content=' B 65 (2002) 220406.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
338
+ page_content=' 9) K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
339
+ page_content=' Tomiyasu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
340
+ page_content=' Suzuki, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
341
+ page_content=' Toki, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
342
+ page_content=' Itoh, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
343
+ page_content=' Matsuura, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
344
+ page_content=' Aso, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
345
+ page_content=' Yamada: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
346
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
347
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
348
+ page_content=' 101 (2008) 177401.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
349
+ page_content=' 10) Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
350
+ page_content=' Okamoto, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
351
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
352
+ page_content=' Nilsen, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
353
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
354
+ page_content=' Attfield, and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
355
+ page_content=' Hiroi: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
356
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
357
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
358
+ page_content=' 110 (2013) 097203.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
359
+ page_content=' 11) Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
360
+ page_content=' Tanaka, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
361
+ page_content=' Yoshida, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
362
+ page_content=' Takigawa, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
363
+ page_content=' Okamoto, and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
364
+ page_content=' Hiroi: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
365
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
366
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
367
+ page_content=' 113 (2014) 227204.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
368
+ page_content=' 12) Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
369
+ page_content=' Okamoto, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
370
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
371
+ page_content=' Nilsen, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
372
+ page_content=' Nakazano, and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
373
+ page_content=' Hiroi: J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
374
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
375
+ page_content=' Soc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
376
+ page_content=' Jpn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
377
+ page_content=' 84 (2015) 043707.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
378
+ page_content=' 13) P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
379
+ page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
380
+ page_content=' Conlon and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
381
+ page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
382
+ page_content=' Chalker: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
383
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
384
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
385
+ page_content=' 102 (2009) 237206.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
386
+ page_content=' 14) G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
387
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
388
+ page_content=' Nilsen, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
389
+ page_content=' Okamoto, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
390
+ page_content=' Masuda, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
391
+ page_content=' Rodriguez-Carvajal, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
392
+ page_content=' Mutka, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
393
+ page_content=' Hansen, and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
394
+ page_content=' Hiroi: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
395
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
396
+ page_content=' B 91 (2015) 174435.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
397
+ page_content=' 15) G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
398
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
399
+ page_content=' Nilsen, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
400
+ page_content=' Okamoto, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
401
+ page_content=' Tassel, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
402
+ page_content=' Masuda, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
403
+ page_content=' Mutka, and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
404
+ page_content=' Hiroi, ILL Experimental Report 5-31-2275, Available at: https://userclub.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
405
+ page_content='ill.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
406
+ page_content='eu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
407
+ page_content=' 16) S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
408
+ page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
409
+ page_content=' Lovesey: Theory of neutron scattering from condensed matter (Clarendon Press, 1984).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
410
+ page_content=' 17) See the supplementary materials of Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
411
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
412
+ page_content=' 18) R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
413
+ page_content=' Moessner and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
414
+ page_content=' Chalker: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
415
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
416
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
417
+ page_content=' 80 (1998) 2929.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
418
+ page_content=' 19) J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
419
+ page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
420
+ page_content=' Reimers, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
421
+ page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
422
+ page_content=' Greedan, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
423
+ page_content=' Bj¨orgvinsson: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
424
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
425
+ page_content=' B 45 (1992) 7295.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
426
+ page_content=' 20) J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
427
+ page_content=' von Delft and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
428
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
429
+ page_content=' Henley: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
430
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
431
+ page_content=' B 48 (1993) 965.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
432
+ page_content=' 21) M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
433
+ page_content=' Taillefumier, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
434
+ page_content=' Robert, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
435
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
436
+ page_content=' Henley, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
437
+ page_content=' Moessner, and B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
438
+ page_content=' Canals: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
439
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
440
+ page_content=' B 90 (2014) 064419.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
441
+ page_content=' 22) Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
442
+ page_content=' Wan and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
443
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
444
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
445
+ page_content=' Gingras: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
446
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
447
+ page_content=' B 94 (2016) 174417.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
448
+ page_content=' 23) A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
449
+ page_content=' Miyata, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
450
+ page_content=' Ueda, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
451
+ page_content=' Ueda, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
452
+ page_content=' Motome, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
453
+ page_content=' Shannon, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
454
+ page_content=' Penc, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
455
+ page_content=' Takeyama: J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
456
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
457
+ page_content=' Soc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
458
+ page_content=' Jpn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
459
+ page_content=' 80 (2011) 074709.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
460
+ page_content=' 24) H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
461
+ page_content=' Shinaoka, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
462
+ page_content=' Tomita, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
463
+ page_content=' Motome: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
464
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
465
+ page_content=' B 90 (2014) 165119.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
466
+ page_content=' 25) H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
467
+ page_content=' Martinho, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
468
+ page_content=' O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
469
+ page_content=' Moreno, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
470
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
471
+ page_content=' Sanjurjo, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
472
+ page_content=' Rettori, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
473
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
474
+ page_content=' Garc´ıa- Adeva, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
475
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
476
+ page_content=' Huber, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
477
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
478
+ page_content=' Oseroff, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
479
+ page_content=' Ratcliff, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
480
+ page_content='-W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
481
+ page_content=' Cheong, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
482
+ page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
483
+ page_content=' Pagliuso, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
484
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
485
+ page_content=' Sarrao, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
486
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
487
+ page_content=' Martins: Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
488
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
489
+ page_content=' B 64 (2001) 024408.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
490
+ page_content=' 26) L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
491
+ page_content=' Bellier-Castella, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
492
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
493
+ page_content=' Gingras, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
494
+ page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
495
+ page_content=' Holdsworth, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
496
+ page_content=' Moessner: Canadian Journal of Physics 79 (2001) 1365.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
497
+ page_content=' 5' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2NE4T4oBgHgl3EQfagwY/content/2301.05064v1.pdf'}
2dE1T4oBgHgl3EQfAAKW/content/2301.02834v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b30bfce7d822ed24c8723a8f040b25ca1fc1e884c60c387acee94929a265604
3
+ size 333034
2dE1T4oBgHgl3EQfAAKW/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f945d5f879d5754ab718969ec2dcf9b8d208b17b435ef1846a5009a8db3aaba2
3
+ size 3407917
2dE1T4oBgHgl3EQfAAKW/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6654267fbde6822facf9afcf1a69d770f377d1fca50c4d30dd0abe5402cb8aa4
3
+ size 110042
3dFQT4oBgHgl3EQf3TZi/content/tmp_files/2301.13427v1.pdf.txt ADDED
@@ -0,0 +1,1617 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.13427v1 [math.OC] 31 Jan 2023
2
+ Disciplined Saddle Programming
3
+ Philipp Schiele∗1, Eric Luxenberg∗2, and Stephen Boyd2
4
+ 1Department of Statistics, Ludwig-Maximilians-Universit¨at M¨unchen
5
+ 2Department of Electrical Engineering, Stanford University
6
+ February 1, 2023
7
+ Abstract
8
+ We consider convex-concave saddle point problems, and more generally convex opti-
9
+ mization problems we refer to as saddle problems, which include the partial supremum
10
+ or infimum of convex-concave saddle functions. Saddle problems arise in a wide range
11
+ of applications, including game theory, machine learning, and finance. It is well known
12
+ that a saddle problem can be reduced to a single convex optimization problem by dual-
13
+ izing either the convex (min) or concave (max) objectives, reducing a min-max problem
14
+ into a min-min (or max-max) problem. Carrying out this conversion by hand can be
15
+ tedious and error prone. In this paper we introduce disciplined saddle programming
16
+ (DSP), a domain specific language (DSL) for specifying saddle problems, for which the
17
+ dualizing trick can be automated. The language and methods are based on recent work
18
+ by Juditsky and Nemirovski [JN22], who developed the idea of conic-representable sad-
19
+ dle point programs, and showed how to carry out the required dualization automatically
20
+ using conic duality. Juditsky and Nemirovski’s conic representation of saddle problems
21
+ extends Nesterov and Nemirovski’s earlier development of conic representable convex
22
+ problems; DSP can be thought of as extending disciplined convex programming (DCP)
23
+ to saddle problems. Just as DCP makes it easy for users to formulate and solve com-
24
+ plex convex problems, DSP allows users to easily formulate and solve saddle problems.
25
+ Our method is implemented in an open-source package, also called DSP.
26
+ ∗Equal contribution.
27
+ 1
28
+
29
+ Contents
30
+ 1
31
+ Introduction
32
+ 3
33
+ 1.1
34
+ Previous and related work . . . . . . . . . . . . . . . . . . . . . . . . . . . .
35
+ 4
36
+ 1.2
37
+ Outline . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
38
+ 5
39
+ 2
40
+ Saddle programming
41
+ 5
42
+ 2.1
43
+ Saddle functions . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
44
+ 5
45
+ 2.2
46
+ Saddle point problems
47
+ . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
48
+ 7
49
+ 2.3
50
+ Saddle extremum functions . . . . . . . . . . . . . . . . . . . . . . . . . . . .
51
+ 7
52
+ 2.4
53
+ Saddle problems . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
54
+ 8
55
+ 2.5
56
+ Solving saddle problems
57
+ . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
58
+ 9
59
+ 2.6
60
+ Dual reduction
61
+ . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
62
+ 10
63
+ 3
64
+ Applications
65
+ 10
66
+ 3.1
67
+ Robust bond portfolio construction . . . . . . . . . . . . . . . . . . . . . . .
68
+ 11
69
+ 3.2
70
+ Model fitting robust to data weights . . . . . . . . . . . . . . . . . . . . . . .
71
+ 11
72
+ 3.3
73
+ Robust production problem with worst case prices . . . . . . . . . . . . . . .
74
+ 12
75
+ 3.4
76
+ Robust Markowitz portfolio construction . . . . . . . . . . . . . . . . . . . .
77
+ 13
78
+ 4
79
+ Disciplined saddle point programming
80
+ 14
81
+ 4.1
82
+ Saddle function calculus
83
+ . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
84
+ 14
85
+ 4.2
86
+ Conically representable saddle functions
87
+ . . . . . . . . . . . . . . . . . . . .
88
+ 14
89
+ 5
90
+ Implementation
91
+ 16
92
+ 5.1
93
+ Atoms . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
94
+ 16
95
+ 5.2
96
+ Calculus rules . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
97
+ 18
98
+ 5.3
99
+ Saddle point problems
100
+ . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
101
+ 19
102
+ 5.4
103
+ Saddle extremum functions . . . . . . . . . . . . . . . . . . . . . . . . . . . .
104
+ 21
105
+ 5.5
106
+ Saddle problems . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
107
+ 22
108
+ 6
109
+ Examples
110
+ 23
111
+ 6.1
112
+ Robust bond portfolio construction . . . . . . . . . . . . . . . . . . . . . . .
113
+ 23
114
+ 6.2
115
+ Model fitting robust to data weights . . . . . . . . . . . . . . . . . . . . . . .
116
+ 25
117
+ 6.3
118
+ Robust Markowitz portfolio construction . . . . . . . . . . . . . . . . . . . .
119
+ 27
120
+ 2
121
+
122
+ 1
123
+ Introduction
124
+ We consider saddle problems, by which we mean convex-concave saddle point problems or,
125
+ more generally, convex optimization problems that include the partial supremum or infimum
126
+ of convex-concave saddle functions. Saddle problems arise in various fields such as game
127
+ theory, robust and minimax optimization, machine learning, and finance.
128
+ While there are algorithms specifically designed to solve some types of saddle point or
129
+ minimax problems, another approach is to convert them into standard convex optimization
130
+ problems using a trick based on duality that can be traced back to at least the 1920s. The
131
+ idea is to express the infima or suprema that appear in the saddle problem via their duals,
132
+ which converts them to suprema or infima, respectively. Roughly speaking, this turns a min-
133
+ max problem into a min-min (or max-max) problem, which can then be solved by standard
134
+ methods. Specific cases of this trick are well known; the classical example is converting a
135
+ matrix game, a specific saddle point problem, into a linear program (LP) [MVN53]. While
136
+ the dualizing trick has been known and used for almost 100 years, it has always been done
137
+ by hand, for specific problems. It can only be carried out by those who have a working
138
+ knowledge of duality in convex optimization, and are aware of the trick.
139
+ In this paper we propose an automated method for carrying out the dualizing trick. Our
140
+ method is based on the theory of conic representation of saddle point problems, developed
141
+ recently by Juditsky and Nemirovski [JN22]. Based on this development, we have designed
142
+ a domain specific language (DSL) for describing saddle problems, which we refer to as dis-
143
+ ciplined saddle programming (DSP). When a problem description complies with the syntax
144
+ rules, i.e., is DSP-compliant, it is easy to verify that it is a valid saddle problem, and more
145
+ importantly, automatically carry out the dualizing trick. We have implemented the DSL in
146
+ an open source software package, also called DSP, which works with CVXPY [DB16], a DSL
147
+ for specifying and solving convex optimization problems. DSP makes it easy to specify and
148
+ solve saddle problems, without any expertise in (or even knowledge of) convex duality. Even
149
+ for those with the required expertise to carry out the dualizing trick by hand, DSP is less
150
+ tedious and error prone.
151
+ DSP is disciplined, meaning it is based on a small number of syntax rules that, if followed,
152
+ guarantee that the specified problem is a valid saddle problem. It is analogous to disciplined
153
+ convex programming (DCP) [GBY06], which is a DSL for specifying convex optimization
154
+ problems. When a problem specification follows these syntax rules, i.e., is DCP-compliant, it
155
+ is a valid convex optimization problem, and more importantly can be automatically converted
156
+ to an equivalent cone program, and then solved. As a practical matter, DCP allows a large
157
+ number of users to specify and solve even complex convex optimization problems, with no
158
+ knowledge of the reduction to cone form. Indeed, most DCP users are blissfully unaware
159
+ of how their problems are solved, i.e., a reduction to cone form. DCP was based on the
160
+ theory of conic representations of convex functions and problems, pioneered by Nesterov
161
+ and Nemirovski [NN92]. Widely used implementations of DCP include CVXPY [DB16],
162
+ Convex.jl [Ude+14], CVXR [FNB20], YALMIP [Lof04], and CVX [GB14]. Like DCP did for
163
+ convex problems, DSP makes it easy to specify and solve saddle problems, with most users
164
+ unaware of the dualization trick and reduction used to solve their problems.
165
+ 3
166
+
167
+ 1.1
168
+ Previous and related work
169
+ Saddle problems.
170
+ Studying saddle problems is a long-standing area of research, resulting
171
+ in many theoretical insights, numerous algorithms for specific classes of problems, and a
172
+ large number of applications.
173
+ Saddle problems are often studied in the context of minimax or maximin optimization
174
+ [DM90; DP95], which, while dating back to the 1920s and the work of von Neumann and
175
+ Morgenstern on game theory [MVN53], continue to be active areas of research, with many
176
+ recent advancements for example in machine learning [Goo+14]. A variety of methods have
177
+ been developed for solving saddle point problems, including interior point methods [HT03;
178
+ Nem99], first-order methods [Kor76; Nem04; Nes07; NO09; CLO13], and second-order meth-
179
+ ods [NP06; Nes08], where many of these methods are specialized to specific classes of saddle
180
+ problems. Depending on the class of saddle problem, the methods differ in convergence rate.
181
+ For example, for the subset of smooth minimax problems, an overview of rates for different
182
+ curvature assumptions is given in [The+19]. Due to their close relation to Lagrange duality,
183
+ saddle problems are commonly studied in the context of convex analysis (see, for example,
184
+ [BV04, §5.4], [Roc70, §33–37], [RW09, §11.J], [BL06, §4.3]), with an analysis via monotone
185
+ operators given in [RY22].
186
+ The practical usefulness of saddle programming in many applications is also increas-
187
+ ingly well known. Many applications of saddle programming are robust optimization prob-
188
+ lems [BBC11; BTEGN09]. For example, in statistics, distributionally robust models can be
189
+ used when the true distribution of the data generating process is not known [DA19]. Another
190
+ common area of application is in finance, with [CPT18, §19.3–4] describing a range of finan-
191
+ cial applications that can be characterized as saddle problems. Similarly, [Boy+17; GI03;
192
+ LB00] describe variations of the classical portfolio optimization problem as saddle problems.
193
+ Disciplined convex programming.
194
+ DCP is a grammar for constructing optimization
195
+ problems that are provably convex, meaning that they can be solved globally, efficiently
196
+ and accurately.
197
+ It is based on the rule that the convexity of a function f is preserved
198
+ under composition if all inner expressions in arguments where f is nondecreasing are convex,
199
+ and all expressions where f is nonincreasing are concave, and all other expressions are
200
+ affine. A detailed description of the composition rule is given in [BV04, §3.2.4]. Using this
201
+ rule, functions can be composed from a small set of primitives, called atoms, where each
202
+ atom has known curvature, sign, and monotonicity. Every function that can be constructed
203
+ from these atoms according to the composition rule is convex, but the converse is not true.
204
+ The DCP framework has been implemented in many programming languages, including
205
+ MATLAB [GB14; Lof04], Python [DB16], R [FNB20], and Julia [Ude+14], and is used by
206
+ researchers and practitioners in a wide range of fields.
207
+ Well-structured convex-concave saddle point problems.
208
+ As mentioned earlier, dis-
209
+ ciplined saddle programming is based on Juditsky and Nemirovski’s recent work on well-
210
+ structured convex-concave saddle point problems [JN22].
211
+ 4
212
+
213
+ 1.2
214
+ Outline
215
+ In §2 we describe saddle programming, which includes the classical saddle point problem, as
216
+ well as convex problems that include functions described via partial minimization or maxi-
217
+ mization of a saddle function. We describe some typical applications of saddle programming
218
+ in §3. In §4 we describe disciplined saddle programming, which is a way to specify saddle
219
+ programs in such a way that validity is easy to verify, and the reduction to an equivalent
220
+ cone program can be automated. We describe our implementation in §5, showing how sad-
221
+ dle functions, saddle extremum functions, saddle point problems, and saddle problems are
222
+ specified. We present numerical examples in §6.
223
+ 2
224
+ Saddle programming
225
+ 2.1
226
+ Saddle functions
227
+ A saddle function (also referred to as a convex-concave saddle function) f : X × Y → R
228
+ is one for which f(·, y) is convex for any fixed y ∈ Y, and f(x, ·) is concave for any fixed
229
+ x ∈ X . The argument domains X ⊆ Rn and Y ⊆ Rm must be nonempty closed convex. We
230
+ refer to x as the convex variable, and y as the concave variable, of the saddle function f.
231
+ Examples.
232
+ • Functions of x or y alone. A convex function of x, or a concave function of y, are
233
+ trivial examples of saddle functions.
234
+ • Lagrangian of a convex optimization problem. The convex optimization problem
235
+ minimize
236
+ f0(x)
237
+ subject to
238
+ Ax = b,
239
+ fi(x) ≤ 0,
240
+ i = 1, . . . , m,
241
+ with variable x ∈ Rn, where f0, . . . , fm are convex and A ∈ Rp×n, has Lagrangian
242
+ L(x, ν, λ) = f(x) + νT (Ax − b) + λ1f1(x) + · · · + λnfm(x),
243
+ for λ ≥ 0 (elementwise). It is convex in x and affine (and therefore also concave) in
244
+ y = (ν, λ), so it is a saddle function with
245
+ X =
246
+
247
+ i=0,...,m
248
+ dom fi,
249
+ Y = Rm
250
+ + × Rp,
251
+ • Bi-affine function. The function f(x, y) = (Ax + b)T(Cy + d), with X = Rp and
252
+ Y = Rq, is evidently a saddle function. The inner product xT y is a special case of
253
+ a bi-affine function. For a bi-affine function, either variable can serve as the convex
254
+ variable, with the other serving as the concave variable.
255
+ 5
256
+
257
+ • Convex-concave inner product. The function f(x, y) = F(x)TG(y), where F : Rp →
258
+ Rn is a nonnegative elementwise convex function and G : Rq → Rn is a nonnegative
259
+ elementwise concave function.
260
+ • Weighted ℓ2 norm. The function
261
+ f(x, y) =
262
+ � n
263
+
264
+ i=1
265
+ yix2
266
+ i
267
+ �1/2
268
+ ,
269
+ with X = Rn and Y = Rn
270
+ +, is a saddle function.
271
+ • Weighted log-sum-exp. The function
272
+ f(x, y) = log
273
+ � n
274
+
275
+ i=1
276
+ yi exp xi
277
+
278
+ ,
279
+ with X = Rn and Y = Rn
280
+ +, is a saddle function.
281
+ • Weighted geometric mean. The function f(x, y) = �n
282
+ i=1 yxi
283
+ i , with X = Rn
284
+ + and Y =
285
+ Rn
286
+ +, is a saddle function.
287
+ • Quadratic form with quasi-semidefinite matrix. The function
288
+ f(x, y) =
289
+
290
+ x
291
+ y
292
+ �T �
293
+ P
294
+ S
295
+ ST
296
+ Q
297
+ � �
298
+ x
299
+ y
300
+
301
+ ,
302
+ where the matrix is quasi-semidefinite, i.e., P ∈ Sn
303
+ + (the set of symmetric positive
304
+ semidefinite matrices) and −Q ∈ Sn
305
+ +.
306
+ • Quadratic form. The function f(x, Y ) = xTY x, with X = Rn and Y = Sn
307
+ + (the set of
308
+ symmetric positive semidefinite n × n matrices), is a saddle function.
309
+ • As more esoteric example, the function f(x, Y ) = xTY 1/2x, with X = Rn and Y = Sn
310
+ +,
311
+ is a saddle function.
312
+ Combination rules.
313
+ Saddle functions can be combined in several ways to yield saddle
314
+ functions. For example the sum of two saddle functions is a saddle function, provided the
315
+ domains have nonempty intersection. A saddle function scaled by a nonnegative scalar is
316
+ a saddle function. Scaling a saddle function with a nonpositive scalar, and swapping its
317
+ arguments, yields a saddle function: g(x, y) = −f(y, x) is a saddle function provided f is.
318
+ Saddle functions are preserved by pre-composition of the convex and concave variables with
319
+ an affine function, i.e., if f is a saddle function, so is f(Ax+ b, Cx+ d). Indeed, the bi-affine
320
+ function is just the inner product with an affine pre-composition for each of the convex and
321
+ concave variables.
322
+ 6
323
+
324
+ 2.2
325
+ Saddle point problems
326
+ A saddle point (x⋆, y⋆) ∈ X × Y is any point that satisfies
327
+ f(x⋆, y) ≤ f(x⋆, y⋆) ≤ f(x, y⋆) for all x ∈ X , y ∈ Y.
328
+ (1)
329
+ In other words, x⋆ minimizes f(x, y⋆) over x ∈ X , and y⋆ maximizes f(x⋆, y) over y ∈ Y.
330
+ The basic saddle point problem is to find such a saddle point,
331
+ find x⋆, y⋆ which satisfy (1).
332
+ (2)
333
+ The value of the saddle point problem is f(x⋆, y⋆).
334
+ Existence of a saddle point for a saddle function is guaranteed, provided some technical
335
+ conditions hold. For example, Sion’s theorem [Sio58] guarantees the existence of a saddle
336
+ point when Y is compact. There are many other cases.
337
+ Examples.
338
+ • Matrix game. In a matrix game, player one chooses i ∈ {1, . . . , m}, and player two
339
+ chooses j ∈ {1, . . . , n}, resulting in player one paying player two the amount Cij. Player
340
+ one wants to minimize this payment, while player two wishes to maximize it. In a mixed
341
+ strategy, player one makes choices at random, from probabilities given by x and player
342
+ two makes independent choices with probabilities given by y. The expected payment
343
+ from player one to player two is then f(x, y) = xT Cy. With X = {x | x ≥ 0, 1Tx = 1},
344
+ and similarly for Y, a saddle point corresponds to an equilibrium, where no player can
345
+ improve her position by changing (mixed) strategy. The saddle point problem consists
346
+ of finding a stable equilibrium, i.e., an optimal mixed strategy for each player.
347
+ • Lagrangian. A saddle point of a Lagrangian of a convex optimization problem is a
348
+ primal-dual optimal pair for the convex optimization problem.
349
+ 2.3
350
+ Saddle extremum functions
351
+ Suppose f is a saddle function. The function G : X → R ∪ {∞} defined by
352
+ G(x) = sup
353
+ y∈Y
354
+ f(x, y),
355
+ x ∈ X ,
356
+ (3)
357
+ is called a saddle max function. Similarly, the function H : Y → R ∪ {−∞} defined by
358
+ H(x) = inf
359
+ x∈X f(x, y),
360
+ y ∈ Y,
361
+ (4)
362
+ is called a saddle min function. Saddle max functions are convex, and saddle min functions
363
+ are concave. We will use the term saddle extremum (SE) functions to refer to saddle max
364
+ or saddle min functions. Which is meant is clear from context, i.e., whether it is defined by
365
+ minimization (infimum) or maximization (supremum), or its curvature (convex or concave).
366
+ Note that in SE functions, we always maximize (or take supremum) over the concave variable,
367
+ and minimize (or take infimum) over the convex variable. This means that evaluating G(x)
368
+ os H(y) involves solving a convex optimization problem.
369
+ 7
370
+
371
+ Examples.
372
+ • Dual function. Minimizing a Lagrangian L(x, ν, λ) over x gives the dual function of
373
+ the original convex optimization problem.
374
+ • Maximizing a Lagrangian L(x, ν, λ) over y = (ν, λ) gives the objective function re-
375
+ stricted to the feasible set.
376
+ • Conjugate of a convex function. Suppose f is convex. Then g(x, y) = f(x) − xTy is a
377
+ saddle function, the Lagrangian of the problem of minimizing f subject to x = 0. Its
378
+ saddle min is the negative conjugate function: infx g(x, y) = −f ∗(y).
379
+ • Sum of k largest entries. Consider f(x, y) = xTy, with Y = {y | 0 ≤ y ≤ 1, 1Ty = k}.
380
+ The associated saddle max function G is the sum of the k largest entries of x.
381
+ Saddle points via SE functions.
382
+ A pair (x⋆, y⋆) is a saddle point of a saddle function f
383
+ if and only if x⋆ minimizes the convex SE function G in (3) over x ∈ X , and y⋆ maximizes
384
+ the concave SE function H defined in (4) over y ∈ Y. This means that we can find saddle
385
+ points, i.e., solve the saddle point problem (2), by solving the convex optimization problem
386
+ minimize
387
+ G(x)
388
+ subject to
389
+ x ∈ X ,
390
+ (5)
391
+ with variable x, and the convex optimization problem
392
+ maximize
393
+ H(y)
394
+ subject to
395
+ y ∈ Y,
396
+ (6)
397
+ with variable y. The problem (5) is called a minimax problem, since we are minimizing a
398
+ function defined as the maximum over another variable. The problem (6) is called a maximin
399
+ problem.
400
+ While the minimax problem (5) and maximin problem (6) are convex, they cannot be
401
+ directly solved by conventional methods, since the objectives themselves are defined by max-
402
+ imization and minimization, respectively. There are solution methods specifically designed
403
+ for minimax and maximin problems [LJJ20; MB09], but as we will see minimax problems
404
+ involving SE functions can be transformed to equivalent forms that can be directly solved
405
+ using conventional methods.
406
+ 2.4
407
+ Saddle problems
408
+ In this paper we consider convex optimization problems that include SE functions in the
409
+ objective or constraints, which we refer to as saddle problems. The convex problems that
410
+ solve the basic saddle point problem (5) and (6) are special cases, where the objective is an
411
+ 8
412
+
413
+ SE function. As another example consider the problem of minimizing a convex function φ
414
+ subject to the convex SE constraint H(y) ≤ 0, which can be expressed as
415
+ minimize
416
+ φ(x)
417
+ subject to
418
+ f(x, y) ≤ 0 for all y ∈ Y,
419
+ (7)
420
+ with variable x. The constraint here is called a semi-infinite constraint, since (when Y is not
421
+ a singleton) it can be thought of as an infinite collection of convex constraints, one for each
422
+ y ∈ Y [HK93].
423
+ Saddle problems include the minimax and maximin problems (that can be used to solve
424
+ the saddle point problem), and semi-infinite problems that involve SE functions. There are
425
+ many other examples of saddle problems, where SE functions can appear in expressions that
426
+ define the objective and constraints.
427
+ Robust cost LP.
428
+ As a more specific example of a saddle problem consider the linear
429
+ program with robust cost,
430
+ minimize
431
+ supc∈C cTx
432
+ subject to
433
+ Ax = b,
434
+ x ≥ 0,
435
+ (8)
436
+ with variable x ∈ Rn, with C = {c | Fc ≤ g}. This is an LP with worst case cost over the
437
+ polyhedron C [BBC11; BTEGN09]. This is a saddle problem with convex variable x, concave
438
+ variable y, and an objective which is a saddle max function.
439
+ 2.5
440
+ Solving saddle problems
441
+ Special cases with tractable analytical expressions.
442
+ There are cases where an SE
443
+ function can be worked out analytically. An example is the max of a linear function over a
444
+ box,
445
+ sup_{l≤y≤u} yTx = (1/2)(u + l)Tx + (1/2)(u − l)T|x|,
448
+ where the absolute value is elementwise. We will see other cases in our examples.
449
+ Subgradient methods.
450
+ We can readily compute a subgradient of a saddle max function
451
+ (or a supergradient of a saddle min function), by simply maximizing over the concave variable
452
+ (minimizing over the convex variable), which is itself a convex optimization problem. We can
453
+ then use any method to solve the saddle problem using these subgradients, e.g., subgradient-
454
+ type methods, ellipsoid method, or localization methods such as the analytic center cutting
455
+ plane method. In [MB09] such an approach is used for general minimax problems.
456
+ Methods for specific forms.
457
+ Many methods have been developed for finding saddle
458
+ points of saddle functions with the special form
459
+ f(x, y) = xTKy + φ(x) + ψ(y),
460
+ 9
461
+
462
+ where φ is convex, ψ is concave, and K is a matrix [BS15; Con13; CP11; Nes05a; Nes05b;
463
+ CP16]. Beyond this example, there are many other special forms of saddle functions, with
464
+ different methods adapted to properties such as smoothness, separability, and strong-convex-
465
+ strong-concavity.
466
+ 2.6
467
+ Dual reduction
468
+ A well-known trick can be used to transform a saddle point problem into an equivalent prob-
469
+ lem that does not contain SE functions. This method of transforming an inner minimization
470
+ is not new; it has been used since the 1950s when Von Neumann proved the minimax the-
471
+ orem using strong duality in his work with Morgenstern on game theory [MVN53]. Using
472
+ this observation, he showed that the minimax problem of a two player game is equivalent
473
+ to an LP. Duality allows us to express the convex (concave) SE function as an infimum
474
+ (supremum), which facilitates the use of standard convex optimization. We think of this as
475
+ a reduction to an equivalent problem that removes the SE functions from the objective and
476
+ constraints.
477
+ Robust cost LP.
478
+ We illustrate the dualization method for the robust cost LP (8). The
479
+ key is to express the robust cost or saddle max function supc∈C cTx as an infimum. We first
480
+ observe that this saddle max function is the optimal value of the LP
481
+ maximize
482
+ xT c
483
+ subject to
484
+ Fc ≤ g,
485
+ with variable c. Its dual is
486
+ minimize
487
+ gTλ
488
+ subject to
489
+ F Tλ = x,
490
+ λ ≥ 0,
491
+ with variable λ. Assuming that C is nonempty, this dual problem has the same optimal value
492
+ as the primal, i.e.,
493
+ sup
494
+ c∈C
495
+ cTx =
496
+ inf
497
+ λ≥0, F T λ=x gTλ
498
+ Substituting this into (8) we obtain the problem
499
+ minimize
500
+ gTλ
501
+ subject to
502
+ Ax = b,
503
+ x ≥ 0,
504
+ F Tλ = x,
505
+ λ ≥ 0,
506
+ (9)
507
+ with variables x and λ. This simple LP is equivalent to the original robust LP (8), in the
508
+ sense that if (x⋆, λ⋆) is a solution of (9), then x⋆ is a solution of the robust LP (8).
509
+ We will see this dualization trick in a far more general setting in §4.
510
+ 3
511
+ Applications
512
+ In this section we describe a few applications of saddle programming.
513
+ 10
514
+
515
+ 3.1
516
+ Robust bond portfolio construction
517
+ We describe here a simplified version of the problem described in much more detail in
518
+ [LSB22].
519
+ Our goal is to construct a portfolio of n bonds, giving by its holdings vector
520
+ h ∈ Rn+, where hi is the number of bond i held in the portfolio. Each bond produces a cash
522
+ flow, i.e., a sequence of payments to the portfolio holder, up to some period T. Let ci,t be
523
+ the payment from bond i in time period t. Let y ∈ RT be the yield curve, which gives the
524
+ time value of cash: A payment of one dollar at time t is worth exp(−tyt) current dollars,
525
+ assuming continuously compounded returns. The bond portfolio value, which is the present
526
+ value of the total cash flow, can be expressed as
527
+ V (h, y) = Σ_{i=1}^n Σ_{t=1}^T hi ci,t exp(−t yt).
535
+ This function is convex in the yields y and concave (in fact, linear) in the holdings vector h.
536
+ Now suppose we do not know the yield curve, but instead have a convex set Y of possible
537
+ values, with y ∈ Y. The worst case value of the bond portfolio, over this set of possible yield
538
+ curves, is
539
+ V wc(h) = inf
540
+ y∈Y V (h, y).
541
+ We recognize this as a saddle min function. (In this application, y is the convex variable
542
+ of the saddle function V , whereas elsewhere in this paper we use y to denote the concave
543
+ variable.)
544
+ We consider a robust bond portfolio construction problem of the form
545
+ minimize
546
+ φ(h)
547
+ subject to
548
+ h ∈ H,
549
+ V wc(h) ≥ V lim,
550
+ (10)
551
+ where φ is a convex objective, typically a measure of return and risk, H is a convex set
552
+ of portfolio constraints (for example, imposing h ≥ 0 and a total budget), and V lim is a
553
+ specified limit on worst case value of the portfolio over the yield curve set Y, which has a
554
+ saddle min as a constraint.
555
+ For some simple choices of Y the worst case value can be found analytically. One example
556
+ is when Y has a maximum element.
557
+ In this special case, the maximum element is the
558
+ minimizer of the value over Y (since V is a monotone decreasing function of y). For other
559
+ cases, however, we need to solve the saddle problem (10).
560
+ 3.2
561
+ Model fitting robust to data weights
562
+ We wish to fit a model parametrized by θ ∈ Θ ⊆ Rn to m observed data points. We do this
563
+ by minimizing a weighted loss over the observed data, plus a regularizer,
564
+ Σ_{i=1}^m wi ℓi(θ) + r(θ),
568
+ 11
569
+
570
+ where ℓi is the convex loss function for observed data point i, r is a convex regularizer
571
+ function, and the weights wi are nonnegative. The weights can be used to adjust a data
572
+ sample that was not representative, as in [BAB21], or to ignore some of the data points (by
573
+ taking wi = 0), as in [BGM20]. Evidently the weighted loss is a saddle function, with convex
574
+ variable θ and concave variable w.
575
+ We consider the case when the weights are unknown, but lie in a convex set, w ∈ W. The
576
+ robust fitting problem is to choose θ to minimize the worst case loss over the set of possible
577
+ weights, plus the regularizer,
578
+ max_{w∈W} Σ_{i=1}^m wi ℓi(θ) + r(θ).
584
+ We recognize the first term, i.e., the worst case loss over the set of possible weights, as a
585
+ saddle max function.
586
+ For some simple choices of W the worst case loss can be expressed analytically. For
587
+ example with
588
+ W = {w | 0 ≤ w ≤ 1, 1Tw = k},
589
+ (with k ∈ [0, m]), the worst case loss is given by
590
+ max_{w∈W} Σ_{i=1}^m wi ℓi(θ) = φ(ℓ1, . . . , ℓm),
596
+ where φ is the sum-of-k-largest entries [BV04, §3.2.3]. (Our choice of symbol k suggests that
597
+ k is an integer, but it need not be.) In this case we judge the model parameter θ by its worst
598
+ loss on any subset of k of data points. Put another way, we judge θ by dropping the m − k
599
+ data points on which it does best (i.e., has the smallest loss) [BGM20].
600
+ CVXPY directly supports the sum-of-k-largest function, so the robust fitting problem
601
+ can be formulated and solved without using DSP. To support this function, CVXPY carries
602
+ out a transformation very similar to the one that DSP does.
603
+ The difference is that the
604
+ transformation in CVXPY is specific to this one function, whereas the one carried out in
605
+ DSP is general, and would work for other convex weight sets.
606
+ 3.3
607
+ Robust production problem with worst case prices
608
+ We consider the choice of a vector of quantities q ∈ Q ⊆ Rn. Positive entries indicate goods
609
+ we buy, and negative quantities are goods we sell. The set of possible quantities Q is our
610
+ production set, which is convex. In addition, we have a manufacturing cost associated with
611
+ the choice q, given by φ(q), where φ is a convex function. The total cost is the manufacturing
612
+ cost plus the cost of goods (which includes revenue), φ(q) + pTq, where p ∈ Rn is the vector of
613
+ prices.
614
+ We consider the situation when we do not know the prices, but we have a convex set
615
+ they lie in, p ∈ P. The worst case cost of the goods is maxp∈P pTq. The robust production
616
+ problem is
617
+ minimize
618
+ φ(q) + maxp∈P pTq
619
+ subject to
620
+ q ∈ Q,
621
+ (11)
622
+ 12
623
+
624
+ with variable q. Here too we can work out analytical expressions for simple choices of P,
625
+ such as a range for each component, in which case the worst case price is the upper limit
626
+ for goods we buy, and the lower limit for goods we sell. In other cases, we solve the saddle
627
+ problem (11).
628
+ 3.4
629
+ Robust Markowitz portfolio construction
630
+ Markowitz portfolio construction [Mar52] chooses a set of weights (the fraction of the total
631
+ portfolio value held in each asset) by solving the convex problem
632
+ maximize
633
+ µTw − γwTΣw
634
+ subject to
635
+ 1Tw = 1,
636
+ w ∈ W,
637
+ where the variable is the vector of portfolio weights w ∈ Rn, µ ∈ Rn is a forecast of the
638
+ asset returns, γ > 0 is the risk aversion parameter, Σ ∈ Sn++ is a forecast of the asset return
640
+ covariance matrix, and W is a convex set of feasible portfolios. The objective is called the
641
+ risk adjusted (mean) return.
642
+ Markowitz portfolio construction is known to be fairly sensitive to the (forecasts) µ and
643
+ Σ, which have to be chosen with some care; see, e.g., [BL91]. One approach is to specify
644
+ a convex uncertainty set U that (µ, Σ) must lie in, and replace the objective with its worst
645
+ case (smallest) value over this uncertainty set. This gives the robust Markowitz portfolio
646
+ construction problem
647
+ maximize
648
+ inf_{(µ,Σ)∈U} ( µTw − γwTΣw )
652
+ subject to
653
+ 1T w = 1,
654
+ w ∈ W,
655
+ with variable w. This is described in, e.g., in [Boy+17; GI03; LB00]. We observe that this
656
+ is directly a saddle problem, with a saddle min objective, i.e., a maximin problem.
657
+ For some simple versions of the problem we can work out the saddle min function explic-
658
+ itly. One example, given in [Boy+17], uses U = M × S, where
659
+ M
660
+ =
661
+ {µ + δ | |δ| ≤ ρ},
662
+ S
663
+ =
664
+ {Σ + ∆ | Σ + ∆ ⪰ 0, |∆ij| ≤ η(ΣiiΣjj)1/2, i, j = 1, . . . , n},
665
+ where ρ > 0 is a vector of uncertainties in the forecast returns, and η ∈ (0, 1) is a parameter
666
+ that scales the perturbation to the forecast covariance matrix. (We interpret δ and ∆ as
667
+ perturbations of the nominal mean and covariance µ and Σ, respectively.) We can express
668
+ the worst case risk adjusted return analytically as
669
+ inf_{(µ,Σ)∈U} ( µTw − γwTΣw ) = µTw − γwTΣw − ρT|w| − γη ( Σ_{i=1}^n Σii^{1/2} |wi| )^2.
682
+ The first two terms are the nominal risk adjusted return; the last two terms (which are
683
+ nonpositive) represent the cost of uncertainty.
684
+ 13
685
+
686
+ 4
687
+ Disciplined saddle point programming
688
+ 4.1
689
+ Saddle function calculus
690
+ We use the notation φ(x, y) : X × Y ⊆ Rn×m → R to denote a saddle function with convex
691
+ variables x and concave variables y. The set of operations that, when performed on saddle
692
+ functions, preserves the saddle property are called the saddle function calculus. The calculus
693
+ is quite simple, and consists of the following operations:
694
+ 1. Conic combination of saddle functions. Let φi(xi, yi), i = 1, . . . , k be saddle functions.
695
+ Let θi ≥ 0 for each i. Then the conic combination, φ(x, y) = �k
696
+ i=1 θiφi(xi, yi), is a
697
+ saddle function.
698
+ 2. Affine precomposition of saddle functions. Let φ(x, y) be a saddle function, with x ∈ Rn
699
+ and y ∈ Rm. Let A ∈ Rn×q, b ∈ Rn, C ∈ Rm×p, and d ∈ Rm. Then, with u ∈ Rq and
700
+ v ∈ Rp, the affine precomposition, φ(Au + b, Cv + d), is a saddle function.
701
+ 3. Precomposition of saddle functions. Let φ(x, y) : X × Y ⊆ Rn×m → R be a saddle
702
+ function, with x ∈ Rn and y ∈ Rm. The precomposition with a function f : Rp → Rn,
703
+ φ(f(u), y), is a saddle function if for each i = 1, . . . , n one of the following holds:
704
+ • fi(u) is convex and φ is nondecreasing in xi for all y ∈ Y and all x ∈ X .
705
+ • fi(u) is concave and φ is nonincreasing in xi for all y ∈ Y and all x ∈ X .
706
+ Similarly, the precomposition with a function g : Rq → Rm, φ(x, g(v)), is a saddle
707
+ function if for each j = 1, . . . , m one of the following holds:
708
+ • gj(v) is convex and φ is nonincreasing in yj for all x ∈ X and all y ∈ Y.
709
+ • gj(v) is concave and φ is nondecreasing in yj for all x ∈ X and all y ∈ Y.
710
+ 4.2
711
+ Conically representable saddle functions
712
+ Nemirovski and Juditsky propose a class of conic representable saddle functions which fa-
713
+ cilitate the automated dualization of saddle problems [JN22]. We will first introduce some
714
+ terminology and notation, and then describe the class of conic representable saddle functions.
715
+ Notation.
716
+ We use the notation φ(x, y) : X × Y ⊆ Rn×m → R to denote a saddle function
717
+ which is convex in x and concave in y. Let Kx, Ky and K be members of a collection K
718
+ of closed, convex, and pointed cones with nonempty interiors in Euclidean spaces such that
719
+ K contains a nonnegative ray, is closed with respect to taking finite direct products of its
720
+ members, and is closed with respect to passing from a cone to its dual. We denote conic
721
+ membership z ∈ K by z ⪰K 0. We call a set X ⊆ Rn K-representable if there exist constant
722
+ matrices A and B, a constant vector c, and a cone K ∈ K such that
723
+ X = {x | ∃u : Ax + Bu ⪯K c}.
724
+ 14
725
+
726
+ CVXPY [DB16] can implement a function f exactly when its epigraph {(x, u) | f(x) ≤ u}
727
+ is K-representable.
728
+ Conically representable saddle functions.
729
+ Let X and Y be nonempty and possessing
730
+ K-representations
731
+ X = {x | ∃u : Ax + Bu ⪯K c},
732
+ Y = {y | ∃v : Cy + Dv ⪯K e}.
733
+ A saddle function φ(x, y) : X × Y → R is K-representable if there exist constant matrices
734
+ P, Q, R, constant vectors p and s and a cone K ∈ K such that for each x ∈ X and y ∈ Y,
735
+ φ(x, y) = inf{f Ty + t | Pf + tp + Qu + Rx ⪯K s}.
736
+ This definition generalizes simple class of bilinear saddle functions. See [JN22] for much
737
+ more detail.
738
+ Automated dualization.
739
+ Suppose we have a K-representable saddle function φ as above.
740
+ The power of the conic form is that the saddle extremum
741
+ Φ(x) = sup
742
+ y∈Y
743
+ φ(x, y)
744
+ admits a tractable conic form, meaning that it can be implemented in a DSL like CVXPY.
745
+ Specifically,
746
+ Φ(x) = sup_{y∈Y} φ(x, y)
+ = sup_{y∈Y} inf_{f,t,u} { f Ty + t | Pf + tp + Qu + Rx ⪯K s }
+ = inf_{f,t,u} { sup_{y∈Y} ( f Ty + t ) | Pf + tp + Qu + Rx ⪯K s }    (12)
+ = inf_{f,t,u} { sup_{y∈Y} ( f Ty ) + t | Pf + tp + Qu + Rx ⪯K s }
+ = inf_{f,t,u} { inf_{λ} { λTe | CTλ = f, DTλ = 0, λ ⪰K∗ 0 } + t | Pf + tp + Qu + Rx ⪯K s }    (13)
797
+ where in (12) we use Sion’s minimax theorem [Sio58] to reverse the inf and sup, and in (13)
798
+ we invoke strong duality to replace the supremum over y with an infimum over λ. The final
799
+ line implies a conic representation of the epigraph of Φ(x),
800
+ {(x, u) | Φ(x) ≤ u} =
801
+
802
+
803
+ (x, u)
804
+ ������
805
+ ∃λ, f, t, u :
806
+ λTe + t ≤ u
807
+ CTλ = f, DTλ = 0, λ ⪰K∗ 0
808
+ Pf + tp + Qu + Rx ⪯K s
809
+
810
+
811
+  ,
812
+ which is tractable and can be implemented in a DSL like CVXPY.
813
+ 15
814
+
815
+ A mathematical nuance.
816
+ Switching the inf and sup in (12) requires Sion’s theorem to
817
+ hold. A sufficient condition for Sion’s theorem to hold is that the set Y is compact. However,
818
+ the min and max can be exchanged even if Y is not compact. Then, due to the max-min
819
+ inequality
820
+ max_{y∈Y} min_{x∈X} f(x, y) ≤ min_{x∈X} max_{y∈Y} f(x, y),
825
+ the equality in (13) is replaced with a less than or equal to, and we obtain a convex restriction.
826
+ Thus, if a user creates a problem involving an SE function (as opposed to a saddle point
827
+ problem only containing saddle functions in the objective), then DSP guarantees that the
828
+ problem generated is a restriction. This means that the variables returned are feasible and
829
+ the returned optimal value is an upper bound on the optimal value for the user’s problem.
830
+ In our implementation, a saddle problem is solved by applying the above automatic
831
+ dualization to both the objective f and −f and then solving each resulting convex problem,
832
+ with the latter having the role of convex and concave variables switched. We do so in order
833
+ to obtain both the convex and concave components of the saddle point, since the dualization
834
+ removes the concave variable. The saddle problem is only reported as solved if the optimal
835
+ value of the problem with objective f is within a numerical tolerance of the negated optimal
836
+ value of the problem with objective −f. If this holds, this actually implies that
837
+ max_{y∈Y} min_{x∈X} f(x, y) = min_{x∈X} max_{y∈Y} f(x, y),
842
+ i.e., (12) was valid, even if for example Y is not compact. Thus, a user need not concern
843
+ themselves with the compactness of Y (or any other sufficient condition for Sion’s theorem)
844
+ when using DSP to find a saddle point; if a saddle point problem is solved, then the saddle
845
+ point is guaranteed to exist.
846
+ 5
847
+ Implementation
848
+ In this section we describe our open source Python implementation of the concepts and
849
+ methods described in §4, which we also call DSP. DSP works with CVXPY [DB16], an
850
+ implementation of a DSL for convex optimization based on DCP. We use the term DSP in
851
+ two different ways. We use it to refer to the mathematical concept of disciplined saddle
852
+ programming, and also our specific implementation; which is meant should be clear from
853
+ the context. The term DSP-compliant refers to a function or expression that is constructed
854
+ according to the DSP composition rules given in §5.2. It can also refer to a problem that
855
+ is constructed according to these rules. In the code snippets below, we use the prefix cp
856
+ to indicate functions and classes from CVXPY. (We give functions and classes from DSP
857
+ without prefix, whereas they would likely have a prefix such as dsp in real code.)
858
+ 5.1
859
+ Atoms
860
+ Saddle functions in DSP are created from fundamental building blocks or atoms. These
861
+ building blocks extend the atoms from CVXPY [DB16]. In CVXPY, atoms are either jointly
862
+ 16
863
+
864
+ convex or concave in all their variables, but in DSP, atoms are (jointly) convex in a subset
865
+ of the variables and (jointly) concave in the remaining variables. We describe some DSP
866
+ atoms below.
867
+ Inner product.
868
+ The atom inner(x,y) represents the inner product xTy. Since either
869
+ x or y could represent the convex variable, we adopt the convention in DSP that the first
870
+ argument of inner is the convex variable. According to the DSP rules, both arguments to
871
+ inner must be affine, and the variables they depend on must be disjoint.
872
+ Saddle inner product.
873
+ The atom saddle_inner(F, G) corresponds to the function
874
+ F(x)TG(y), where F and G are vectors of nonnegative and respectively elementwise convex
875
+ and concave functions. It is DSP-compliant if F is DCP convex and nonnegative and G is
876
+ DCP concave. If the function G is not DCP nonnegative, then the DCP constraint G >= 0
877
+ is attached to the expression. This is analogous to how the DCP constraint x >= 0 is added
878
+ to the expression cp.log(x). As an example consider
879
+ f = saddle_inner(cp.square(x), cp.log(y)).
880
+ This represents the saddle function
881
+ f(x, y) = x2 log y − I(y ≥ 1),
882
+ where I is the {0, ∞} indicator function of its argument.
883
+ Weighted ℓ2 norm.
884
+ The weighted_norm2(x, y) atom represents the saddle function
885
+ ( Σ_{i=1}^n yi xi^2 )^{1/2}, with y ≥ 0. It is DSP-compliant if x is either DCP affine or both convex and
888
+ nonnegative, and y is DCP concave. Here too, the constraint y >= 0 is added if y is not
889
+ DCP nonnegative.
890
+ Weighted log-sum-exp.
891
+ The weighted_log_sum_exp(x, y) atom represents the saddle
892
+ function log( Σ_{i=1}^n yi exp xi ), with y ≥ 0. It is DSP-compliant if x is DCP convex, and y is
894
+ DCP concave. The constraint y >= 0 is added if y is not DCP nonnegative.
895
+ Quasi-semidefinite quadratic form.
896
+ The quasidef_quad_form(x, y, P, Q, S) atom
897
+ represents the function
898
+ f(x, y) = [x; y]T [ P S ; ST Q ] [x; y],
+ where the matrix is quasi-semidefinite, i.e., P ∈ Sn+ and −Q ∈ Sn+. It is DSP-compliant if x
912
+ is DCP affine and y is DCP affine.
913
+ Quadratic form.
914
+ The saddle_quad_form(x, Y) atom represents the function xTY x,
915
+ where Y is a PSD matrix. It is DSP-compliant if x is DCP affine, and Y is DCP PSD.
916
+ 17
917
+
918
+ 5.2
919
+ Calculus rules
920
+ The atoms can be combined according to the calculus described below to form expressions
921
+ that are DSP-compliant.
922
+ For example, saddle functions can be added or scaled.
923
+ DCP-
924
+ compliant convex and concave expressions are promoted to saddle functions with no concave
925
+ or convex variables, respectively. For example, with variables x, y, and z, the expression
926
+ f = 2.5 * saddle_inner(cp.square(x), cp.log(y)) + cp.minimum(y,1) - z
927
+ is DSP-compliant, with convex variable x, concave variable y, and affine variable z.
928
+ Calling the is_dsp method of an expression returns True if the expression is DSP-
929
+ compliant. The methods convex_variables, concave_variables, and affine_variables,
930
+ list the convex, concave, and affine variables, respectively. The convex variables are those
931
+ that could only be convex, and similarly for concave variables.
932
+ We refer to the convex
933
+ variables as the unambiguously convex variables, and similarly for the concave variables.
934
+ The three lists of variables gives a partition of all the variables the expression depends on.
935
+ For the expression above, f.is_dsp() evaluates as True, f.convex_variables() returns the
936
+ list [x], f.concave_variables() returns the list [y], and f.affine_variables() returns
937
+ the list [z]. Note that the role of z is ambiguous in the expression, since it could be either
938
+ a convex or concave variable.
939
+ No mixing variables rule.
940
+ The DSP rules prohibit mixing of convex and concave vari-
941
+ ables. For example if we add two saddle expressions, no variable can appear in both its
942
+ convex and concave variable lists.
943
+ DSP-compliance is sufficient but not necessary to be a saddle function.
944
+ Re-
945
+ call that if an expression is DCP convex (concave), then it is convex (concave), but the
946
+ converse is false.
947
+ For example, the expression cp.sqrt(1 + cp.square(x)) represents
948
+ the convex function
949
+
950
+ 1 + x2, but is not DCP. But we can express the same function as
951
+ cp.norm2(cp.hstack([1, x])), which is DCP. The same holds for DSP and saddle func-
952
+ tion: If an expression is DSP-compliant, then it represents a saddle function; but it can
953
+ represent a saddle function and not be DSP-compliant. As with DCP, such an expression
954
+ would need to be rewritten in DSP-compliant form, to use any of the other features of DSP
955
+ (such as a solution method). As an example, the expression x.T @ C @ y represents the
956
+ saddle function xT Cy, but is not DSP-compliant. The same function can be expressed as
957
+ inner(x, C @ y), which is DSP-compliant.
958
+ When there are affine variables in a DSP-compliant expression, it means that those
959
+ variables could be considered either convex or concave; either way, the function is a saddle
960
+ function.
961
+ Example.
962
+ The code below defines the bi-linear saddle function f(x, y) = xT Cy, the ob-
963
+ jective of a matrix game, with x the convex variable and y the concave variable.
964
+ 18
965
+
966
+ Creating a saddle function.
967
+ 1 from dsp import *
968
+ # notational convenience
969
+ 2 import cvxpy as cp
970
+ 3 import numpy as np
971
+ 4
972
+ 5 x = cp.Variable(2)
973
+ 6 y = cp.Variable(2)
974
+ 7 C = np.array([[1, 2], [3, 1]])
975
+ 8
976
+ 9 f = inner(x, C @ y)
977
+ 10
978
+ 11 f.is_dsp()
979
+ # True
980
+ 12
981
+ 13 f.convex_variables()
982
+ # [x]
983
+ 14 f.concave_variables()
984
+ # [y]
985
+ 15 f.affine_variables()
986
+ # []
987
+ Lines 1–3 import the necessary packages (which we will use but not show in the sequel).
988
+ In lines 5–7, we create two CVXPY variables and a constant matrix. In line 9 we construct
989
+ the saddle function f using the DSP atom inner. Both its arguments are affine, so this
990
+ matches the DSP rules. In line 11 we check if saddle_function is DSP-compliant, which
991
+ it is. In lines 13–15 we call functions that return lists of the convex, concave, and affine
992
+ variables, respectively. The results of lines 13–15 might seem odd, but recall that inner
993
+ marks its first argument as convex and its second as concave.
994
+ 5.3
995
+ Saddle point problems
996
+ Saddle point problem objective.
997
+ To construct a saddle point problem, we first create
998
+ an objective using
999
+ obj = MinimizeMaximize(f),
1000
+ where f is a CVXPY expression.
1001
+ The objective obj is DSP-compliant if the expression
1002
+ f is DSP-compliant. This is analogous to the CVXPY constructors cp.Minimize(f) and
1003
+ cp.Maximize(f), which create objectives from expressions.
1004
+ Saddle point problem.
1005
+ A saddle point problem is constructed using
1006
+ prob = SaddlePointProblem(obj, constraints, cvx_vars, ccv_vars)
1007
+ Here, obj is a MinimizeMaximize objective, constraints is a list of constraints, cvx_vars
1008
+ is a list of convex variables and ccv_vars is a list of concave variables. The objective must
1009
+ be DSP-compliant for the problem to be DSP-compliant. We now describe the remaining
1010
+ conditions under which the constructed problem is DSP-compliant.
1011
+ 19
1012
+
1013
+ Each constraint in the list must be DCP, and can only involve convex variables or concave
1014
+ variables; convex and concave variables cannot both appear in any one constraint. The list
1015
+ of convex and concave variables partitions all the variables that appear in the objective or
1016
+ the constraints. In cases where the role of a variable is unambiguous, it is inferred, and does
1017
+ not need to be in either list. For example with the objective
1018
+ MinimizeMaximize(weighted_log_sum_exp(x, y) + cp.exp(u) + cp.log(v) + z),
1019
+ x and u must be convex variables, and y and v must be concave variables, and so do not need
1020
+ to appear in the lists used to construct a saddle point problem. The variable z, however,
1021
+ could be either a convex or concave variable, and so must appear in one of the lists.
1022
+ The role of a variable can also be inferred from the constraints: Any variable that appears
1023
+ in a constraint with convex (concave) variables must also be convex (concave). With the
1024
+ objective above, the constraint z + v <= 1 would serve to classify z as a concave variable.
1025
+ With this constraint, we could pass empty variable lists to the saddle point constructor, since
1026
+ the roles of all variables can be inferred. When the roles of all variables are unambiguous,
1027
+ the lists are optional.
1028
+ The roles of the variables in a saddle point problem prob can be found by calling
1029
+ prob.convex_variables() and prob.concave_variables(), which return lists of vari-
1030
+ ables, and is a partition of all the variables appearing in the objective or constraints. This is
1031
+ useful for debugging, to be sure that DSP agrees with you about the roles of all variables. A
1032
+ DSP-compliant saddle point problem must have an empty list of affine variables. (If it did
1033
+ not, the problem would be ambiguous.)
1034
+ Solving a saddle point problem.
1035
+ The solve() method of a SaddlePointProblem object
1036
+ solves the problem. The solve method returns the optimal saddle value, i.e., the value of
1037
+ the objective at the saddle point. As in CVXPY, the solve method has the side effect of
1038
+ writing all variables’ .value attribute.
1039
+ Example.
1040
+ Here we create and solve a matrix game, continuing the example above where
1041
+ f was defined. We do not need to pass in lists of variables since their roles can be inferred.
1042
+ Creating and solving a matrix game.
1043
+ 1 obj = MinimizeMaximize(f)
1044
+ 2 constraints = [x >= 0, cp.sum(x) == 1, y >= 0, cp.sum(y) == 1]
1045
+ 3 prob = SaddlePointProblem(obj, constraints)
1046
+ 4
1047
+ 5 prob.is_dsp()
1048
+ # True
1049
+ 6 prob.convex_variables()
1050
+ # [x]
1051
+ 7 prob.concave_variables()
1052
+ # [y]
1053
+ 8 prob.affine_variables()
1054
+ # []
1055
+ 9
1056
+ 10 prob.solve()
1057
+ # solves the problem
1058
+ 20
1059
+
1060
+ 11 prob.value
1061
+ # 1.6666666666666667
1062
+ 12 x.value
1063
+ # array([0.66666667, 0.33333333])
1064
+ 13 y.value
1065
+ # array([0.33333333, 0.66666667])
1066
+ 5.4
1067
+ Saddle extremum functions
1068
+ Local variables.
1069
+ An SE function has one of the forms
1070
+ G(x) = sup_{y∈Y} f(x, y) or H(y) = inf_{x∈X} f(x, y),
1076
+ where f is saddle function. Note that y in the definition of G, and x in the definition of
1077
+ H, are local or dummy variables, understood to have no connection to any other variable.
1078
+ Their scope extends only to the definition, and not beyond.
1079
+ To express this subtlety in DSP, we use the class LocalVariable to represent these
1080
+ dummy variables. The variables that are maximized over (in a saddle max function) or
1081
+ minimized over (in a saddle min function) must be declared using the LocalVariable()
1082
+ constructor. Any LocalVariable in an SE function cannot appear in any other SE function.
1083
+ Constructing SE functions.
1084
+ We construct SE functions in DSP using
1085
+ saddle_max(f, constraints)
1086
+ or
1087
+ saddle_min(f, constraints).
1088
+ Here, f is a CVXPY scalar expression, and constraints is a list of constraints. We now
1089
+ describe the rules for constructing a DSP-compliant SE function.
1090
+ If a saddle_max is being constructed, f must be DSP-compliant, and the function’s con-
1091
+ cave variables, and all variables appearing in the list of constraints, must be LocalVariables,
1092
+ while the function’s convex variables must all be regular Variables. A similar rule applies
1093
+ for saddle_min.
1094
+ The list of constraints is used to specify the set over which the sup or inf is taken. Each
1095
+ constraint must be DCP-compliant, and can only contain LocalVariables.
1096
+ With x a Variable, y_loc a LocalVariable, z_loc a LocalVariable, and z a Variable,
1097
+ consider the following two SE functions:
1098
+ 1 f_1 = saddle_max(inner(x, y_loc) + z, [y_loc <= 1])
1099
+ 2 f_2 = saddle_max(inner(x, y_loc) + z_loc, [y_loc <= 1, z_loc <= 1])
1100
+ Both are DSP-compliant. For the first, calling f_1.convex_variables() would return
1101
+ [x, z], and calling f_1.concave_variables() would return [y_loc].
1102
+ For the second,
1103
+ calling f_2.convex_variables() would return [x], and f_2.concave_variables() would return
1104
+ [y_loc, z_loc].
1105
+ Let y be a Variable. Both of the following are not DSP-compliant:
1106
+ 1 f_3 = saddle_max(inner(x, y_loc) + z, [y_loc <= 1, z <= 1])
1107
+ 2 f_4 = saddle_max(inner(x, y) + z_loc, [y_loc <= 1, z_loc <= 1])
1108
+ 21
1109
+
1110
+ The first is not DSP-compliant because z is not a LocalVariable, but appears in the
1111
+ constraints.
1112
+ The second is not DSP-compliant because y is not a LocalVariable, but
1113
+ appears as a concave variable in the saddle function.
1114
+ SE functions are DCP.
1115
+ When they are DSP-compliant, a saddle_max is a convex func-
1116
+ tion, and a saddle_min is a concave function. They can be used anywhere in CVXPY that a
1117
+ convex or concave function is appropriate. You can add them, compose them (in appropriate
1118
+ ways), use them in the objective or either side of constraints (in appropriate ways).
1119
+ Examples.
1120
+ Now we provide full examples demonstrating construction of a saddle_max,
1121
+ which we can use to solve the matrix game described in §5.3 as a saddle problem involving
1122
+ an SE function.
1123
+ Creating a saddle max.
1124
+ 1 # Creating variables
1125
+ 2 x = cp.Variable(2)
1126
+ 3
1127
+ 4 # Creating local variables
1128
+ 5 y_loc = LocalVariable(2)
1129
+ 6
1130
+ 7 # Convex in x, concave in y_loc
1131
+ 8 f = saddle_inner(C @ x, y_loc)
1132
+ 9
1133
+ 10 # maximizes over y_loc
1134
+ 11 G = saddle_max(f, [y_loc >= 0, cp.sum(y_loc) == 1])
1135
+ Note that G is a CVXPY expression. Constructing a saddle_min works exactly the same
1136
+ way.
1137
+ 5.5
1138
+ Saddle problems
1139
+ A saddle problem is a convex problem that uses SE functions. To be DSP-compliant, the
1140
+ problem must be DCP (which implies all SE functions are DSP-compliant).
1141
+ When you
1142
+ call the solve method on a saddle problem involving SE functions, and the solve is suc-
1143
+ cessful, then all variables’ .value fields are overwritten with optimal values. This includes
1144
+ LocalVariables that the SE functions maximized or minimized over; they are assigned to
1145
+ the value of a particular maximizer or minimizer of the SE function at the value of the
1146
+ non-local variables, with no further guarantees.
1147
+ Example.
1148
+ We continue our example from §5.4 and solve the matrix game using a
1149
+ saddle max.
1150
+ 22
1151
+
1152
+ Creating and solving a saddle problem using a saddle max to solve the matrix game.
1153
+ 1 prob = cp.Problem(cp.Minimize(G), [x >= 0, cp.sum(x) == 1])
1154
+ 2
1155
+ 3 prob.is_dsp()
1156
+ # True
1157
+ 4
1158
+ 5 prob.solve()
1159
+ # solving the problem
1160
+ 6 prob.value
1161
+ # 1.6666666666666667
1162
+ 7 x.value
1163
+ # array([0.66666667, 0.33333333])
1164
+ 6
1165
+ Examples
1166
+ In this section we give numerical examples, taken from §3, showing how to create DSP-
1167
+ compliant problems. The specific problem instances we take are small, since our main point
1168
+ is to show how easily the problems can be specified in DSP. But DSP will scale to far larger
1169
+ problem instances.
1170
+ 6.1
1171
+ Robust bond portfolio construction
1172
+ Our first example is the robust bond portfolio construction problem described in §3.1. We
1173
+ consider portfolios of n = 20 bonds, over a period T = 60 half-years, i.e., 30 years. The
1174
+ bonds are taken as representative ones in a global investment grade bond portfolio; for more
1175
+ detail, see [LSB22]. The payments from the bonds are given by C ∈ R20×60, with cash flow
1176
+ of bond i in period t denoted ci,t. The portfolio constraint set H is given by
1177
+ H = {h | h ≥ 0, pTh = B},
1178
+ i.e., the investments must be nonnegative and have a total value (budget) B, which we take
1179
+ to be $100. Here p ∈ R20+ denotes the price of the bonds on September 12, 2022. The
1180
+
1181
+ portfolio objective is
1182
+ φ(h) = (1/2)∥(h − hmkt) ◦ p∥1,
1183
+
1184
+ where hmkt is the market portfolio scaled to a value of $100, and ◦ denotes Hadamard or
1185
+ elementwise multiplication. This is called the turn-over distance, since it tells us how much
1186
+ we would need to buy and sell to convert our portfolio to the market portfolio.
1187
+ The yield curve set Y is described in terms of perturbations to the nominal or current
1188
+ yield curve ynom, which is the yield curve on September 12, 2022. We take
1189
+ Y = { ynom + δ : ∥δ∥∞ ≤ δmax, ∥δ∥1 ≤ κ, Σ_{t=1}^{T−1} (δ_{t+1} − δ_t)² ≤ ω }.
1190
+
1191
+
1192
+
1193
+
1194
+
1195
+
1196
+
1197
+
1198
+
1199
+ We interpret δ as a shock to the yield curve, which we limit elementwise, in absolute sum,
1200
+ and in smoothness. The specific parameter values are given by
1201
+ δmax = 0.02,
1202
+ κ = 0.9,
1203
+ ω = 10−6.
1204
+ 23
1205
+
1206
+ In the robust bond portfolio problem (10) we take V lim = 90, that is, the worst case value
1207
+ of the portfolio cannot drop below $90 for any y ∈ Y.
1208
+ We solve the problem using the following code, where we assume the cash flow matrix
1209
+ C, the price vector p, the nominal yield curve y_nom, and the market portfolio h_mkt are
1210
+ defined.
1211
+ Robust bond portfolio construction.
1212
+ 1 # Constants and parameters
1213
+ 2 n, T = C.shape
1214
+ 3 delta_max, kappa, omega = 0.02, 0.9, 1e-6
1215
+ 4 B = 100
1216
+ 5 V_lim = 90
1217
+ 6
1218
+ 7 # Creating variables
1219
+ 8 h = cp.Variable(n, nonneg=True)
1220
+ 9
1221
+ 10 delta = LocalVariable(T)
1222
+ 11 y = y_nom + delta
1223
+ 12
1224
+ 13 # Objective
1225
+ 14 phi = 0.5 * cp.norm1(cp.multiply(h, p) - cp.multiply(h_mkt, p))
1226
+ 15
1227
+ 16 # Creating saddle min function
1228
+ 17 V = 0
1229
+ 18 for i in range(n):
1230
+ 19
1231
+ t_plus_1 = np.arange(T) + 1
1232
+ # Account for zero-indexing
1233
+ 20
1234
+ V += saddle_inner(cp.exp(cp.multiply(-t_plus_1, y)), h[i] * C[i])
1235
+ 21
1236
+ 22 Y = [
1237
+ 23
1238
+ cp.norm_inf(delta) <= delta_max,
1239
+ 24
1240
+ cp.norm1(delta) <= kappa,
1241
+ 25
1242
+ cp.sum_squares(delta[1:] - delta[:-1]) <= omega,
1243
+ 26 ]
1244
+ 27
1245
+ 28 V_wc = saddle_min(V, Y)
1246
+ 29
1247
+ 30 # Creating and solving the problem
1248
+ 31 problem = cp.Problem(cp.Minimize(phi), [h @ p == B, V_wc >= V_lim])
1249
+ 32 problem.solve()
1250
+ # 15.32
1251
+ We first define the constants and parameters in lines 2–5, before creating the variable
1252
+ for the holdings h in line 8, and the LocalVariable delta, which gives the yield curve
1253
+ 24
1254
+
1255
+ Nominal portfolio
1256
+ Robust portfolio
1257
+ Turn-over distance
1258
+ $0.00
1259
+ $15.32
1260
+ Worst-case value
1261
+ $86.99
1262
+ $90.00
1263
+ Table 1:
1264
+ Turn-over distance and worst-case value for the nominal (market) portfolio and the
1265
+ robust portfolio. The nominal portfolio does not meet our requirement that the worst-case value
1266
+ be at least $90.
1267
+ perturbation, in line 10. In line 11 we define y as the sum of the current yield curve y_nom
1268
+ and the perturbation delta. The objective function is defined in line 14. Lines 17–20 define
1269
+ the saddle function V via the saddle_inner atom. The yield uncertainty set Y is defined in
1270
+ lines 22–26, and the worst case portfolio value is defined in line 28 using saddle_min. We use
1271
+ the concave expression saddle_min to create and solve a CVXPY problem in lines 31–32.
1272
+ Table 1 summarizes the results. The nominal portfolio is the market portfolio, which
1273
+ has zero turn-over distance to the market portfolio, i.e., zero objective value. This nominal
1274
+ portfolio, however, does not satisfy the worst-case portfolio value constraint, since there are
1275
+ yield curves in Y that cause the portfolio value to drop to around $87, less than our limit
1276
+ of $90. The solution of the robust problem has turn-over distance $15.32, and satisfies the
1277
+ constraint that the worst-case value be at least $90.
1278
+ 6.2
1279
+ Model fitting robust to data weights
1280
+ We consider an instance of the model fitting problem described in §3.2. We use the well
1281
+ known Titanic data set [HC17], which gives several attributes for each passenger on the ill-
1282
+ fated Titanic voyage, including whether they survived. A classifier is fit to predict survival
1283
+ based on the features sex, age (binned into three groups, 0–26, 26–53, and 53–80), and class
1284
+ (1, 2, or 3). These features are encoded as a Boolean vector ai ∈ R7. The label yi = 1 means
1285
+ passenger i survived, and yi = −1 otherwise. There are 1046 examples, but we fit our model
1286
+ using only the m = 50 passengers who embarked from Queenstown, one of three ports of
1287
+ embarkation. This is a somewhat non-representative sample; for example, the survival rate
1288
+ among Queenstown departures is 26%, whereas the overall survival rate is 40.8%.
1289
+ We seek a linear classifier ŷ_i = sign(a_iᵀθ + β0), where θ ∈ R7 is the classifier parameter
1290
+
1291
+ vector and β0 ∈ R is the bias. The hinge loss and ℓ2 regularization are used, given by
1292
+ ℓ_i(θ) = max(0, 1 − y_i a_iᵀθ),
1293
+
1294
+ r(θ) = η∥θ∥₂²,
1295
+
1296
+ with η = 0.05.
1297
+ The data is weighted to partially correct for the different survival rates for our training
1298
+ set (26%) and the whole data set (40.8%). To do this we set wi = z1 when yi = 1 and
1299
+ wi = z2 when yi = −1. We require w ≥ 0 and 1Tw = 1, and
1300
+ 0.408 − 0.05 ≤ Σ_{i: y_i=1} w_i ≤ 0.408 + 0.05.
1301
+
1302
+
1303
+
1304
+ 25
1305
+
1306
+ Thus W consists of weights on the Queenstown departure samples that correct the survival
1307
+ rate to within 5% of the overall survival rate.
1308
+ The code shown below solves this problem, where we assume the data matrix is already
1309
+ defined as A_train (with rows aT
1310
+ i ), the survival label vector is defined as y_train, and the
1311
+ indicator of survival in the training set is defined as surv.
1312
+ Model fitting robust to data weights.
1313
+ 1 # Constants and parameters
1314
+ 2 m, n = A_train.shape
1315
+ 3 inds_0 = surv == 0
1316
+ 4 inds_1 = surv == 1
1317
+ 5 eta = 0.05
1318
+ 6
1319
+ 7 # Creating variables
1320
+ 8 theta = cp.Variable(n)
1321
+ 9 beta_0 = cp.Variable()
1322
+ 10 weights = cp.Variable(m, nonneg=True)
1323
+ 11 surv_weight_0 = cp.Variable()
1324
+ 12 surv_weight_1 = cp.Variable()
1325
+ 13
1326
+ 14 # Defining the loss function and the weight constraints
1327
+ 15 y_hat = A_train @ theta + beta_0
1328
+ 16 loss = cp.pos(1 - cp.multiply(y_train, y_hat))
1329
+ 17 objective = MinimizeMaximize(saddle_inner(loss, weights)
1330
+ 18
1331
+ + eta * cp.sum_squares(theta))
1332
+ 19
1333
+ 20 constraints = [
1334
+ 21
1335
+ cp.sum(weights) == 1,
1336
+ 22
1337
+ 0.408 - 0.05 <= weights @ surv,
1338
+ 23
1339
+ weights @ surv <= 0.408 + 0.05,
1340
+ 24
1341
+ weights[inds_0] == surv_weight_0,
1342
+ 25
1343
+ weights[inds_1] == surv_weight_1,
1344
+ 26 ]
1345
+ 27
1346
+ 28 # Creating and solving the problem
1347
+ 29 problem = SaddlePointProblem(objective, constraints)
1348
+ 30 problem.solve()
1349
+ After defining the constants and parameters in lines 2–5, we specify the variables for the
1350
+ model coefficient and the weights in lines 8–9 and 10–12, respectively. The loss function
1351
+ and regularizer which make up the objective are defined next in lines 15–18. The weight
1352
+ constraints are defined in lines 20–26. The saddle point problem is created and solved in
1353
+ 26
1354
+
1355
+ Nominal classifier
1356
+ Robust classifier
1357
+ Train accuracy
1358
+ 82.0%
1359
+ 80.0%
1360
+ Test accuracy
1361
+ 76.0%
1362
+ 78.6%
1363
+ Table 2: Train and test accuracy for the nominal and robust classification models.
1364
+ lines 29 and 30.
1365
+ The results are shown in table 2. We report the test accuracy on all samples in the
1366
+ dataset with a different port of embarkation than Queenstown (996 samples). We see that
1367
+ while the robust classification model has slightly lower training accuracy than the nominal
1368
+ model, it achieves a higher test accuracy, generalizing from the non-representative training
1369
+ data better than the nominal classifier, which uses uniform weights.
1370
+ 6.3
1371
+ Robust Markowitz portfolio construction
1372
+ We consider the robust Markowitz portfolio construction problem described in §3.4. We take
1373
+ n = 6 assets, which are the (five) Fama-French factors [FF15] plus a risk-free asset. The
1374
+ data is obtained from the Kenneth R. French data library [Fre22], with monthly return data
1375
+ available from July 1963 to October 2022. The nominal return and risk are the empirical
1376
+ mean and covariance of the returns. (These obviously involve look-ahead, but the point of
1377
+ the example is how to specify and solve the problem with DSP, not the construction of a
1378
+ real portfolio.) We take parameters ρ = 0.02, η = 0.2, and risk aversion parameter γ = 1.
1379
+ In the code, we use mu and Sigma for the mean and covariance estimates, respectively,
1380
+ and the parameters are denoted rho, eta, and gamma.
1381
+ Robust Markowitz portfolio construction.
1382
+ 1 # Constants and parameters
1383
+ 2 n = len(mu)
1384
+ 3 rho, eta, gamma = 0.2, 0.2, 1
1385
+ 4
1386
+ 5 # Creating variables
1387
+ 6 w = cp.Variable(n, nonneg=True)
1388
+ 7
1389
+ 8 delta_loc = LocalVariable(n)
1390
+ 9 Sigma_perturbed = LocalVariable((n, n), PSD=True)
1391
+ 10 Delta_loc = LocalVariable((n, n))
1392
+ 11
1393
+ 12 # Creating saddle min function
1394
+ 13 f = w @ mu + saddle_inner(delta_loc, w) \
1395
+ 14
1396
+ - gamma * saddle_quad_form(w, Sigma_perturbed)
1397
+ 15
1398
+ 16 Sigma_diag = Sigma.diagonal()
1399
+ 27
1400
+
1401
+ Nominal portfolio
1402
+ Robust portfolio
1403
+ Nominal objective
1404
+ .295
1405
+ .291
1406
+ Robust objective
1407
+ .065
1408
+ .076
1409
+ Table 3: Nominal and worst-case objective for the nominal and robust portfolios.
1410
+ 17 local_constraints = [
1411
+ 18
1412
+ cp.abs(delta_loc) <= rho, Sigma_perturbed == Sigma + Delta_loc,
1413
+ 19
1414
+ cp.abs(Delta_loc) <= eta * np.sqrt(np.outer(Sigma_diag, Sigma_diag))
1415
+ 20 ]
1416
+ 21
1417
+ 22 G = saddle_min(f, local_constraints)
1418
+ 23
1419
+ 24 # Creating and solving the problem
1420
+ 25 problem = cp.Problem(cp.Maximize(G), [cp.sum(w) == 1])
1421
+ 26 problem.solve()
1422
+ # 0.076
1423
+ We first define the constants and parameters, before creating the weights variable in
1424
+ line 6, and the local variables for the perturbations in lines 8–10. The saddle function for
1425
+ the objective is defined in line 13, followed by the constraints on the perturbations. Both
1426
+ are combined into the concave saddle min function, which is maximized over the portfolio
1427
+ constraints in lines 25–26.
1428
+ The results are shown in table 3. The robust portfolio yields a slightly lower risk adjusted
1429
+ return of 0.291 compared to the nominal optimal portfolio with 0.295.
1430
+ But the robust
1431
+ portfolio attains a higher worst-case risk adjusted return of 0.076, compared to the nominal
1432
+ optimal portfolio which attains 0.065.
1433
+ Acknowledgements
1434
+ P. Schiele is supported by a fellowship within the IFI program of the German Academic
1435
+ Exchange Service (DAAD). This research was partially supported by ACCESS (AI Chip
1436
+ Center for Emerging Smart Systems), sponsored by InnoHK funding, Hong Kong SAR, and
1437
+ by ONR N000142212121.
1438
+ 28
1439
+
1440
+ References
1441
+ [Mar52]
1442
+ H. Markowitz. “Portfolio Selection”. In: Journal of Finance 7 (1 1952), pp. 77–
1443
+ 91.
1444
+ [MVN53]
1445
+ O. Morgenstern and J. Von Neumann. Theory of games and economic behavior.
1446
+ Princeton University Press, 1953.
1447
+ [Sio58]
1448
+ M. Sion. “On general minimax theorems”. In: Pacific Journal of Mathematics
1449
+ 8.1 (1958), pp. 171–176.
1450
+ [Roc70]
1451
+ R. Rockafellar. Convex analysis. Vol. 18. Princeton university press, 1970.
1452
+ [Kor76]
1453
+ G. Korpelevich. “The extragradient method for finding saddle points and other
1454
+ problems”. In: Matecon 12 (1976), pp. 747–756.
1455
+ [DM90]
1456
+ V. Dem’yanov and V. Malozemov. Introduction to minimax. Courier Corpo-
1457
+ ration, 1990.
1458
+ [BL91]
1459
+ F. Black and R. Litterman. “Asset Allocation”. In: The Journal of Fixed In-
1460
+ come 1.2 (1991), pp. 7–18. issn: 1059-8596.
1461
+ [NN92]
1462
+ Y. Nesterov and A. Nemirovski. “Conic formulation of a convex programming
1463
+ problem and duality”. In: Optimization Methods & Software 1 (1992), pp. 95–
1464
+ 115.
1465
+ [HK93]
1466
+ R. Hettich and K. Kortanek. “Semi-infinite programming: Theory, methods,
1467
+ and applications”. In: SIAM review 35.3 (1993), pp. 380–429.
1468
+ [DP95]
1469
+ D. Du and P. Pardalos. Minimax and applications. Vol. 4. Springer Science &
1470
+ Business Media, 1995.
1471
+ [Nem99]
1472
+ A. Nemirovski. “On self-concordant convex–concave functions”. In: Optimiza-
1473
+ tion Methods and Software 11.1-4 (1999), pp. 303–384.
1474
+ [LB00]
1475
+ M. Lobo and S. Boyd. The worst-case risk of a portfolio. Tech. rep. 2000.
1476
+ [GI03]
1477
+ D. Goldfarb and G. Iyengar. “Robust Portfolio Selection Problems”. In: Mathe-
1478
+ matics of Operations Research 28.1 (2003), pp. 1–38. issn: 0364765X, 15265471.
1479
+ [HT03]
1480
+ B. Halld´orsson and R. T¨ut¨unc¨u. “An interior-point method for a class of
1481
+ saddle-point problems”. In: Journal of Optimization Theory and Applications
1482
+ 116.3 (2003), pp. 559–590.
1483
+ [BV04]
1484
+ S. Boyd and L. Vandenberghe. Convex Optimization. Cambridge University
1485
+ Press, 2004.
1486
+ [Lof04]
1487
+ J. Lofberg. “YALMIP : a toolbox for modeling and optimization in MATLAB”.
1488
+ In: 2004 IEEE International Conference on Robotics and Automation (IEEE
1489
+ Cat. No.04CH37508). 2004, pp. 284–289.
1490
+ 29
1491
+
1492
+ [Nem04]
1493
+ A. Nemirovski. “Prox-method with rate of convergence O(1/t) for variational
1494
+ inequalities with Lipschitz continuous monotone operators and smooth convex-
1495
+ concave saddle point problems”. In: SIAM Journal on Optimization 15.1 (2004),
1496
+ pp. 229–251.
1497
+ [Nes05a]
1498
+ Y. Nesterov. “Excessive gap technique in nonsmooth convex minimization”.
1499
+ In: SIAM Journal on Optimization 16.1 (2005), pp. 235–249.
1500
+ [Nes05b]
1501
+ Y. Nesterov. “Smooth minimization of non-smooth functions”. In: Mathemat-
1502
+ ical programming 103.1 (2005), pp. 127–152.
1503
+ [BL06]
1504
+ J. Borwein and A. Lewis. Convex Analysis. Springer, 2006.
1505
+ [GBY06]
1506
+ M. Grant, S. Boyd, and Y. Ye. “Disciplined convex programming”. In: Global
1507
+ optimization. Springer, 2006, pp. 155–210.
1508
+ [NP06]
1509
+ Y. Nesterov and B. Polyak. “Cubic regularization of Newton method and its
1510
+ global performance”. In: Mathematical Programming 108.1 (2006), pp. 177–
1511
+ 205.
1512
+ [Nes07]
1513
+ Y. Nesterov. “Dual extrapolation and its applications to solving variational in-
1514
+ equalities and related problems”. In: Mathematical Programming 109.2 (2007),
1515
+ pp. 319–344.
1516
+ [Nes08]
1517
+ Y. Nesterov. “Accelerating the cubic regularization of Newton’s method on
1518
+ convex problems”. In: Mathematical Programming 112.1 (2008), pp. 159–181.
1519
+ [BTEGN09]
1520
+ A. Ben-Tal, L. El Ghaoui, and A. Nemirovski. Robust optimization. Vol. 28.
1521
+ Princeton university press, 2009.
1522
+ [MB09]
1523
+ A. Mutapcic and S. Boyd. “Cutting-set methods for robust convex optimiza-
1524
+ tion with pessimizing oracles”. In: Optimization Methods & Software 24.3
1525
+ (2009), pp. 381–406.
1526
+ [NO09]
1527
+ A. Nedi´c and A. Ozdaglar. “Subgradient methods for saddle-point problems”.
1528
+ In: Journal of optimization theory and applications 142.1 (2009), pp. 205–228.
1529
+ [RW09]
1530
+ R. Rockafellar and R. Wets. Variational analysis. Vol. 317. Springer Science
1531
+ & Business Media, 2009.
1532
+ [BBC11]
1533
+ D. Bertsimas, D. Brown, and C. Caramanis. “Theory and applications of ro-
1534
+ bust optimization”. In: SIAM review 53.3 (2011), pp. 464–501.
1535
+ [CP11]
1536
+ A. Chambolle and T. Pock. “A first-order primal-dual algorithm for convex
1537
+ problems with applications to imaging”. In: Journal of mathematical imaging
1538
+ and vision 40.1 (2011), pp. 120–145.
1539
+ [CLO13]
1540
+ Y. Chen, G. Lan, and Y. Ouyang. Optimal Primal-Dual Methods for a Class
1541
+ of Saddle Point Problems. 2013.
1542
+ [Con13]
1543
+ L. Condat. “A primal–dual splitting method for convex optimization involving
1544
+ Lipschitzian, proximable and linear composite terms”. In: Journal of optimiza-
1545
+ tion theory and applications 158.2 (2013), pp. 460–479.
1546
+ 30
1547
+
1548
+ [Goo+14]
1549
+ I. Goodfellow et al. “Generative Adversarial Nets”. In: Advances in Neural
1550
+ Information Processing Systems. Vol. 27. Curran Associates, Inc., 2014.
1551
+ [GB14]
1552
+ M. Grant and S. Boyd. “CVX: Matlab software for disciplined convex pro-
1553
+ gramming, version 2.1”. In: (2014).
1554
+ [Ude+14]
1555
+ M. Udell et al. “Convex Optimization in Julia”. In: SC14 Workshop on High
1556
+ Performance Technical Computing in Dynamic Languages (2014).
1557
+ [BS15]
1558
+ K. Bredies and H. Sun. “Preconditioned Douglas–Rachford splitting methods
1559
+ for convex-concave saddle-point problems”. In: SIAM Journal on Numerical
1560
+ Analysis 53.1 (2015), pp. 421–444.
1561
+ [FF15]
1562
+ E. Fama and K. French. “A five-factor asset pricing model”. In: Journal of
1563
+ Financial Economics 116.1 (2015), pp. 1–22. issn: 0304-405X.
1564
+ [CP16]
1565
+ A. Chambolle and T. Pock. “On the ergodic convergence rates of a first-order
1566
+ primal–dual algorithm”. In: Mathematical Programming 159.1 (2016), pp. 253–
1567
+ 287.
1568
+ [DB16]
1569
+ S. Diamond and S. Boyd. “CVXPY: A Python-embedded modeling language
1570
+ for convex optimization”. In: Journal of Machine Learning Research 17.83
1571
+ (2016), pp. 1–5.
1572
+ [Boy+17]
1573
+ S. Boyd et al. “Multi-Period Trading via Convex Optimization”. In: Founda-
1574
+ tions and Trends in Optimization 3.1 (2017), pp. 1–76.
1575
+ [HC17]
1576
+ F. Harrell Jr. and T. Cason. Titanic dataset. 2017. url: https://www.openml.org/d/40945.
1577
+ [CPT18]
1578
+ G. Cornu´ejols, J. Pe˜na, and R. T¨ut¨unc¨u. Optimization Methods in Finance.
1579
+ 2nd ed. Cambridge University Press, 2018.
1580
+ [DA19]
1581
+ X. Dou and M. Anitescu. “Distributionally robust optimization with correlated
1582
+ data from vector autoregressive processes”. In: Operations Research Letters
1583
+ 47.4 (2019), pp. 294–299. issn: 0167-6377.
1584
+ [The+19]
1585
+ K. Thekumparampil et al. “Efficient Algorithms for Smooth Minimax Opti-
1586
+ mization”. In: Advances in Neural Information Processing Systems. Vol. 32.
1587
+ Curran Associates, Inc., 2019.
1588
+ [BGM20]
1589
+ T. Broderick, R. Giordano, and R. Meager. “An Automatic Finite-Sample
1590
+ Robustness Metric: When Can Dropping a Little Data Make a Big Difference?”
1591
+ In: arXiv preprint arXiv:2011.14999 (2020).
1592
+ [FNB20]
1593
+ A. Fu, B. Narasimhan, and S. Boyd. “CVXR: An R Package for Disciplined
1594
+ Convex Optimization”. In: Journal of Statistical Software 94.14 (2020), pp. 1–
1595
+ 34.
1596
+ [LJJ20]
1597
+ T. Lin, C. Jin, and M. Jordan. “Near-optimal algorithms for minimax opti-
1598
+ mization”. In: Conference on Learning Theory. PMLR. 2020, pp. 2738–2779.
1599
+ [BAB21]
1600
+ S. Barratt, G. Angeris, and S. Boyd. “Optimal representative sample weight-
1601
+ ing”. In: Statistics and Computing 31.2 (2021), pp. 1–14.
1602
+ 31
1603
+
1604
+ [Fre22]
1605
+ K. French. Kenneth R. French Data Library. 2022.
1606
+ [JN22]
1607
+ A. Juditsky and A. Nemirovski. “On well-structured convex–concave saddle
1608
+ point problems and variational inequalities with monotone operators”. In: Op-
1609
+ timization Methods and Software 37.5 (2022), pp. 1567–1602.
1610
+ [LSB22]
1611
+ E. Luxenberg, P. Schiele, and S. Boyd. Robust Bond Portfolio Construction
1612
+ via Convex-Concave Saddle Point Optimization. 2022.
1613
+ [RY22]
1614
+ E. Ryu and W. Yin. Large-Scale Convex Optimization: Algorithms & Analyses
1615
+ via Monotone Operators. Cambridge University Press, 2022.
1616
+ 32
1617
+
3dFQT4oBgHgl3EQf3TZi/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4tE4T4oBgHgl3EQf1A0X/content/tmp_files/2301.05286v1.pdf.txt ADDED
@@ -0,0 +1,810 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ GOVERNMENT LICENSE
2
+ The submitted manuscript has been created by UChicago Argonne, LLC, Operator of Argonne
3
+ National Laboratory (“Argonne”). Argonne, a U.S. Department of Energy Office of Science lab-
4
+ oratory, is operated under Contract No. DE-AC02-06CH11357. The U.S. Government retains for
5
+ itself, and others acting on its behalf, a paid-up nonexclusive, irrevocable worldwide license in
6
+ said article to reproduce, prepare derivative works, distribute copies to the public, and perform
7
+ publicly and display publicly, by or on behalf of the Government. The Department of Energy will
8
+ provide public access to these results of federally sponsored research in accordance with the DOE
9
+ Public Access Plan. http://energy.gov/downloads/doe-public-access-plan.
10
+ 1
11
+ arXiv:2301.05286v1 [physics.app-ph] 12 Jan 2023
12
+
13
+ Demonstration of an AI-driven workflow for autonomous high-resolution scanning
14
+ microscopy
15
+ Saugat Kandel,1 Tao Zhou,2 Anakha V Babu,1 Zichao Di,3 Xinxin Li,2, 4 Xuedan Ma,2, 4
16
+ Martin Holt,2 Antonino Miceli,1 Charudatta Phatak,5 and Mathew Cherukara1, a)
17
+ 1)Advanced Photon Source, Argonne National Laboratory, Lemont,
18
+ IL 60439.
19
+ 2)Nanoscience and Technology Division, Argonne National Laboratory, Lemont,
20
+ IL 60439.
21
+ 3)Mathematics and Computer Science, Argonne National Laboratory, Lemont,
22
+ IL 60439.
23
+ 4)Consortium for Advanced Science and Engineering, University of Chicago, Chicago,
24
+ Illinois 60637, USA
25
+ 5)Materials Science Division, Argonne National Laboratory, Lemont,
26
+ IL 60439.
27
+ (Dated: 16 January 2023)
28
+ 2
29
+
30
+ With the continuing advances in scientific instrumentation, scanning microscopes are now
31
+ able to image physical systems with up to sub-atomic-level spatial resolutions and sub-
32
+ picosecond time resolutions. Commensurately, they are generating ever-increasing vol-
33
+ umes of data, storing and analysis of which is becoming an increasingly difficult prospect.
34
+ One approach to address this challenge is through self-driving experimentation techniques
35
+ that can actively analyze the data being collected and use this information to make on-
36
+ the-fly measurement choices, such that the data collected is sparse but representative of
37
+ the sample and sufficiently informative. Here, we report the Fast Autonomous Scanning
38
+ Toolkit (FAST) that combines a trained neural network, a route optimization technique,
39
+ and efficient hardware control methods to enable a self-driving scanning microscopy ex-
40
+ periment. The key features of our method are that: it does not require any prior information
41
+ about the sample, it has a very low computational cost, and that it uses generic hardware
42
+ controls with minimal experiment-specific wrapping. We test this toolkit in numerical ex-
43
+ periments and a scanning dark-field x-ray microscopy experiment of a WSe2 thin film,
44
+ where our experiments show that a FAST scan of <25% of the sample is sufficient to pro-
45
+ duce both a high-fidelity image and a quantitative analysis of the surface distortions in the
46
+ sample. We show that FAST can autonomously identify all features of interest in the sam-
47
+ ple while significantly reducing the scan time, the volume of data acquired, and dose on
48
+ the sample. The FAST toolkit is easy to apply for any scanning microscopy modalities and
49
+ we anticipate adoption of this technique will empower broader multi-level studies of the
50
+ evolution of physical phenomena with respect to time, temperature, or other experimental
51
+ parameters.
52
+ a)Electronic mail: [email protected], [email protected]
53
+ 3
54
+
55
+ I.
56
+ INTRODUCTION
57
+ Scanning microscopes are versatile instruments that use photons, electrons, ions, neutrons, or
58
+ mechanical probes to interrogate atomic-scale composition, topography, and functionality of ma-
59
+ terials, with up to sub-atomic spatial resolution and sub-picosecond time resolution1–3. Notwith-
60
+ standing the variation in the probe modalities, these instruments all rely on a scan of the sample
61
+ to generate spatially resolved signals that are then collected to form an image of the sample. On-
62
+ going advances in instrumentation, such as the development of next-generation x-ray and electron
63
+ detectors4,5, has meant that scanning microscopes can now image faster, and at higher resolutions,
64
+ than ever before. We can now envision a broad use of these instruments to study not only static
65
+ systems, but also multi-level studies of dynamic evolution of materials with time, temperature, or
66
+ other parameters, even in situ or operando6. Fine-resolution large-field-of-view scanning exper-
67
+ iments, however, come with some significant drawbacks: the volume of data generated and the
68
+ probe-induced damage to the sample can be prohibitively large. For example, it is now routinely
69
+ possible to perform x-ray imaging of 1 mm3 volumes at ≈10 nm resolution, but this generates
70
+ ≈ 1015 voxels of data7,8 and requires a commensurately high probe dose9. Meanwhile, the in-
71
+ formation of interest in these experiments is often concentrated in sparse regions that contain
72
+ interfaces, defects, or other specific structural elements. Directing the scan to only these locations
73
+ could greatly reduce the scan time and data volume, but it is difficult to obtain this information a
74
+ priori. Addressing this challenge with a human-in-the-loop protocol, where an experienced user
75
+ examines the data acquired to identify trends and guide the scan, can be tedious and prohibitively
76
+ time consuming (in comparison to the experimental acquisition time). Given these factors, the
77
+ development of autonomous acquisition techniques that can continuously analyze acquired data
78
+ and drive the sampling specifically towards regions of interest is imperative so as to make full use
79
+ of the potential of these scientific instruments.
80
+ In parallel to the advances in scientific instrumentation, the last decade has also seen the rapid
81
+ development of deep learning (DL) techniques and their applications in all domains of science
82
+ and technology, including for the acceleration and enhancement of advanced microscopy meth-
83
+ 4
84
+
85
+ ods10–13. These DL-based inversion methods are enabling real-time data analysis, which is in turn
86
+ opening the door to self-driving techniques that make real-time acquisition decisions based on the
87
+ real-time data streams. Such self-driving or autonomous experimentation methods14 are methods
88
+ that combine automated experimental control with on-the-fly data-driven decision making so that
89
+ an algorithm adaptively explores parameter spaces of interest and conducts new experiments until
90
+ it achieves a pre-defined completion criterion15. These methods therefore have the potential to
91
+ not only remove the need for constant human supervision and intervention in experiments, but
92
+ also make optimal choices in parameter spaces that are too large for humans to easily contextual-
93
+ ize. As such, they have the potential to revolutionize experimental design in many scientific fields
94
+ including the field of imaging and materials characterization.
95
+ In general, the use of data-driven priors to direct future experiments is a Bayesian search prob-
96
+ lem, for which the use of off-the-shelf deep learning methods usually does not suffice16. Specific
97
+ to microscopy, a popular Bayesian search approach is to use unsupervised (without pre-training)
98
+ Gaussian Processes (GPs) that could continuously determine the spatial locations that we are most
99
+ uncertain about, then direct the scanning to these locations17–22. While GPs are powerful tech-
100
+ niques, their computational cost tends to scale cubically with the number of points acquired. The
101
+ decision making time increases during the experiment and quickly exceeds the acquisition time
102
+ for the measurement itself. The development of scalable GPs is a significant area of research, but
103
+ these methods are not yet ready for application in large-scale imaging problems23. General super-
104
+ vised alternatives such as reinforcement learning can be powerful and fast, but they often require
105
+ costly pre-training and tend to ignore the global state of the parameter space in exchange for a
106
+ local search; as such they have only found limited traction for scanning imaging modalities24.
107
+ Specifically for scanning microscopy applications, Godaliyadda et al.25 have proposed to
108
+ achieve computationally efficient autonomous sampling with the Supervised Learning Approach
109
+ for Dynamic Sampling (SLADS) technique. The SLADS technique uses curated feature maps
110
+ to quantify the current measurement state and predict the total image quality improvement ob-
111
+ tained by measuring a given point, thereby informing the choice of which point to measure next.
112
+ Variations of this technique have found applications in live steering for dose-efficient crystal posi-
113
+ 5
114
+
115
+ tioning for crystallography26, and for imaging with transmission electron microscopy 27 and mass
116
+ spectrometry28 methods. These works, however, either involve training with and reconstruction
117
+ of binary images only26,27, or, require extensive training with images closely related to the sample
118
+ under study28. As such, they are difficult to translate to imaging settings with more complex im-
119
+ ages, particularly for imaging without any prior assumptions about the sample. Meanwhile, Zhang
120
+ et al.29 have incorporated a neural network (NN) within the SLADS method (for the SLADS-Net
121
+ method) and shown in numerical experiments that it is sufficient to train the method on only a
122
+ generic image, eschewing any prior knowledge about the sample, to produce high-fidelity images
123
+ with sparse sampling. However, this has not yet been demonstrated in experiment.
124
+ In this work, we report the Fast Autonomous Scanning Toolkit (FAST) that combines the
125
+ SLADS-Net method, a route optimization technique, and efficient and modular hardware controls
126
+ to make on-the-fly sampling and scan path choices for synchrotron-based scanning microscopy.
127
+ This method relies on sample-agnostic training to dynamically measure and reconstruct a com-
128
+ plicated (non-binary) sample, distinguishing this toolkit from existing SLADS-based workflows.
129
+ Moreover, its computational cost is negligible compared to the acquisition time even when run on
130
+ a low-power edge computing device placed at a synchrotron beamline, which presents a signifi-
131
+ cant advantage over more generic autonomous experimentation techniques. These characteristics
132
+ enable the application of our workflow in the high-precision nanoscale scanning x-ray microscopy
133
+ instrument present at the hard x-ray nanoprobe beamline at the Advanced Photon Source.
134
+ We validate the FAST scheme through real time demonstration at the hard x-ray nanoprobe
135
+ beamline at the APS30. A few-layer exfoliated two-dimensional WSe2 thin film was chosen as a
136
+ representative example; the preparation process for the thin film often leaves microscopic air bub-
137
+ bles trapped underneath the thin film, deforming the 2D material. We show that an adaptive scan
138
+ of < 25% of the sample is sufficient to produce a high-fidelity reconstruction that identifies all the
139
+ bubbles within the field of view, and even to acquire quantitative information about the film curva-
140
+ ture induced by these bubbles. The scheme quickly identifies the deformed part of the 2D material
141
+ and focuses its attention there, while ignoring regions of the film that are flat and homogeneous.
142
+ Film curvature reconstructed from the adaptive scan (< 25% coverage) is consistent with that re-
143
+ 6
144
+
145
+ FIG. 1. (Artist’s representation) The APS synchrotron produces a coherent x-ray beam that is focused using
146
+ a zone plate setup. It strikes a WSe2 film (green) exfoliated onto a Si substrate (blue), which generates
147
+ diffraction patterns that are collected by a two-dimensional detector. Above the bubbles, the lattice of the
148
+ film rotates, shifting the diffracted intensities away from its nominal positions. The beam position as well
149
+ as the detector acquisition are autonomously controlled by the FAST AI-based workflow.
150
+ constructed from full-grid scan (100% coverage). Given these characteristics, the FAST scheme
151
+ can be directly applied in other scanning techniques and instruments at the APS and elsewhere,
152
+ and may underpin the development of many multi-level experimental studies.
153
+ 7
154
+
155
+ II.
156
+ RESULTS
157
+ Figure 1 shows the experimental setup that scans a focused x-ray beam on a sample while ac-
158
+ quiring a two-dimensional diffraction image at each point. The live demonstration was performed
159
+ on a few-layer WSe2 sample with the detector placed along the 008 Bragg peak, with 2θ = 43.1°
160
+ at 10.4 keV. The diffraction patterns were processed on the detector computer (see Methods) to
161
+ generate the integrated intensities for use in the FAST workflow. The final output of the workflow
162
+ is a dark-field image of the WSe2 sample.
163
+ A.
164
+ Self-driving scanning microscopy workflow
165
+ Figure 2A broadly illustrates the FAST workflow for the experiments reported here. To ini-
166
+ tiate the workflow, a low-discrepancy quasi-random selection (generated using the Hammersley
167
+ sequence31) of sample positions is measured, corresponding to 1% of the total area of interest. The
168
+ integrated intensities of the measurements are transferred to the edge device, an NVIDIA Jetson
169
+ Xavier AGX32 located adjacent to the detector, which used Inverse Distance Weighted (IDW) in-
170
+ terpolation to estimate the dark-field image. The estimated image serves as input for the decision-
171
+ making step whereby the prospective measurement points are identified.
172
+ This self-driving workflow adopts the Supervised Learning Approach for Dynamic Sampling
173
+ using Deep Neural Networks (SLADS-Net) algorithm29 to find the prospective measurement
174
+ points. In effect, the SLADS-Net algorithm uses the current measurements to identify the best
175
+ unmeasured points that, when added to the existing dataset, would have the greatest effect on the
176
+ quality of the reconstructed image. As illustrated in Figure 2B, this is accomplished by, first,
177
+ representing each unmeasured point as a feature vector with elements that depend on the mea-
178
+ surement state in the neighborhood of the point. These feature vectors are used as input for a
179
+ pre-trained multi-layer perceptron. The neural network then predicts the expected reduction in
180
+ distortion (ERD), a metric (loosely speaking) for the expected improvement in the reconstruction
181
+ quality obtained from measuring this unmeasured point, individually for each unmeasured point.
182
+ The original SLADS-Net algorithm simply uses the unmeasured point with the highest ERD for
183
+ 8
184
+
185
+ COMPUTING CANDIDATES
186
+ OPTIMIZING PATH
187
+ INITIAL ESTIMATION
188
+ INITIAL MEASUREMENTS
189
+ AI @ EDGE
190
+ FINAL RESULT
191
+ NEW ESTIMATION
192
+ NEW MEASUREMENT
193
+ 𝒓
194
+ Generate
195
+ Features
196
+ A
197
+ B
198
+ FIG. 2. The FAST workflow: (A) A set of random initial measurements are transferred to the edge device
199
+ which sequentially generates an initial sample estimate, computes the candidate points to be measured
200
+ next, and calculates the travel path for the measurement. The new measurements are combined with the
201
+ existing measurements and used to calculate a new estimate, and the process is repeated until it achieves
202
+ a completion criterion. (B) The candidate computation starts by examining the local neighborhood (with
203
+ radius r) of each unmeasured point P, with the highlighted points indicating points already measured, to
204
+ generate a 6-dimensional feature vector. The feature vector is transformed to a 50-dimensional vector using
205
+ the Radial Basis Function (RBF) kernel and used as input to a multi-layer NN. The NN then predicts the
206
+ expected improvement in the image (ERD) from measuring the point P. A set of unmeasured pixels with
207
+ the highest ERD are selected as candidates for the next measurement.
208
+ 9
209
+
210
+ 5 hidden layers
211
+ inputs
212
+ 50 nodes per layer
213
+ V1
214
+ output
215
+ ERD
216
+ 02
217
+ the next measurement, and repeats this procedure pointwise. In practice, if the measurement pro-
218
+ cedure and the motor movements are fast, then the ERD calculation also has to be commensurately
219
+ fast to reduce the dead-time in the experiment. In this work, we mitigate this requirement by in-
220
+ stead selecting a batch of points that have the highest ERD, sorted in descending order—we found
221
+ that a batch of 50 points adequately minimized the experimental dead-time while still ensuring
222
+ that the overall measurement was adequately sparse.
223
+ The coordinates of these 50 points are passed on to a route optimization algorithm, based
224
+ on Google’s OR-Tools33, to generate the shortest path for the motors to visit all of them.
225
+ This path is appended to the look-up table in the EPICS34 scan record, which then kicks off the
226
+ data acquisition. Henceforth, the scan is automatically paused after every 50 points, raising a
227
+ flag, an event which triggers a callback function on the edge device. There, a new estimated dark
228
+ field image of the sample is generated, and the coordinates for the next 50 prospective points are
229
+ computed. The scan is resumed after the EPICS scan record receives the new coordinates for the
230
+ optimized scanning path. The actual scanning of the focused x-ray beam is achieved by moving
231
+ two piezoelectric linear translation motors in step mode. The detector exposure time is set to 0.5 s
232
+ and comes with an overhead of 0.2 s.
233
+ For the 200×40 pixels object described in Section II C, the workflow required ≈0.15 s to
234
+ compute the new positions, ≈42 s to scan the set of 50 positions, and a total of ≈0.37 s to process
235
+ the diffraction patterns and communicate the measurements. This represents an overhead of ⪅ 2%.
236
+ The workflow is currently entirely CPU-bound, relying on the on-board 8-core ARM CPUs, and
237
+ does not take advantage of the GPU bundled into the NVIDIA AGX device. In the future, we
238
+ expect to perform the computation in a parallelized and asynchronous fashion, which would further
239
+ reduce this overhead. These timing results showcase the rapid data-driven decision-making ability
240
+ that is characteristic of the FAST workflow.
241
+ We also note that, for all the results reported in this work, the underlying NN was trained on a
242
+ single generic image with no relation to microscopy. For details about the SLADS-Net algorithm
243
+ and the sample-agnostic training procedure, the reader is referred to the Methods section.
244
+ 10
245
+
246
+ B.
247
+ Numerical demonstration for scanning dark-field microscopy
248
+ Reconstructions (10% scan )
249
+ True
250
+ Measured points
251
+ Raster grid (RG)
252
+ LD random (LDR)
253
+ FAST
254
+ A
255
+ B
256
+ C
257
+ D
258
+ E
259
+ G
260
+ H
261
+ I
262
+ F
263
+ FIG. 3. Numerical comparison of sampling methods: (A) shows the ground truth with the color scale
264
+ representing the normalized intensity, (B-D) show respectively the RG, LDR, and FAST reconstructions at
265
+ 10% scan coverage, and (G-I) show the actual scan points that produce these reconstructions. (E-F) show
266
+ the evolution of the NRMSE (lower is better) and SSIM (higher is better) as a function of the scan coverage.
267
+ The FAST reconstruction stabilizes at 27% coverage while the other techniques take significantly longer to
268
+ reach the same quality.
269
+ We first validated the performance of the proposed workflow through a numerical experiment
270
+ on a set of pre-acquired dark-field microscopy data. Here, we compared the FAST sampling with
271
+ three static sampling techniques:
272
+ 1. Raster grid (RG) For a test sampling percentage, we generated an equally spaced raster grid
273
+ that provides a uniform coverage of the sample.
274
+ 2. Uniform random (UR) sampling The measurement pixels were drawn from a uniform
275
+ 11
276
+
277
+ LDR
278
+ UR
279
+ RG
280
+ FAST150 μm.
281
+ ·
282
+ ..
283
+ random distribution.
284
+ 3. Low-discrepancy (LDR) random sampling For each measurement percentage, we gener-
285
+ ated a low-discrepancy sampling grid using the quasi-random Hammersley sequence.
286
+ The test dataset is a dark field image of size 600×400 pixels which represents 240,000 possible
287
+ measurement positions. This covers a physical area of 900 µm×600 µm and encloses multiple
288
+ flakes of WSe2 with various thicknesses, with the thicker regions associated with regions of higher
289
+ brightness in the image (Figure 3). At this spatial resolution, only medium and large sized bubbles
290
+ (with diameter > 2 µm) can be observed. As explained previously, the bubbles deform the surface
291
+ and shift the Bragg peak of the 2D materials away from their theoretical (flat region) positions,
292
+ resulting in regions of darker contrast. Finally, the image also contains flake-free regions that have
293
+ zero integrated intensities.
294
+ For this comparison, we first initialized the FAST sampling with a 1% measurement coverage
295
+ (as described above), then successively measured 50 additional points at each iteration. For each FAST
296
+ measurement, we also generate RG, UR, and LDR measurement masks with the same number
297
+ of scan points. In this fashion, we generate a sequence of sampling masks and the associated
298
+ reconstructions until we achieve 100% sampling.
299
+ We present the numerical results in Figure 3, where we show a comparison of the various meth-
300
+ ods at 10% sampling. Note that while the proposed method internally uses the fast IDW algorithm
301
+ for the inpainting, the final images presented here are calculated using the higher quality bihar-
302
+ monic inpainting technique35. The uniform random scheme performs worse than the LD-random
303
+ and raster grid schemes and is not shown in the figure. In Figure 3A-D, we can see that the FAST
304
+ sampling is able to reproduce with high fidelity the flake boundaries, the bubbles, and the regions
305
+ of transition between the varying levels of thicknesses. In contrast, the LDR and raster schemes
306
+ produce much lower quality reconstructions of these features. Figure 3E shows an evolution of the
307
+ normalized root mean squared error (NRMSE) and fig. 3F the structural similarity metric (SSIM)
308
+ (which measures multiscale perceptual similarity) for the different sampling techniques. It is ev-
309
+ ident that FAST produces high quality reconstructions at much lower measurement percentages
310
+ 12
311
+
312
+ than the examined static sampling techniques. We note that the result could be further improved
313
+ in the future by using a more sophisticated inpainting technique within the FAST method. To un-
314
+ derstand how FAST outperforms the other methods under the same sampling condition, we show
315
+ the actual measured positions of the various schemes at 10% coverage (Figure 3G-I). FAST pref-
316
+ erentially samples the regions with significant heterogeneity over the homogeneous regions. This
317
+ is particularly useful for sparse samples, where the time spent sampling from empty regions adds
318
+ little additional information.
319
+ C.
320
+ Experimental demonstration
321
+ Full scan image
322
+ FAST reconstructions
323
+ Measured points
324
+ A
325
+ C
326
+ E
327
+ G
328
+ B
329
+ D
330
+ F
331
+ Measurements between 15-20%
332
+ H
333
+ FIG. 4. Evolution of the FAST scan: (A, C, E) show the reconstruction at 5%, 15%, and 20% reconstructions
334
+ respectively, (B, D, F) show the corresponding actual measurement points. (G) shows the image obtained
335
+ through a full-grid pointwise scan. The color scale in (A-G) show the normalized intensities. (H) shows
336
+ only the points sampled between 15% and 20% coverage.
337
+ We next demonstrate the application of the FAST workflow in a live experiment at a syn-
338
+ chrotron beamline. A video showing the sampling, recorded live during the actual experiment, is
339
+ available here36. Other than starting the workflow scripts at the beginning, the entire experiment
340
+ was unmanned and fully automated. In order to measure the deformed WSe2 flakes in detail, a
341
+ higher spatial resolution of 100 nm was chosen. This limits the field of view to 20 µm×4 µm for
342
+ 13
343
+
344
+ 5%:15%+++20%2 μma scan point density of 200×40 points.
345
+ In Figure 4 we show the reconstructed dark field image (subplots A,C,E) and the measurement
346
+ points (subplots B,D,F) from 5 % to 20 % coverage and compare them to that obtained from raster
347
+ scanning the sample with 100% coverage (subplot G). We see that the FAST method identifies
348
+ some of the regions of hetereogeneity — the edges of the bubbles — and starts to preferentially
349
+ sample these regions within 5 % coverage of the sample. At 15 % coverage, these regions are
350
+ extensively sampled. The reconstruction does not change significantly between 15 % to 20 %,
351
+ indicating that the reconstruction has stabilized. Moreover, the 20 % reconstruction also contains
352
+ sharp and accurate reproductions of all the major features present in the full scan image.
353
+ A point of interest is that the partially scanned bubble at the bottom right corner of Figure 4E-
354
+ G shows up only in the 20% scan, and not in the 15% scan. To explain this, we note that the
355
+ 5% scan, and therefore the initial 1% random sampling, does not contain any measurements in
356
+ the neighborhood of this bubble. The FAST scheme favors exploitation of regions it knows to
357
+ be heterogeneous over exploration of this fully unknown region, and therefore only explores this
358
+ region much later in the measurement process (Figure 4H). This is, in fact, an instance of the
359
+ general exploration-exploitation tradeoff that exists in all Bayesian search procedures37. Potential
360
+ mitigation steps could be to sample more initially (say 5% random points), or to deliberately
361
+ introduce diversity into each batch of measurement points.
362
+ So far we have reduced the diffraction image measured at each point to one single quantity
363
+ (integrated intensity) in order to guide the automated experiment. These images often need to
364
+ be reprocessed after the experiment to extract additional physically relevant results. Notably, the
365
+ intensity distribution in the diffraction patterns contains information about the strain as well as
366
+ the rotation of the crystal lattice, and in this case, the curvature of the 2D materials due to the
367
+ bubbles underneath. A simple center of mass calculation in the X direction (CoMx) would yield
368
+ the magnitude of the film curvature in the XZ plane. The curvature (deviation of the CoMx from its
369
+ nominal value) is the smallest around the center of the bubble and the largest at the edge. It also
370
+ changes sign going from the left side to the right side. Center of mass calculation in the Y direction
371
+ yields the magnitude of the film curvature in the YZ plane. The results look slightly different from
372
+ 14
373
+
374
+ the CoMx calculations due to the way the shifted Bragg peak intersects with the Ewald’s sphere.
375
+ Figure 5A and B show respectively the CoMx and CoMy obtained from the raster scan with 100%
376
+ coverage on the area of interest. The unit is the number of pixels shifted, relative to the center of the
377
+ nominal diffraction pattern. Figure 5C and D show respectively the CoMx and CoMy obtained
378
+ with FAST. The curvature information of the film was faithfully reproduced despite scanning just
379
+ 20% of the entire area. For more information on the reconstruction of the CoM maps, the reader is
380
+ referred to the Methods section.
381
+ CoMx
382
+ CoMy
383
+ FAST
384
+ Full
385
+ FIG. 5. Comparison of the per measured point center of mass of the diffraction patterns between the FAST
386
+ scan at 20% coverage and full-grid scan. Subplots (A) and (B) show the inpainted COMx and COMy,
387
+ respectively, for the full-grid raster scan, and subplots (C) and (D) for the FAST scan.
388
+ III.
389
+ DISCUSSION
390
+ In this work, we have showcased the FAST workflow that combines a sparse sampling algo-
391
+ rithm with route planning to drive a scanning diffraction microscopy experiment at a synchrotron
392
+ beamline. In addition to being an effective alternative to a full pointwise scan to acquire a dark-
393
+ field image of the sample, FAST also produces accurate quantitative measurements of its phys-
394
+ ical properties. For our live demonstration of a 200 points×40 points with a measurement time
395
+ of 0.5 s/point, the FAST decision-making time was negligible, leading to an overall saving of
396
+ ≈80 min (about ≈65 %) of the experiment time. This saving was facilitated by our choice to ac-
397
+ quire a batch of 50 measurements between the selection of the prospective measurement points.
398
+ This ensured that the communication time stayed negligible with no noticeable loss in the quality
399
+ of points acquired when compared to a pointwise candidate selection scheme (see Supplemental
400
+ 15
401
+
402
+ CDB
403
+ 2 μmMaterials, Fig. S1).
404
+ The generalizability of the FAST method comes from the fact that the key NN-based compo-
405
+ nent of this workflow is trained on just the standard cameraman image38, not on close analogues of
406
+ a sample of interest. While this generalizability results in a slight loss of performance of the tech-
407
+ nique, it still shows excellent sparsity performance for cases tested in previous research29,39 and
408
+ in the current work. This has the benefit that we do not need a priori knowledge of the sample. As
409
+ such, while general pre-training would be difficult to satisfy for new and expensive experiments,
410
+ the FAST approach can be used directly. Furthermore, the batch prediction and route optimization
411
+ approach we implement can also be directly applied in any application of choice. Moreover, the
412
+ experimental application of our work uses an extensible edge device and the widely used EPICS
413
+ platform for hardware control, both of which can be incorporated into any instrument even with
414
+ the SLADS-Net replaced by any other sampling strategies. For example, we could just replace the
415
+ dark-field detection procedure described here with a fluorescence counting setup and use exactly
416
+ the FAST scheme for a fluorescence-based imaging of the sample. Alternatively, since all the in-
417
+ struments at the APS rely on EPICS controls, one can perform transmission, surface scattering,
418
+ or any other 2D scanning experiment in any applicable beamline with only minor changes to the
419
+ FAST routine.
420
+ The computations in the current workflow have a time complexity of O(2N logN +kM logN),
421
+ where N is the number of measured points, M the number of unmeasured points, and k the num-
422
+ ber of nearest neighboring measurements (k = 10 in our case) that we use for the feature vector
423
+ calculations. Here, the first term accounts for the creation of the nearest neighbor K-d tree and
424
+ the second term for the nearest neighbor calculation. The remainder of the algorithm has a linear
425
+ time complexity and could be performed in parallel for the unmeasured points. We expect that it is
426
+ possible to reduce this complexity using an approximate nearest neighbor search method instead
427
+ of the K-d tree approach. As such, a GPU-based implementation that takes advantage of the par-
428
+ allelization and the approximation would likely significantly reduce the computation time. This
429
+ stands in stark contrast with the time complexity of O(N³)
430
+ 
431
+ 
432
+ (for N measured points) for Gaussian
433
+ Processes, a similarly training-free method that is widely used for autonomous experimentation.
434
+ 16
435
+
436
+ For an illustrative example, Vasudevan et al20 report a GP-based scanning microscopy experiment
437
+ where the calculation of each set of measurement candidates takes ≈6 s on an NVIDIA DGX-2
438
+ GPU for a 50×50 image; our workflow performs an equivalent calculation for a larger 200×40
439
+ image within ≈1.5 s in a low-power CPU. We note, however, that GPs remain a very powerful and
440
+ generalizable approach with a bevy of applications beyond only scanning microscopy.
441
+ We caution that our workflow suffers from three important challenges. First, it depends heav-
442
+ ily on the initial 1% random sampling to discover regions of heterogeneity. If an isolated feature
443
+ present in an otherwise homogeneous region is not partially sampled during this random sam-
444
+ pling step, then such a feature can be missed until much later in the scanning experiment (see
445
+ Figure 4H). A related second limitation is that this method produces sub-optimal reconstructions
446
+ if the sample is sufficiently heterogeneous that the data in each pixel changes significantly from
447
+ pixel to pixel throughout the image (Supplemental Material in Hujsak et al27). The third limi-
448
+ tation, more practical in nature, is that the scan paths require significant motor movement, often
449
+ including a retracing over points already measured. As such, there could exist scenarios in which
450
+ the time required for the motor movement eclipses the time required for a single measurement.
451
+ We expect to address these limitations by explicitly including a measurement-density-based term
452
+ 39 or a movement-time-based term in the candidate selection procedure40, or by using a line-based
453
+ sampling technique41.
454
+ Despite these challenges, we believe that the proposed FAST technique has great potential. It
455
+ is an ideal tool for use cases with limited sampling or dosage budgets. It can be used to isolate
456
+ regions of interest in sparse settings, to prepare for pointwise scanning in these regions. More
457
+ generally, it can be used to guide any scanning microscopy experiment where we do not need
458
+ full pointwise information. In the future, we expect to extend this method for 3D imaging, fly
459
+ scans, ptychography, and other imaging applications. We expect that these developments will
460
+ significantly enhance the efficacy of scanning microscopy experiments, bolstering their use for the
461
+ study of dynamic physical phenomena.
462
+ 17
463
+
464
+ IV.
465
+ METHODS
466
+ A.
467
+ The SLADS-Net algorithm
468
+ The SLADS-Net algorithm29 used within the FAST workflow is an adaptation of the Super-
469
+ vised Learning Approach for Dynamic Sampling (SLADS) algorithm originally developed by
470
+ Godaliyadda et al25, and the algorithms differ only in their training approaches ( Section IV B). To
471
+ explain the SLADS algorithm, we first denote the object we want to measure as A ∈ RN, where N
472
+ is the total number of pixels in the image. Further, we can denote the pixel at location 1 ≤ s ≤ N
473
+ as as so that a measurement at the location s extracts the value as; each measurement is thus
474
+ characterized by the pair (s, as). After k measurements, then, we get the k×2 measurement vector
475
+ Y k = [ s1, as1 ; s2, as2 ; ... ; sk, ask ]
476
+ (1)
488
+ Using these k measurements, then, we can reconstruct (e.g. via interpolation) an estimate ˆAk of the
489
+ true object A. The difference between A and ˆAk is denoted as the distortion D(A, ˆAk) and can be
490
+ calculated using any chosen metric. In the current work, we define D(A, ˆAk) to be the L2 norm:
491
+ D(A, ˆAk) = ||A− ˆAk||2.
492
+ Given the measurement Y k and the reconstruction ˆAk, a new measurement at any location s will
493
+ presumably reduce the distortion in the reconstruction. We can denote this reduction in distortion
494
+ (RD) as
495
+ Rk,s = D(A, ˆAk)−D(A, ˆAk,s)
496
+ (2)
497
+ where ˆAk,s is the reconstruction that includes the newly added measurement at s. The goal of
498
+ the SLADS algorithm is then to identify the pixel location that would maximize this reduction in
499
+ distortion:
500
+ sk+1 = argmax
501
+ s
502
+ Rk,s
503
+ (3)
504
+ 18
505
+
506
+ Of course, since we cannot know the value of the measurement as or the ground truth A, SLADS
507
+ bases its selection on the conditional expectation of reduction in distortion (ERD), which is defined
508
+ as:
509
+ R̄k,s = E[ Rk,s | Y k ]
510
+ 
511
+ 
512
+ so that
513
+ sk+1 = argmax
514
+ s
515
+ R̄k,s.
516
+ (4)
517
+ The algorithm assumes that we can compute the ERD at s based on just the measurement state Yk
518
+ as
519
+ Rk,s = g(vk,s)
520
+ (5)
521
+ where vk,s is a location-dependent feature vector calculated using the measurement state Yk. The
522
+ goal of the SLADS training procedure is to estimate the function g.
523
+ B.
524
+ Training
525
+ The training procedure for the SLADS/SLADS-Net algorithm is a supervised procedure in
526
+ which we generate a large number of (vk,s,Rk,s) pairs and use these to estimate g. Note that this
527
+ is a pixelwise computation that is performed independently for each measurement location s; for
528
+ each measurement s we have to calculate a reconstruction ˆAk,s before we can calculate the RD Rk,s.
529
+ To make this computationally tractable, Godaliyadda et al.25 use approximations that ensure
530
+ that the RD of each pixel only depends on its local neighborhood. Correspondingly, instead of
531
+ working with the full measurement state Y k, the training procedure uses carefully designed feature
532
+ vectors that capture the local neighborhood of the pixel at location s. As shown in Figure 2B, the
533
+ feature vector for the pixel P consists of six features: (i) ∇x and ∇y are the spatial gradients at
534
+ P, (ii) σ1,r and σ2,r measure the deviation of the estimated value for P from the nearby measured
535
+ values (highlighted in red), and (iii) L (which is the distance of P from the closest measured point)
536
+ and ρr measure the density of measurements around P.
537
+ The original SLADS algorithm assumes that this feature vector is linearly related to the RD,
538
+ and the training therefore is a linear regression procedure. The SLADS-Net adaptation first uses
539
+ a radial basis function (RBF) kernelization to transform the 6-dimensional feature vector to a
540
+ 19
541
+
542
+ 50-dimensional vector, then replaces the linear predictor with a nonlinear fully-connected neural
543
+ network that contains 5 hidden layers with 50 nodes each.
544
+ In this work, we train the SLADS-Net neural network on only the standard cameraman image,
545
+ without using any a priori information about the sample. For the training, we generate a mea-
546
+ surement state Y k by randomly choosing a fixed number of measurement locations, then
547
+ calculate the feature vector vk,s and the RD Rk,s for each unmeasured pixel. We generate such
548
+ sets of training pairs for 10 different sample coverage percentages between 1% and 80%. This
549
+ overall comprises our training dataset. We use this data to train the neural network for 100 epochs
550
+ using the Adam optimizer with the learning rate 0.001. We use this trained model for all the simu-
551
+ lated and experimental measurements. We provide an example of a training measurement set—the
552
+ measured points, the interpolated reconstruction, and the corresponding RD for the unmeasured
553
+ points—in the supplemental materials (Fig. S2).
554
+ C.
555
+ Experimental measurements
556
+ At each point of the measurement, a tight region of interest (RoI) around the expected position
557
+ of the thin film Bragg peak was extracted from the corresponding diffraction image. Integrated
558
+ intensities of the RoI were used to guide the NN prediction. For the flat region, the integrated
559
+ intensity is high, showing up as brighter contrast on the dark field image. For the deformed region,
560
+ the integrated intensity is low (darker contrast on the dark field image) as the illuminated film
561
+ diffraction partially exits the selected RoI (see Supplemental Materials, Fig. S3).
562
+ For the FAST experiment, the predicted ERD and the dark-field reconstruction served as visual
563
+ guides to inform when to stop the experiment. During the experiment, we noted that the ERD and
564
+ the reconstruction had stabilized by ≈20 % scan coverage, but we let the experiment run to ≈35 %
565
+ coverage to ensure that this behavior persisted (see Supplemental Materials, Fig. S4). While we
566
+ used this visual criterion for our exploratory experiment, it is straightforward to design a numerical
567
+ stopping criterion based on the absolute or relative convergence of the ERD, or on the per-iteration
568
+ change in the reconstructed image.
569
+ 20
570
+
571
+ DATA AND CODE AVAILABILITY
572
+ The data and code will be made available at https://github.com/saugatkandel/fast_
573
+ smart_scanning
574
+ ACKNOWLEDGMENTS
575
+ Work performed at the Center for Nanoscale Materials and Advanced Photon Source, both
576
+ U.S. Department of Energy Office of Science User Facilities, was supported by the U.S. DOE,
577
+ Office of Basic Energy Sciences, under Contract No. DE-AC02-06CH11357. We also acknowl-
578
+ edge support from Argonne LDRD 2021-0090 – AutoPtycho: Autonomous, Sparse-sampled Pty-
579
+ chographic Imaging. We gratefully acknowledge the computing resources provided on Bebop, a
580
+ high-performance computing cluster operated by the Laboratory Computing Resource Center at
581
+ Argonne National Laboratory. X.L. acknowledges support from the National Science Foundation
582
+ CBET Program under the award no. 2025214.
583
+ COMPETING INTERESTS
584
+ The authors declare that they have no competing financial interests.
585
+ REFERENCES
586
+ 1Goldstein, J. I. et al.
587
+ Scanning Electron Microscopy and X-Ray Microanalysis (Springer
588
+ New York,
589
+ New York,
590
+ NY, 2018).
591
+ URL http://link.springer.com/10.1007/
592
+ 978-1-4939-6676-9.
593
+ 2Zuo, J. M. & Spence, J. C. Advanced Transmission Electron Microscopy (Springer New York,
594
+ New York, NY, 2017). URL http://link.springer.com/10.1007/978-1-4939-6607-3.
595
+ 3Voigtländer, B. Scanning Probe Microscopy: Atomic Force Microscopy and Scanning Tunneling
596
+ Microscopy. NanoScience and Technology (Springer Berlin Heidelberg, Berlin, Heidelberg,
597
+ 2015). URL http://link.springer.com/10.1007/978-3-662-45240-0.
598
+ 21
599
+
600
+ 4Hiraki, T. N. et al. Development of an on-the-fly data processing with information lossless
601
+ compression for CITIUS detectors at SPring-8. Acta Crystallographica Section A 77, C531
602
+ (2021). URL https://doi.org/10.1107/S0108767321091583.
603
+ 5Tate, M. W. et al.
604
+ High dynamic range pixel array detector for scanning transmission
605
+ electron microscopy.
606
+ Microscopy and Microanalysis 22, 237–249 (2016).
607
+ URL https://
608
+ www.cambridge.org/core/journals/microscopy-and-microanalysis/article/abs/
609
+ high-dynamic-range-pixel-array-detector-for-scanning-transmission-electron-microscopy/
610
+ 17F33FF3721141C496EEC402F6D962E7. 1511.03539.
611
+ 6Kalinin, S. V. et al. Microscopy is All You Need (2022). URL http://arxiv.org/abs/2210.
612
+ 06526. ArXiv:2210.06526 [cond-mat].
613
+ 7Holler, M. et al. High-resolution non-destructive three-dimensional imaging of integrated cir-
614
+ cuits. Nature 543, 402–406 (2017).
615
+ 8Jiang, Y. et al. Achieving high spatial resolution in a large field-of-view using lensless x-ray
616
+ imaging. Applied Physics Letters 119, 124101 (2021). URL https://doi.org/10.1063/5.
617
+ 0067197.
618
+ 9Du, M. et al. Upscaling X-ray nanoimaging to macroscopic specimens. Journal of Applied
619
+ Crystallography 54, 386–401 (2021). URL https://journals.iucr.org/j/issues/2021/
620
+ 02/00/jo5064/. Number: 2 Publisher: International Union of Crystallography.
621
+ 10Cherukara, M. J. et al. AI-enabled high-resolution scanning coherent diffraction imaging. Ap-
622
+ plied Physics Letters 117, 044103 (2020). URL https://aip.scitation.org/doi/full/
623
+ 10.1063/5.0013065. Publisher: American Institute of Physics.
624
+ 11Chan, H. et al. Rapid 3D nanoscale coherent imaging via physics-aware deep learning. Ap-
625
+ plied Physics Reviews 8, 021407 (2021). URL https://aip.scitation.org/doi/full/10.
626
+ 1063/5.0031486. Publisher: American Institute of Physics.
627
+ 12Yao, Y. et al.
628
+ AutoPhaseNN: unsupervised physics-aware deep learning of 3D nanoscale
629
+ Bragg coherent diffraction imaging.
630
+ npj Computational Materials 8, 1–8 (2022).
631
+ URL
632
+ https://www.nature.com/articles/s41524-022-00803-w. Number: 1 Publisher: Nature
633
+ Publishing Group.
634
+ 22
635
+
636
+ 13Babu, A. V. et al. Deep learning at the edge enables real-time streaming ptychographic imaging
637
+ (2022). URL http://arxiv.org/abs/2209.09408. ArXiv:2209.09408 [cs, eess].
638
+ 14Häse, F., Roch, L. M. & Aspuru-Guzik, A. Next-Generation Experimentation with Self-Driving
639
+ Laboratories. Trends in Chemistry 1, 282–291 (2019). URL https://linkinghub.elsevier.
640
+ com/retrieve/pii/S258959741930019X.
641
+ 15Burger, B. et al. A mobile robotic chemist. Nature 583, 237–241 (2020). URL https://www.
642
+ nature.com/articles/s41586-020-2442-2. Number: 7815 Publisher: Nature Publishing
643
+ Group.
644
+ 16Vasudevan, R. K., Ziatdinov, M., Vlcek, L. & Kalinin, S. V. Off-the-shelf deep learning is not
645
+ enough, and requires parsimony, Bayesianity, and causality. npj Computational Materials 7,
646
+ 1–6 (2021). URL https://www.nature.com/articles/s41524-020-00487-0. Number: 1
647
+ Publisher: Nature Publishing Group.
648
+ 17Noack, M. M. et al. A Kriging-Based Approach to Autonomous Experimentation with Appli-
649
+ cations to X-Ray Scattering. Scientific Reports 9, 11809 (2019). URL https://www.nature.
650
+ com/articles/s41598-019-48114-3. Number: 1 Publisher: Nature Publishing Group.
651
+ 18Noack, M. M., Doerk, G. S., Li, R., Fukuto, M. & Yager, K. G.
652
+ Advances in Kriging-
653
+ Based Autonomous X-Ray Scattering Experiments. Scientific Reports 10, 1325 (2020). URL
654
+ https://www.nature.com/articles/s41598-020-57887-x. Number: 1 Publisher: Nature
655
+ Publishing Group.
656
+ 19Noack, M. M. et al. Gaussian processes for autonomous data acquisition at large-scale syn-
657
+ chrotron and neutron facilities.
658
+ Nature Reviews Physics 3, 685–697 (2021).
659
+ URL https:
660
+ //www.nature.com/articles/s42254-021-00345-y. Number: 10 Publisher: Nature Pub-
661
+ lishing Group.
662
+ 20Vasudevan, R. K. et al. Autonomous Experiments in Scanning Probe Microscopy and Spec-
663
+ troscopy: Choosing Where to Explore Polarization Dynamics in Ferroelectrics.
664
+ ACS Nano
665
+ 15, 11253–11262 (2021). URL https://doi.org/10.1021/acsnano.0c10239. Publisher:
666
+ American Chemical Society.
667
+ 23
668
+
669
+ 21Kalinin, S. V. et al.
670
+ Automated and Autonomous Experiments in Electron and Scanning
671
+ Probe Microscopy. ACS Nano 15, 12604–12627 (2021). URL https://doi.org/10.1021/
672
+ acsnano.1c02104. Publisher: American Chemical Society.
673
+ 22Garnett, R. Bayesian Optimization (Cambridge University Press, 2023). To appear.
674
+ 23Liu, H., Ong, Y.-S., Shen, X. & Cai, J. When Gaussian Process Meets Big Data: A Review of
675
+ Scalable GPs. IEEE Transactions on Neural Networks and Learning Systems 31, 4405–4423
676
+ (2020). Conference Name: IEEE Transactions on Neural Networks and Learning Systems.
677
+ 24Schloz, M., Müller, J., Pekin, T. C., Broek, W. V. d. & Koch, C. T. Deep Reinforcement Learning
678
+ for Data-Driven Adaptive Scanning in Ptychography (2022). URL http://arxiv.org/abs/
679
+ 2203.15413. ArXiv:2203.15413 [physics].
680
+ 25Godaliyadda, G. D. et al. A Supervised Learning Approach for Dynamic Sampling. Elec-
681
+ tronic Imaging 28, 1–8 (2016). URL https://library.imaging.org/ei/articles/28/
682
+ 19/art00020.
683
+ 26Scarborough, N. M. et al. Dynamic X-ray diffraction sampling for protein crystal positioning.
684
+ Journal of Synchrotron Radiation 24, 188–195 (2017). URL https://journals.iucr.org/
685
+ s/issues/2017/01/00/rv5057/. Number: 1 Publisher: International Union of Crystallogra-
686
+ phy.
687
+ 27Hujsak, K. A., Roth, E. W., Kellogg, W., Li, Y. & Dravid, V. P.
688
+ High speed/low dose
689
+ analytical electron microscopy with dynamic sampling.
690
+ Micron 108, 31–40 (2018).
691
+ URL
692
+ https://linkinghub.elsevier.com/retrieve/pii/S0968432817304821.
693
+ 28Hu, H. et al.
694
+ High-Throughput Mass Spectrometry Imaging with Dynamic Sparse Sam-
695
+ pling. ACS Measurement Science Au 2, 466–474 (2022). URL https://doi.org/10.1021/
696
+ acsmeasuresciau.2c00031. Publisher: American Chemical Society.
697
+ 29Zhang, Y. et al. SLADS-Net: Supervised Learning Approach for Dynamic Sampling using
698
+ Deep Neural Networks. Electronic Imaging 30, 131–1–1316 (2018). URL https://library.
699
+ imaging.org/ei/articles/30/15/art00006.
700
+ 30Winarski, R. P. et al.
701
+ A hard X-ray nanoprobe beamline for nanoscale microscopy.
702
+ Jour-
703
+ nal of Synchrotron Radiation 19, 1056–1060 (2012).
704
+ URL https://doi.org/10.1107/
705
+ 24
706
+
707
+ S0909049512036783.
708
+ 31Wong, T.-T., Luk, W.-S. & Heng, P.-A. Sampling with Hammersley and Halton Points. Journal
709
+ of Graphics Tools 2, 9–24 (1997). URL http://www.tandfonline.com/doi/abs/10.1080/
710
+ 10867651.1997.10487471.
711
+ 32NVIDIA Jetson AGX Xavier Developer Kit. https://developer.nvidia.com/embedded/
712
+ jetson-agx-xavier-developer-kit. [Online; accessed 16-Nov-2021].
713
+ 33Perron, L. & Furnon, V. Or-tools. URL https://developers.google.com/optimization/.
714
+ 34Experimental Physics and Industrial Control System (EPICS). https://epics-controls.
715
+ org.
716
+ 35Damelin, S. B. & Hoang, N. S. On Surface Completion and Image Inpainting by Biharmonic
717
+ Functions: Numerical Aspects. International Journal of Mathematics and Mathematical Sci-
718
+ ences 2018, 3950312 (2018). URL https://doi.org/10.1155/2018/3950312. Publisher:
719
+ Hindawi.
720
+ 36Fast scan video.
721
+ URL https://danielzt12.github.io/latest_news/2022/10/05/
722
+ AI-enabled-smart-scanning.html.
723
+ 37Brochu, E., Cora, V. M. & de Freitas, N. A Tutorial on Bayesian Optimization of Expensive Cost
724
+ Functions, with Application to Active User Modeling and Hierarchical Reinforcement Learning
725
+ (2010). URL http://arxiv.org/abs/1012.2599. ArXiv:1012.2599 [cs].
726
+ 38Cameraman (1978). URL https://dome.mit.edu/handle/1721.3/195767.
727
+ 39Grosche, S., Koller, M., Seiler, J. & Kaup, A. Dynamic Image Sampling Using a Novel Variance
728
+ Based Probability Mass Function. IEEE Transactions on Computational Imaging 6, 1440–1450
729
+ (2020). URL https://ieeexplore.ieee.org/document/9224173/.
730
+ 40Betterton, J.-R., Ratner, D., Webb, S. & Kochenderfer, M. Reinforcement Learning for Adaptive
731
+ Illumination with X-rays. In 2020 IEEE International Conference on Robotics and Automation
732
+ (ICRA), 328–334 (2020). ISSN: 2577-087X.
733
+ 41Helminiak, D., Hu, H., Laskin, J. & Hye Ye, D. Deep Learning Approach for Dynamic Sparse
734
+ Sampling for High-Throughput Mass Spectrometry Imaging. Electronic Imaging 33, 290–1–
735
+ 290–7 (2021). URL https://library.imaging.org/ei/articles/33/15/art00007.
736
+ 25
737
+
738
+ Supplemental material
739
+ January 16, 2023
740
+ 1
741
+ arXiv:2301.05286v1 [physics.app-ph] 12 Jan 2023
742
+
743
+ A
744
+ B
745
+ Figure S1: Comparison of the FAST reconstructions for scan batch size of 1
746
+ (FAST-1) and 50 (FAST-50) as a function of the scan coverage for the numerical
747
+ simulation described in Section II.B. We observe that FAST-1 initially performs
748
+ better, with lower NRMSE and higher SSIM, than FAST-50, but this advantage
749
+ erodes quickly. We ended the FAST-1 experiment at ≈8.2 % sampling due to
750
+ simulation time limitations.
751
+ 2
752
+
753
+ A
754
+ B
755
+ C
756
+ Figure S2: Example of training data. (A) shows a set of randomly selected mea-
757
+ surement points. (B) shows the reconstruction calculated by interpolating these
758
+ measurements. (C) shows the ERDs calculated for the unmeasured points, with the
759
+ ERD highest at regions of heterogeneity. The location of the measured points,
760
+ the measured values, and the reconstruction are used to generate feature vectors
761
+ for the training, and the ERDs are used as the training labels.
762
+ 3
763
+
764
+ CoMx
765
+ CoMy
766
+ Figure S3: Example of ROI selection and change in diffraction patterns around
767
+ the bubbles. (A) and (B) respectively show the CoMx and CoMy calculated from
768
+ the FAST scan with 20% coverage, as discussed in Section II.C. The diffraction
769
+ patterns for the points marked with the ×, �, and + are shown in the bottom row.
770
+ The × point is in a region without a bubble and has the diffraction pattern at the
771
+ Bragg angle. The points marked with � and + are located at the top and bottom
772
+ edges of the bubble, and therefore show additional anomalous diffraction spots.
773
+ The dashed square boxes in the diffraction pattern figures indicate the ROI used
774
+ for the dark-field image reconstructions (shown in Figure 4 in the main paper).
775
+ The CoM calculations use the regions outside the dashed square boxes as the RoI.
776
+ 4
777
+
778
+ AB
779
+ XOX0+0.0
780
+ 0.5
781
+ 1.0
782
+ 1.5
783
+ 2.0
784
+ 2.5
785
+ ERD
786
+ 1e6
787
+ 0
788
+ 10
789
+ 20
790
+ 30
791
+ 40
792
+ 50
793
+ 60
794
+ Scan iteration
795
+ 0
796
+ 5
797
+ 10
798
+ 15
799
+ 20
800
+ 25
801
+ 30
802
+ 35
803
+ Scan coverage (%)
804
+ 20% coverage
805
+ Figure S4: Evolution in the ERD for the experimental demonstration. The ERD
806
+ initially decreases rapidly, during which each batch of 50 points signifi-
807
+ cantly improves the sample reconstruction. The per-iteration change in the ERD is
808
+ much smaller at 20% coverage.
809
+ 5
810
+
4tE4T4oBgHgl3EQf1A0X/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
5tAzT4oBgHgl3EQfEfox/content/2301.00993v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45bfea68cbb4ea342ce55c3725f91490b3b48ad750d46fd91ebecb84e0129a8b
3
+ size 276601
5tAzT4oBgHgl3EQfEfox/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25d5a2932237c1c1a2842d30a26e1514a943f2c4ff2a206b0909a4b78cfa84fb
3
+ size 2228269
5tAzT4oBgHgl3EQfEfox/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a9539a31c671df33902b24d26ef1bb80a10a8e53d00f301184dea9d324077ef
3
+ size 91877
79A0T4oBgHgl3EQfOf-b/content/tmp_files/2301.02162v1.pdf.txt ADDED
@@ -0,0 +1,1504 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.02162v1 [stat.ME] 5 Jan 2023
2
+ Improve Efficiency of Doubly Robust Estimator when
3
+ Propensity Score is Misspecified
4
+ Liangbo Lv∗ and Molei Liu†
5
+ Abstract
6
+ Doubly robust (DR) estimation is a crucial technique in causal inference and miss-
7
+ ing data problems. We propose a novel Propensity score Augmented Doubly robust
8
+ (PAD) estimator to enhance the commonly used DR estimator for average treatment
9
+ effect on the treated (ATT), or equivalently, the mean of the outcome under covariate
10
+ shift. Our proposed estimator attains a lower asymptotic variance than the conven-
11
+ tional DR estimator when the propensity score (PS) model is misspecified and the
12
+ outcome regression (OR) model is correct while maintaining the double robustness
13
+ property that it is valid when either the PS or OR model is correct. These are realized
14
+ by introducing some properly calibrated adjustment covariates to linearly augment the
15
+ PS model and solving a restricted weighted least square (RWLS) problem to minimize
16
+ the variance of the augmented estimator. Both the asymptotic analysis and simula-
17
+ tion studies demonstrate that PAD can significantly reduce the estimation variance
18
+ compared to the standard DR estimator when the PS model is wrong and the OR is
19
+ correct, and maintain close performance to DR when the PS model is correct. We
20
+ further applied our method to study the effects of eligibility for 401(k) plan on the
21
+ improvement of net total financial assets using data from the Survey of Income and
22
+ Program Participation of 1991.
23
+ Keywords: Causal inference; Covariate shift correction; Propensity score; Outcome regres-
24
+ sion; Double robustness; Intrinsic efficiency.
25
+ ∗Liangbo Lv is an undergraduate student from the School of Statistics, Renmin University of China.
26
+ †Molei Liu is an assistant professor at Columbia University Mailman School of Public Health.
27
+ 1
28
+
29
+ 1
30
+ Introduction
31
+ 1.1
32
+ Background
33
+ Doubly robust (DR) estimation has attracted extensive interest in the literature on semipara-
34
+ metric theory and causal inference and is frequently used in biomedical science, economics,
35
+ and policy science studies. It incorporates two nuisance models, a propensity score (PS)
36
+ model, and an outcome regression (OR) model to characterize distributions of the expo-
37
+ sure and outcome against the adjustment covariates respectively, and draws valid inferences
38
+ when either one of them is correctly specified. It has been well-established that when both
39
+ the PS and OR models are correct, the DR estimator is semiparametric efficient and its
40
+ asymptotic variance does not really depend on the estimating equations for the nuisance
41
+ models (Tsiatis, 2006, e.g.). Nevertheless, there still remains an intriguing question on how
42
+ to improve the asymptotic efficiency of the DR estimator when one nuisance model is mis-
43
+ specified. For the scenario with correct PS and wrong OR models, there is a track of work
44
+ (Cao et al., 2009; Tan, 2010, e.g.) proposing the so-called intrinsic efficient estimator that
45
+ will be reviewed in Section 1.3. This type of estimator preserves the double robustness prop-
46
+ erty and achieves improved efficiency over the standard DR estimator when the PS model
47
+ is correct and the OR is wrong. Interestingly, we notice that the dual problem of this, i.e.,
48
+ improving the (intrinsic) efficiency of the DR estimator under wrong PS and correct OR,
49
+ is supposed to be equally important but has not been handled yet due to certain technical
50
+ reasons that will be discussed later. Aimed in this paper, filling this methodological blank
51
+ can effectively complement the existing tools for DR and semiparametric inference.
52
+ 1.2
53
+ Problem Setup
54
+ To make our idea easier to understand, we focus on a specific missing data problem: trans-
55
+ fer estimation of the outcome’s mean in the presence of covariate shift (Huang et al., 2007,
56
+ e.g.). This is also equivalent to estimating the average treatment effect on the treated (ATT)
57
+ (Hahn, 2004, e.g.) in the context of causal inference and matching-adjusted indirect com-
58
+ parison frequently conducted in biomedical studies (Signorovitch et al., 2010). Our method
59
+ could be generalized to other settings such as estimating the average treatment effect (ATE)
60
+ and transfer learning of regression models (Liu et al., 2020).
61
+ Suppose there are n labeled samples with observed outcome Y and covariates X ∈
62
+ Rd, and N unlabeled samples only observed on X. Let ∆ = 1 indicate that the sample
63
+ is labeled and ∆ = 0 otherwise.
64
+ The labeled observations (Yi, Xi) are collected from a
65
+ source population S with ∆i = 1 for i = 1, 2, . . . , n. Assume (Yi, Xi) ∼ pS(x)q(y|x) for
66
+ 2
67
+
68
+ i = 1, 2, . . . , n where pS(x) and q(y|x) represent the density of X on S and the conditional
69
+ density of Y given X = x respectively. Meanwhile, there are unlabeled samples from a target
70
+ population T indicated by ∆i = 0 and only observed on covariates Xi for i = n+1, . . . , N+n.
71
+ Assume that on T , (Yi, Xi) ∼ pT (x)q(y|x) with pT (x) representing the density of X on
72
+ T and the distribution of Y | X remaining to be the same as that on S. Our goal is to
73
+ estimate µ0 = ET Y , the marginal mean of Y on T . In the absence of observed Y on the
74
+ target samples, two simple strategies to estimate µ0 are introduced below.
75
+ (PS) Define the propensity score (PS) or density ratio between the two populations as
76
+ r0(x) = pT (x)/pS(x). Estimate r0(x) with some �r(x) and average the observed Yi
77
+ weighted by �r(Xi) over i = 1, 2, . . . , n from S.
78
+ (OR) Define the outcome regression (OR) or imputation model for Y as m0(x) = E[Y | X =
79
+ x]. Estimate m0(x) with some �m(x) obtained using the labeled samples and average
80
+ �m(Xi) over i = n + 1, . . . , n + N from T .
81
+ Both the PS and OR strategies are built upon the assumption that the distribution of Y | X
82
+ is the same between S and T so the knowledge of Y on S is transferable to T . This is in the
83
+ same spirit as the no unmeasured confounding assumption in the context of causal inference.
84
+ 1.3
85
+ Related literature
86
+ Our work is based on the doubly robust (DR) inference framework that has been fre-
87
+ quently studied and applied in the past years (Robins et al., 1994; Bang and Robins, 2005;
88
+ Kang and Schafer, 2007; Tan, 2010; Vermeulen and Vansteelandt, 2015, e.g.). It combines
89
+ the PS and OR models introduced in Section 1.2 to construct an estimator that is valid
90
+ when at least one of the two nuisance models are correct and, thus, regarded as a more ro-
91
+ bust statistical inference procedure than the simple PS and OR strategies. Early work in DR
92
+ inference (Bang and Robins, 2005; Kang and Schafer, 2007, e.g.) mainly used working low-
93
+ dimensional parametric regression to construct the PS and OR models. Recent progress has
94
+ been made to accommodate the use of high-dimensional regression or complex machine learn-
95
+ ing methods in estimating the nuisance models (Chernozhukov et al., 2018; Tan, 2020, e.g.),
96
+ which is less prone to model misspecification. We focus the scope of this paper on the low-
97
+ dimensional parametric setting that is technically less involved but more user-friendly and
98
+ less sensitive to over-fitting in practice. It is also possible and valuable to generalize our work
99
+ to the settings of high-dimensional parametric (Tan, 2020; Dukes and Vansteelandt, 2020,
100
+ e.g.) or semi-non-parametric (Liu et al., 2020) nuisance models, in which model misspecifi-
101
+ cation is still an important concern.
102
+ 3
103
+
104
+ There has risen great interest in studying and improving the asymptotic efficiency of the
105
+ DR estimator. One track of literature studied the local efficiency of the DR estimator, i.e., if
106
+ it is semiparametric efficient when both the PS and OR models are known or correctly spec-
107
+ ified. While it was shown that the standard DR estimator for the ATE (Robins et al., 1994)
108
+ achieves such local efficiency (Hahn, 1998; Tsiatis, 2006). This result cannot be directly ap-
109
+ plied to the ATT estimator because unlike ATE, the PS model of ATT is informative (or
110
+ non-ancillary) (Hahn, 1998, 2004). Shu and Tan (2018) further studied this subtle issue and
111
+ proposed locally efficient DR estimators for ATT based on its influence function.
112
+ Meanwhile, another track of literature focuses on improving the efficiency of the DR es-
113
+ timator in the presence of correct PS and potentially wrong OR models and, thus, is more
114
+ relevant to our work that also aims at automatic variance reduction under model misspec-
115
+ ification.
116
+ A class of intrinsic efficient DR estimator has been proposed for the efficient
117
+ estimation of ATE (Cao et al., 2009; Tan, 2010), ATT (Shu and Tan, 2018), casual regres-
118
+ sion model (Rotnitzky et al., 2012), longitudinal data (Han, 2016), individual treatment rule
119
+ (Pan and Zhao, 2021), etc. This type of estimator is (i) valid when either nuisance model is
120
+ correct; (ii) equivalent with the standard DR estimator when both models are correct; and
121
+ (iii) of the minimum variance under correct PS and wrong OR, among all the DR estimators
122
+ with the same parametric specification of the OR model, and, consequently, more efficient
123
+ than the standard DR estimator. In addition, it was shown that including more prognostic
124
+ covariates or auxiliary basis in the PS model can always help to reduce the variance of the
125
+ ATE estimator (Hahn, 2004; Tsiatis, 2006). Motivated by this, Cheng et al. (2020) proposed
126
+ a double-index PS estimator for ATE that smooths the treatment over the parametric PS
127
+ and OR models to achieve the DR property as well as variance reduction under correct PS
128
+ and wrong OR. Nevertheless, such a strategy may also incur over-fitting issues and cause
129
+ poor performance in finite or small sample studies (Gronsbell et al., 2022).
130
+ Although the correct PS and wrong OR setting has been frequently studied, there is still a
131
+ paucity of solutions to its dual problem, i.e., enhancing the DR estimator under the wrong PS
132
+ and correct OR. Some early work like Kang and Schafer (2007) and Cao et al. (2009) argued
133
+ that the simple OR strategy is an ideal choice when one knows the PS model is wrong since
134
+ it is free of PS weighting that may decrease the effective sample size. However, since there
135
+ are no perfect ways to examine model correctness without any additional assumptions, this
136
+ strategy can never be as robust as the DR estimator to misspecification of the OR model.
137
+ We also notice a large body of work in statistical learning and causal inference that aims
138
+ at leveraging some auxiliary data or information to boost the asymptotic efficiency of certain
139
+ estimators using the idea of augmentation. For example, Kawakita and Kanamori (2013),
140
+ Chakrabortty et al. (2018) and Azriel et al. (2021) proposed different semi-supervised learn-
141
+ 4
142
+
143
+ ing methods that improve estimation efficiency of the linear model leveraging large unlabeled
144
+ data drawn from the same distribution as the labeled samples. Methods like Chen and Chen (2000)
145
+ and Yang and Ding (2019) utilized external data with error-prone outcomes or covariates to
146
+ construct control variate for variance reduction. These methods, as well as other examples,
147
+ rely on some auxiliary data to construct estimators that always converge to zero and are
148
+ asymptotically correlated with the target estimator. These zero estimators are then used
149
+ to augment the target estimator properly for variance reduction. Our work also adapts the
150
+ high-level idea of augmentation. But different from these methods, ours does not leverage
151
+ any auxiliary samples or knowledge and additionally cares about the need of prioritizing va-
152
+ lidity (double robustness) over statistical power. Consequently, the asymptotic behavior of
153
+ our augmented estimator actually varies according to the correctness of the nuisance models
154
+ and is more technically involved to study.
155
+ 1.4
156
+ Our contribution
157
+ To estimate µ0 introduced in Section 1.2 efficiently, we propose a novel Propensity score
158
+ Augmented Doubly robust (PAD) estimation method that enhances the standard DR es-
159
+ timator of µ0 by linearly augmenting the PS model with some functions of X. Both the
160
+ augmentation functions and their linear coefficients are wisely and carefully constructed such
161
+ that the augmentation term always reduces the variance of the DR estimator if the PS is
162
+ wrong and the OR is correct while it automatically converges to zero if the PS is correct,
163
+ in order to avoid bias and ensure double robustness. Also, when both models are correct,
164
+ our PAD estimator becomes asymptotically equivalent to the standard DR estimator. To
165
+ our best knowledge, the proposed estimator is the first one to simultaneously have the DR
166
+ property and a smaller variance than the standard DR estimator under wrong PS and correct
167
+ OR models. Thus, our work serves as an important complement to existing DR inference
168
+ approaches, especially to the intrinsically efficient DR estimators proposed to work for the
169
+ setting with correct PS and wrong OR (Cao et al., 2009; Tan, 2010, e.g.).
170
+ 2
171
+ Method
172
+ 2.1
173
+ Doubly robust estimator
174
+ As a prerequisite of our proposal, we first introduce the standard DR estimator for µ0 un-
175
+ der the setup described in Section 1.2, which has been studied for years (Hahn, 1998, 2004;
176
+ Shu and Tan, 2018, e.g.). Following a common strategy (Bang and Robins, 2005; Shu and Tan, 2018;
177
+ Liu et al., 2020, e.g.), we form the PS and OR models as r(x) = exp(xTγ) and m(x) =
178
+ 5
179
+
180
+ g(xTα) where γ and α are model coefficients and g(·) is a known and differentiable link
181
+ function. We say that the PS (or OR) model is correct if there exists γ0 (or α0) such that
182
+ the true r0(x) = exp(xTγ0) (or m0(x) = g(xTα0)). Denote the empirical mean operator on
183
+ S and T as �ES and �ET such that
184
+ �ESa(X, Y ) = n−1
185
+ n
186
+
187
+ i=1
188
+ a(Xi, Yi),
189
+ �ET a(X, Y ) = N−1
190
+ n+N
191
+
192
+ i=n+1
193
+ a(Xi, Yi)
194
+ for any function a(·). Suppose the two nuisance estimators �γ and �α are obtained respectively
195
+ by solving the estimating equations:
196
+ �ESX exp(X
197
+ Tγ) = �ET X,
198
+ �ESX{Y − g(X
199
+ Tα)} = 0.
200
+ (1)
201
+ The estimating equations for γ in (1) is usually referred as covariate balancing (Imai and Ratkovic, 2014;
202
+ Zhao and Percival, 2017), and those for α correspond to the ordinary least square regression
203
+ when g(a) = a and the logistic regression when Y is binary and g(a) = expit(a) = ea/(1+ea).
204
+ Note that one can use alternative estimation procedures to obtain γ and α, e.g., running a
205
+ logistic regression on ∆ against X to estimate γ, and our proposed method could naturally
206
+ adapt to different choices on this.
207
+ Based on �γ and �α, the PS and OR estimators introduced in Section 1.2 can be specified as
208
+ �µPS = �ESY exp(X
209
+ T�γ) and �µOR = �ET g(X
210
+ T�α) respectively. Then the standard DR estimator
211
+ is constructed by augmenting one of them with another nuisance model:
212
+ �µDR = �ES{Y − g(X
213
+ T�α)} exp(X
214
+ T�γ) + �ET g(X
215
+ T�α).
216
+ (2)
217
+ When the PS model is correct and �γ converges to γ0, �ET g(X
218
+ T�α) − �ESg(X
219
+ T�α) exp(X
220
+ T�γ)
221
+ converges to zero and the remainder term �ESY exp(X
222
+ T�γ) is exactly the PS estimator con-
223
+ verging to µ0. Similarly, when OR is correct, we can show that �ES{Y − g(X
224
+ T�α)} exp(X
225
+ T�γ)
226
+ converges to zero and �ET g(X
227
+ T�α) converges to µ0. Thus �µDR is doubly robust in the sense
228
+ that it is consistent when either the PS or OR model is correctly and consistently estimated.
229
+ 2.2
230
+ Expansion of DR estimator under correct OR model
231
+ To help the readers understand our method more intuitively, we now heuristically derive and
232
+ analyze the asymptotic expansion of �µDR when the OR model is correctly specified. Suppose
233
+ that �γ and �α converge to some ¯γ and ¯α defined as the solutions to the population-level
234
+ estimating equations ESX exp(X
235
+ Tγ) = ET X and ESX{Y − g(X
236
+ Tα)} = 0, respectively.
237
+ Let �r(x) = exp(X
238
+ T�γ), ¯r(x) = exp(X
239
+ T¯γ), and S(α) = S(Y, X, α) = X{Y − g(X
240
+ Tα)}.
241
+ Suppose that the OR model is correct, i.e., m0(x) = g(X
242
+ Tα0) and α0 = ¯α, and n1/2(�α −
243
+ 6
244
+
245
+ ¯α, �γ − ¯γ) is asymptotically normal with mean zero following the standard M-estimation
246
+ theory (Van der Vaart, 2000). Then we have
247
+ �ES{Y − g(X
248
+ T�α)}{�r(X) − ¯r(X)} = op(n−1/2)
249
+ due to Neyman orthogonality (Neyman, 1959), which, as will be strictly proved in Section
250
+ 3, implies that �µDR defined in (2) is asymptotically equivalent with
251
+ �µDR =�ES{Y − g(X
252
+ T ¯α)}¯r(X) + �ET g(X
253
+ T ¯α)
254
+ +
255
+
256
+ �ES{g(X
257
+ T ¯α) − g(X
258
+ T�α)}¯r(X) + �ET {g(X
259
+ T�α) − g(X
260
+ T ¯α)}
261
+
262
+ ≈�ES{Y − g(X
263
+ T ¯α)}¯r(X) + �ET g(X
264
+ T ¯α) + L
265
+ T�ESX{Y − g(X
266
+ T ¯α)},
267
+ where L = − ¯H−1 {ESX ˙g(X
268
+ T ¯α)¯r(X) − ET X ˙g(X
269
+ T ¯α)}, ¯H = ESXX
270
+ T ˙g(X
271
+ T ¯α), and ˙g(a) is
272
+ the derivative of g(a). To derive the above result, we use the standard asymptotic expansion
273
+ of �α given by our Lemma B3 in Appendix, and the symbol “≈” indicates that the difference
274
+ between the two lines is up to op(n−1/2) and, thus, asymptotically negligible. So when OR
275
+ is correct, the asymptotic variance of n1/2(�µDR − µ0) is equal to that of n1/2(�µDR − µ0), which
276
+ can be expressed as
277
+ aVar{n1/2(�µDR − µ0)} = ES{¯r(X)}2v(X) + 2L
278
+ TESX¯r(X)v(X) + C,
279
+ (3)
280
+ where v(x) = Var(Y | X) and C is some positive constant free of ¯r(·) and, thus, need not
281
+ to be considered in the following derivation. Note that when the PS model also is correct,
282
+ i.e., ¯r(·) = r0(·), we further have L = 0.
283
+ Empirically, term L in (3) can be estimated by
284
+ �L = − �H−1 �
285
+ �ESX ˙g(X
286
+ T�α) exp(X
287
+ T�γ) − �ET X ˙g(X
288
+ T�α)
289
+
290
+ ,
291
+ (4)
292
+ where �H = �ESXX
293
+ T ˙g(X
294
+ T�α). Estimation of v(x) relies on our working assumption on the
295
+ form of Var(Y | X). For example, one may assume Y = m0(X) + ǫ where ǫ ∼ N(0, σ2)
296
+ so v(x) is invariant of x and can be simply imputed with the moment estimator of σ2.
297
+ Also, for the common Poisson model Y ∼ Poisson{exp(X
298
+ Tα0)} and logistic model Y ∼
299
+ Bernoulli{expit(X
300
+ Tα0)}, one can naturally estimate v(x) by exp(xT�α) and expit(xT�α){1 −
301
+ expit(xT�α)} respectively. To preserve generality, we introduce a working model vθ(x) for
302
+ v(x) with some nuisance parameter θ to be estimated as �θ that could be partially or fully
303
+ determined by �α. Suppose that �θ converges to some ¯θ. As will be shown in Section 3,
304
+ violation of this conditional variance model, i.e., v(x) ̸= v¯θ(x) does not impact the double
305
+ robustness of our proposed estimator but only affects its efficiency gain when PS is wrong
306
+ and OR is correct.
307
+ 7
308
+
309
+ 2.3
310
+ PAD estimator
311
+ Now we formally introduce the propensity score augmented doubly robust (PAD) estimator.
312
+ Our central idea is to augment the PS model ¯r(X) = exp(X
313
+ T¯γ) as ¯raug(X; β) = exp(X
314
+ T¯γ)+
315
+ Ψ
316
+ Tβ and use ¯raug(·) to replace ¯r(·) in the DR estimator. Here Ψ is some properly constructed
317
+ basis function of X and β is some loading coefficient vector to be estimated. We first describe
318
+ the empirical construction procedures for PAD in Algorithm 1 and then discuss the reason
319
+ and intuition of the key steps in this algorithm.
320
+ Algorithm 1 Propensity score Augmented Doubly robust (PAD) estimation
321
+ [Step 1] Solve the estimating equations in (1) to obtain �γ and �α, and obtain the conditional
322
+ variance estimator as �θ.
323
+ [Step 2] Specify Φ = φ(X) of larger dimensionality than X using any basis function φ(·),
324
+ and take �Ψ = Φ − �ET [Φv�θ(X)]/�ET v�θ(X).
325
+ [Step 3] Solve the restricted weighted least square (RWLS) problem:
326
+ �β = argminβ �Vµ(β),
327
+ s.t.
328
+ �ESX ˙g(X
329
+ T�α) �Ψ
330
+ Tβ = 0,
331
+ (5)
332
+ where
333
+ �Vµ(β) = �ES{exp(X
334
+ T�γ) + �Ψ
335
+ Tβ}2v�θ(X) + 2�L
336
+ T�ESX{exp(X
337
+ T�γ) + �Ψ
338
+ Tβ}v�θ(X),
339
+ (6)
340
+ and �L is as defined in equation (4).
341
+ [Step 4] Obtain the PAD estimator through
342
+ �µPAD = �ES{Y − g(X
343
+ T�α)}{exp(X
344
+ T�γ) + �Ψ
345
+ T�β} + �ET g(X
346
+ T�α).
347
+ For heuristic analysis, suppose that all estimators used in (6) converge to their limiting
348
+ values. Then let Ψ = Φ −ET [Φv¯θ(X)]/ET vθ(X) be the limits of �Ψ, ¯β the limits of �β, with
349
+ its specific form given by Lemma B1 in Appendix, and
350
+ Vµ(β) = ES{exp(X
351
+ T¯γ) + Ψ
352
+ Tβ}2v¯θ(X) + 2L
353
+ TESX{exp(X
354
+ T¯γ) + Ψ
355
+ Tβ}v¯θ(X)
356
+ the limiting function of �Vµ(β) specified in Algorithm 1. We shall consider two scenarios
357
+ separately to demonstrate that our proposed PAD estimator not only maintains double
358
+ robustness property but also has a lower asymptotic variance than �µDR when the OR model
359
+ is correctly specified and PS is wrong. Rigorous justification for these results will be provided
360
+ in Section 3.
361
+ 8
362
+
363
+ Correct PS model.
364
+ When the PS model is correct, we easily have L = 0 as stated in
365
+ Section 2.2 so Vµ(β) = ES{exp(X
366
+ T¯γ) + Ψ
367
+ Tβ}2v¯θ(X), and
368
+ ∂Vµ(β)
369
+ ∂β
370
+ = ESΨ exp(X
371
+ T¯γ)v¯θ(X) = ET Ψv¯θ(X).
372
+ By definition of Ψ, we have ET Ψv¯θ(X) = 0, as ensured by the mean shift of Φ in Step
373
+ 2 of Algorithm 1. Thus, β = 0 minimizes Vµ(β) and consequently, is the solution of the
374
+ population-level version of the RWLS problem (5) since the linear constraints in (5) is trivially
375
+ satisfied by β = 0. This implies that as long as the PS model is correct, �β converges to 0
376
+ so the augmented PS estimator exp(X
377
+ T�γ) + �Ψ
378
+ T�β converges to the correct PS model, which
379
+ ensures �µPAD to converge to the true µ0. Meanwhile, it is clear that the augmentation of PS
380
+ does not change the OR model at all. Therefore, �µPAD preserves the same DR property as
381
+ �µDR, i.e., being (root-n) consistent whenever the PS or the OR model is correctly specified.
382
+ Correct OR and wrong PS.
383
+ Note that �µPAD = �µDR + �ES �Ψ
384
+ T�β{Y − g(X
385
+ T�α)} and when
386
+ the OR model is correct,
387
+ �ES �Ψ
388
+ T�β{Y − g(X
389
+ T�α)} =�ES �Ψ
390
+ T�β{Y − g(X
391
+ Tα0)} + �ES �Ψ
392
+ T�β{g(X
393
+ Tα0) − g(X
394
+ T�α)}
395
+ ≈�ESΨ
396
+ T¯β{Y − g(X
397
+ Tα0)} + �ES(α0 − �α)
398
+ TX ˙g(X
399
+ T�α) �Ψ
400
+ T�β,
401
+ (7)
402
+ in which we use the orthogonality between �Ψ
403
+ T�β − Ψ
404
+ T¯β and Y − g(X
405
+ Tα0) on the first term,
406
+ as well as expansion on g(X
407
+ Tα0) − g(X
408
+ T�α) in the second term of the first line, to derive
409
+ the “≈” relation shown in the second line. Here, “≈” in (7) again means that the difference
410
+ between the first and second line is up to op(n−1/2) and, thus, becomes asymptotically
411
+ negligible.
412
+ In addition, according to the moment constraint in the RWLS problem (5),
413
+ �ESX ˙g(X
414
+ T�α) �Ψ
415
+ T�β converges to 0.
416
+ So the second term in the second line of (7) is also
417
+ negligible and �µPAD ≈ �µDR + �ESΨ
418
+ T¯β{Y − g(X
419
+ Tα0)}. Combining this with equation (3) as
420
+ well as the asymptotic equivalence between �µDR and �µDR discussed in Section 2.2, we have
421
+ aVar{n1/2(�µPAD − µ0)} = ES{¯r(X) + Ψ
422
+ T¯β}2v(X) + 2L
423
+ TESX{¯r(X) + Ψ
424
+ T¯β}v(X) + C, (8)
425
+ which, after dropping the invariant C, is equal to Vµ(¯β), the limiting value of the minimized
426
+ objective function �Vµ(�β) in the RWLS problem (5). Note that β = 0 is always feasible to
427
+ the linear constraint in (5) and if we simply replace ¯β with 0 in the right-hand side of (8),
428
+ it reduces to the asymptotic variance of n1/2(�µDR − µ0) derived in (3). Meanwhile, when the
429
+ PS model is wrong, ∂Vµ(β)/∂β is typically not 0 at β = 0 so the population-level minimizer
430
+ ¯β ̸= 0. Thus, aVar{n1/2(�µPAD − µ0)} ≤ aVar{n1/2(�µDR − µ0)} when the OR model is correct
431
+ and the strict “<” will hold in general when the PS model is wrong.
432
+ 9
433
+
434
+ 3
435
+ Asymptotic analysis
436
+ In this section, we rigorously present the asymptotic properties of the proposed PAD estima-
437
+ tor and compare PAD with the standard DR estimator. We first introduce some mild and
438
+ common regularity assumptions. Without loss of generality, we assume that n/N = O(1) so
439
+ the desirable parametric rate of the DR estimators will be O(n−1/2).
440
+ Assumption 1. The supports of X and Φ are compact and EY 4 < ∞.
441
+ Assumption 2. The link function g(·) is differentiable with derivative ˙g(·) and there exists
442
+ a constant L such that |˙g(x1) − ˙g(x2)| < L|x1 − x2| for all x1, x2 ∈ R.
443
+ Assumption 3. The dimension of Ψ is larger than that of X. Matrices ES{ΨΨ
444
+ Tv¯θ(X)},
445
+ ES{XX
446
+ T exp(X
447
+ T¯γ)}, ES{XX
448
+ T ˙g(X
449
+ T ¯α)} and ES{ΨX
450
+ T ˙g(X
451
+ T ¯α)} have all their eigenvalues
452
+ bounded and staying away from zero.
453
+ Assumption 4. The conditional variance function vθ(x) is differentiable on θ with a bounded
454
+ partial derivative ∂θvθ(x). The estimator �θ converges to some ¯θ in probability and satisfies
455
+ that n1/2(�θ − ¯θ) is asymptotic normal with mean zero.
456
+ Remark 1. Assumptions 1–3 are all mild, standard, and commonly used to justify the
457
+ asymptotic properties of M-estimation (Van der Vaart, 2000). Note that in Assumption 3,
458
+ we take Ψ to have larger dimension than X and make regularity conditions on ES{XX
459
+ T ˙g(X
460
+ T ¯α)}
461
+ and ES{ΨX
462
+ T ˙g(X
463
+ T ¯α)}. These are to ensure that �β is not zero and properly converges to ¯β.
464
+ Assumption 4 constrains the way of specifying vθ(x) and estimating θ. Under Assumptions
465
+ 1–3, this assumption is satisfied when either θ is fully determined by α, e.g., in a Pois-
466
+ son or logistic model for Y against X, or when θ is estimated by additionally fitting some
467
+ parametric model of Var(Y | X) against X.
468
+ Now we present the main results about the robustness and efficiency of our proposed PAD
469
+ estimator in Theorem 1 with its proof given in Section B of the Appendix. Some important
470
+ heuristics of this theorem has already been discussed in Section 2.3.
471
+ Theorem 1. Under Assumptions 1–4, it holds that
472
+ (i) Double robustness. When either the PS or the OR model is correctly specified, i.e.,
473
+ r0(x) = exp(xTγ0) for some γ0 or m0(x) = g(xTα0) for some α0, �µPAD
474
+ p−→ µ0 and
475
+ n1/2(�µPAD − µ0) weakly converges to some normal distribution with mean zero.
476
+ (ii) Variance reduction under wrong PS. When the OR model is correct while the
477
+ PS model may be misspecified, the asymptotic variance of n1/2(�µPAD − µ0) is always
478
+ 10
479
+
480
+ not larger than that of n1/2(�µDR − µ0). Further when ¯β ̸= 0 (the explicit form of ¯β is
481
+ given in Lemma B1), n1/2(�µPAD − µ0) has a strictly smaller asymptotic variance than
482
+ n1/2(�µDR − µ0).
483
+ (iii) Equivalence under correct PS and OR. When both the PS and OR models are
484
+ correct, n1/2(�µPAD − µ0) and n1/2(�µDR − µ0) are asymptotically equivalent and have the
485
+ same asymptotic variance.
486
+ 4
487
+ Simulation study
488
+ We conducted simulation studies to evaluate our proposed estimator and compare it with
489
+ the standard DR estimator.
490
+ In our studies, we generate covariates X = (X1, X2, X3)T
491
+ from N(0, Σ) with Σ = (σij) ∈ R3×3 and σij = 0.3|i−j|. For generation of the population
492
+ assignment ∆ and outcome Y , we consider six settings, namely:
493
+ (G1) Gaussian Y , Correct PS, Correct OR. Pr(∆ = 1 | X) = expit(X1 − 2X2 + X3)
494
+ and Y = 0.5X1 + 0.5X2 + X3 + ǫ where ǫ | X ∼ N(0, 1).
495
+ (G2) Gaussian Y , Correct PS, Wrong OR. Pr(∆ = 1 | X) = expit(X1 − 2X2 + X3)
496
+ and Y = 0.5X1 + 0.5X2 + sin(X2 + 0.5X3) + ǫ.
497
+ (G3) Gaussian Y , Wrong PS, Correct OR. Pr(∆ = 1 | X) = expit(4 + X1 + X2 + X3 −
498
+ 1.5|X1| − 1.5|X2| − |X3|) and Y = 0.5X1 + 0.5X2 + X3 + ǫ.
499
+ (L1) Binary Y , Correct PS, Correct OR. Pr(∆ = 1 | X) = expit(X1 − 2X2 + X3) and
500
+ Pr(Y = 1 | X) = expit(0.5X1 + 0.5X2 + X3).
501
+ (L2) Binary Y , Correct PS, Wrong OR. Pr(∆ = 1 | X) = expit(X1 − 2X2 + X3) and
502
+ Pr(Y = 1 | X) = expit(0.5X1 + 0.5X2 + sin(X2 + 0.5X3)).
503
+ (L3) Binary Y , Wrong PS, Correct OR. Pr(∆ = 1 | X) = expit(4 + X1 + X2 + X3 −
504
+ 1.5|X1| − 1.5|X2| − |X3|) and Pr(Y = 1 | X) = expit(0.5X1 + 0.5X2 + X3).
505
+ In Settings (G1)–(G3), Y is a Gaussian variable and we fit linear models for Y ∼ X with
506
+ vθ(x) = 1. While in Settings (L1)–(L3), we fit logistic models for the binary Y against
507
+ X with vθ(x) = expit(X
508
+ Tα){1 − expit(X
509
+ Tα)}. We consider different scenarios about the
510
+ correctness of the PS and OR models to examine the robustness and efficiency of PAD.
511
+ Bootstrap is used for estimating the asymptotic variance and constructing the confidence
512
+ interval (CI). For effective variance reduction on PAD when PS is wrong, i.e. under Settings
513
+ 11
514
+
515
+ (G3) and (L3), we include in the augmentation covariates Φ a decent amount of X’s basis
516
+ functions including Xj, exp(Xj), |Xj|, exp(−Xj1 − Xj2), and exp(−X1 − X2 − X3) for all j
517
+ and j1 ̸= j2 ∈ {1, 2, 3}. We set N = n = 500 or N = n = 1000 separately and generate 1000
518
+ realizations for each setting.
519
+ Table 1 reports the absolute average bias (Bias), standard error (SE), and coverage
520
+ probability (CP) of the 95% CI of the DR and PAD estimators. When at least one nuisance
521
+ model is correct, DR and PAD attain very close bias, which is much smaller compared to
522
+ their SE and, thus, grants their CPs to be close to the nominal level. This indicates that
523
+ PAD achieves the double robustness property just like the standard DR estimator under
524
+ finite samples. To compare PAD and DR in terms of their estimation variance and efficiency,
525
+ we present in Table 2 their relative efficiency (RE) defined as Var(�µDR)/ Var(�µPAD). Under
526
+ Settings (G1), (G2), (L1), and (L2) where the PS model is correct, the two estimators show
527
+ nearly identical variance, with their REs located between 1 ±0.04. Under Settings (G3) and
528
+ (L3) with misspecified PS and correct OR models, our proposed PAD estimator shows 20%
529
+ to 40% smaller variance than the standard DR estimator. All these results demonstrate that
530
+ conclusions in Theorem 1 also apply well for finite samples. In specific, PAD performs very
531
+ closely to the standard DR when the PS model is correct and is potentially better than DR
532
+ in the presence of wrong PS models.
533
+ 5
534
+ Real example
535
+ The effects of the 401(k) program have been investigated for a long time (Abadie, 2003;
536
+ Chernozhukov et al., 2018, e.g.). Different from other plans like Individual Retirement Ac-
537
+ counts (IRAs), eligibility for 401(k) is completely decided by employers. Therefore, unob-
538
+ served personal preferences for savings may make little difference in 401(k) eligibility. How-
539
+ ever, there may be some other confounders affecting the causal studies of 401(k), such as job
540
+ choice, income, and age. To address this problem, Abadie (2003) and Chernozhukov et al. (2018)
541
+ proposed to adjust for certain covariates related to job choice so that 401(k) eligibility can
542
+ be regarded as exogenous.
543
+ Whether 401(k) eligibility contributes to the improvement of people’s net total financial
544
+ assets is an important topic studied in existing literature like Abadie (2003) and Chernozhukov et al. (2018).
545
+ However, whether 401(k) can improve the financial assets of those actually not eligible for
546
+ 401(k) is still an open and interesting problem. To investigate this problem, we analyze the
547
+ data from the Survey of Income and Program Participation of 1991. The data set consists of
548
+ n + N = 9275 observations. The outcome of our interests, Y is defined as the indication of
549
+ having positive net total financial assets. There are 9 adjustment covariates in X, including
550
+ 12
551
+
552
+ Table 1:
553
+ The absolute average bias (Bias), standard error (SE), and coverage probability (CP) of the 95%
554
+ confidence intervals of the DR and PAD estimators under the settings described in Section 4. All results are
555
+ produced based on 1000 repetitions.
556
+ n = N = 500
557
+ n = N = 1000
558
+ Setting
559
+ Method
560
+ Bias
561
+ SE
562
+ CP
563
+ bias
564
+ SE
565
+ CP
566
+ (G1)
567
+ DR
568
+ 0.006
569
+ 0.145
570
+ 0.94
571
+ 0.005
572
+ 0.106
573
+ 0.92
574
+ PAD
575
+ 0.005
576
+ 0.142
577
+ 0.93
578
+ 0.004
579
+ 0.105
580
+ 0.92
581
+ (G2)
582
+ DR
583
+ 0.007
584
+ 0.152
585
+ 0.92
586
+ 0.008
587
+ 0.111
588
+ 0.92
589
+ PAD
590
+ 0.005
591
+ 0.149
592
+ 0.92
593
+ 0.007
594
+ 0.112
595
+ 0.92
596
+ (G3)
597
+ DR
598
+ 0.010
599
+ 0.162
600
+ 0.93
601
+ 0.001
602
+ 0.121
603
+ 0.92
604
+ PAD
605
+ 0.005
606
+ 0.136
607
+ 0.93
608
+ 0.001
609
+ 0.105
610
+ 0.93
611
+ (L1)
612
+ DR
613
+ 0.000
614
+ 0.055
615
+ 0.92
616
+ 0.001
617
+ 0.040
618
+ 0.92
619
+ PAD
620
+ 0.001
621
+ 0.054
622
+ 0.93
623
+ 0.001
624
+ 0.040
625
+ 0.93
626
+ (L2)
627
+ DR
628
+ 0.001
629
+ 0.054
630
+ 0.92
631
+ 0.004
632
+ 0.040
633
+ 0.92
634
+ PAD
635
+ 0.001
636
+ 0.053
637
+ 0.92
638
+ 0.004
639
+ 0.040
640
+ 0.92
641
+ (L3)
642
+ DR
643
+ 0.005
644
+ 0.057
645
+ 0.91
646
+ 0.003
647
+ 0.038
648
+ 0.92
649
+ PAD
650
+ 0.005
651
+ 0.052
652
+ 0.93
653
+ 0.002
654
+ 0.035
655
+ 0.93
656
+ Table 2:
657
+ Relative efficiency (RE) between DR and PAD, i.e., Var(�µDR)/ Var(�µPAD), under the settings de-
658
+ scribed in Section 4.
659
+ n, N
660
+ (G1)
661
+ (G2)
662
+ (G3)
663
+ (L1)
664
+ (L2)
665
+ (L3)
666
+ 500
667
+ 1.04
668
+ 1.04
669
+ 1.42
670
+ 1.04
671
+ 1.04
672
+ 1.20
673
+ 1000
674
+ 1.02
675
+ 0.98
676
+ 1.33
677
+ 1.00
678
+ 1.00
679
+ 1.18
680
+ age, income, family size, years of education, benefit pension status, marriage, two-earner
681
+ household status, individual participation in IRA plan, and home ownership status. The
682
+ source (treated) samples S with ∆ = 1 are taken as those eligible for 401(k) and the target
683
+ (untreated) samples T are those without 401(k) eligibility. We applied PAD and standard
684
+ DR to estimate µ, the effect of 401(k) eligibility on improving the positive rate of net to-
685
+ tal financial assets among people without 401(k) eligibility. The PS model is specified as
686
+ exp(X
687
+ Tγ) and the OR model is expit(X
688
+ Tα). In our method, the augmentation covariates
689
+ vector Φ consists of X, exp(−0.3Xj), |Xj|, and X2
690
+ j for all Xj’s that are not binary. We
691
+ again use bootstrap to estimate SEs and construct CIs.
692
+ In Table 3, we report the point estimation, their estimated standard errors (ESE), and
693
+ 95% CIs for the treatment effect µ, obtained using the standard DR and our proposed PAD
694
+ 13
695
+
696
+ methods. Outputs of both methods indicate that 401(k) eligibility has a significant effect on
697
+ improving the rate of having positive net total financial assets among people who are actually
698
+ not eligible for 401(k). The estimated treatment effect is 0.169 (95% CI: 0.142, 0.196) by the
699
+ standard DR and 0.150 (95% CI: 0.126, 0.175) by PAD. Moreover, the ESE of our proposed
700
+ PAD estimator is remarkably smaller than that of the standard DR estimator, with their
701
+ estimated RE, i.e., Var(�µDR)/ Var(�µPAD) being around 1.25. This means our proposed PAD
702
+ method can characterize the treatment effect µ more precisely than DR in this example.
703
+ Table 3: The point estimation (PE), its estimated standard error (ESE), and 95% confidence interval (CI)
704
+ for µ, the effect of 401(k) eligibility on improving the positive rate of net total financial assets among people
705
+ without 401(k) eligibility, derived using the standard DR and the PAD methods.
706
+ Method
707
+ PE
708
+ ESE
709
+ CI
710
+ DR
711
+ 0.169
712
+ 0.0140
713
+ (0.142, 0.196)
714
+ PAD
715
+ 0.150
716
+ 0.0125
717
+ (0.126, 0.175)
718
+ 6
719
+ Discussion
720
+ In analogy to our PS model augmentation strategy, we also propose an OR model augmen-
721
+ tation strategy (OAD) that augments the OR model with some bases of X satisfying certain
722
+ moment conditions like Ψ in Algorithm 1. Description and discussion of this method are
723
+ presented in Section A of the Appendix. Similar to Theorem 1, we are able to show that
724
+ this OAD estimator is doubly robust, of a smaller variance than the standard DR estimator
725
+ when the PS model is correct but the OR model is wrong, and equivalent with DR when
726
+ both nuisance models are correct. Just like PAD, this OAD method is easy to implement
727
+ and only requires convex optimization. We notice that some existing methods in intrinsic
728
+ efficient DR estimation like Rotnitzky et al. (2012) and Gronsbell et al. (2022) rely on non-
729
+ convex training to construct the OR model when it is not linear. This OAD strategy could
730
+ mitigate this practical problem and still achieves the purpose of variance reduction in the
731
+ presence of misspecified OR models.
732
+ For ease of demonstration, we focus on covariate shift correction, or equivalently ATT
733
+ estimation in this paper. Our proposed PAD estimation can be potentially generalized to
734
+ address other causal or missing data problems like ATE estimation (Bang and Robins, 2005,
735
+ e.g.), causal model estimation Rotnitzky et al. (2012), transfer learning of a regression model
736
+ Liu et al. (2020), etc. Also, properly specifying the bases Φ is crucial for variance reduction
737
+ in our method. The optimal choice of Φ for the most effective variance reduction is still
738
+ 14
739
+
740
+ an open problem. Related to this, it may be useful and interesting to extend our current
741
+ framework for high-dimensional sparse or sieve construction of the augmentation term Ψ
742
+ Tβ.
743
+ References
744
+ Abadie, A. (2003). Semiparametric instrumental variable estimation of treatment response
745
+ models. Journal of econometrics, 113(2):231–263.
746
+ Azriel, D., Brown, L. D., Sklar, M., Berk, R., Buja, A., and Zhao, L. (2021). Semi-supervised
747
+ linear regression. Journal of the American Statistical Association, pages 1–14.
748
+ Bang, H. and Robins, J. M. (2005). Doubly robust estimation in missing data and causal
749
+ inference models. Biometrics, 61(4):962–973.
750
+ Cao, W., Tsiatis, A. A., and Davidian, M. (2009). Improving efficiency and robustness of
751
+ the doubly robust estimator for a population mean with incomplete data. Biometrika,
752
+ 96(3):723–734.
753
+ Chakrabortty, A., Cai, T., et al. (2018). Efficient and adaptive linear regression in semi-
754
+ supervised settings. The Annals of Statistics, 46(4):1541–1572.
755
+ Chen, Y.-H. and Chen, H. (2000). A unified approach to regression analysis under double-
756
+ sampling designs. Journal of the Royal Statistical Society: Series B (Statistical Methodol-
757
+ ogy), 62(3):449–460.
758
+ Cheng, D., Chakrabortty, A., Ananthakrishnan, A. N., and Cai, T. (2020).
759
+ Estimating
760
+ average treatment effects with a double-index propensity score. Biometrics, 76(3):767–
761
+ 777.
762
+ Chernozhukov, V., Chetverikov, D., Demirer, M., Duflo, E., Hansen, C., Newey, W., and
763
+ Robins, J. (2018). Double/debiased machine learning for treatment and structural param-
764
+ eters.
765
+ Dukes, O. and Vansteelandt, S. (2020). Inference on treatment effect parameters in poten-
766
+ tially misspecified high-dimensional models. Biometrika.
767
+ Gronsbell, J., Liu, M., Tian, L., and Cai, T. (2022). Efficient evaluation of prediction rules in
768
+ semi-supervised settings under stratified sampling. Journal of the Royal Statistical Society.
769
+ Series B, Statistical Methodology, 84(4):1353–1391.
770
+ 15
771
+
772
+ Hahn, J. (1998). On the role of the propensity score in efficient semiparametric estimation
773
+ of average treatment effects. Econometrica, pages 315–331.
774
+ Hahn, J. (2004). Functional restriction and efficiency in causal inference. The Review of
775
+ Economics and Statistics, 86(1):73–76.
776
+ Han, P. (2016).
777
+ Intrinsic efficiency and multiple robustness in longitudinal studies with
778
+ drop-out. Biometrika, 103(3):683–700.
779
+ Huang, J., Gretton, A., Borgwardt, K., Sch¨olkopf, B., and Smola, A. J. (2007). Correcting
780
+ sample selection bias by unlabeled data. In Advances in neural information processing
781
+ systems, pages 601–608.
782
+ Imai, K. and Ratkovic, M. (2014). Covariate balancing propensity score. Journal of the
783
+ Royal Statistical Society: Series B (Statistical Methodology), 76(1):243–263.
784
+ Kang, J. D. and Schafer, J. L. (2007). Demystifying double robustness: A comparison of
785
+ alternative strategies for estimating a population mean from incomplete data. Statistical
786
+ science, 22(4):523–539.
787
+ Kawakita, M. and Kanamori, T. (2013). Semi-supervised learning with density-ratio esti-
788
+ mation. Machine learning, 91(2):189–209.
789
+ Liu, M., Zhang, Y., Liao, K. P., and Cai, T. (2020). Augmented transfer regression learning
790
+ with semi-non-parametric nuisance models. arXiv.
791
+ Neyman, J. (1959). Optimal asymptotic tests of composite hypotheses.
792
+ Probability and
793
+ statsitics, pages 213–234.
794
+ Pan, Y. and Zhao, Y.-Q. (2021). Improved doubly robust estimation in learning optimal indi-
795
+ vidualized treatment rules. Journal of the American Statistical Association, 116(533):283–
796
+ 294.
797
+ Robins, J. M., Rotnitzky, A., and Zhao, L. P. (1994). Estimation of regression coefficients
798
+ when some regressors are not always observed. Journal of the American statistical Asso-
799
+ ciation, 89(427):846–866.
800
+ Rotnitzky, A., Lei, Q., Sued, M., and Robins, J. M. (2012). Improved double-robust estima-
801
+ tion in missing data and causal inference models. Biometrika, 99(2):439–456.
802
+ Shu, H. and Tan, Z. (2018). Improved estimation of average treatment effects on the treated:
803
+ Local efficiency, double robustness, and beyond. arXiv preprint arXiv:1808.01408.
804
+ 16
805
+
806
+ Signorovitch, J. E., Wu, E. Q., Yu, A. P., Gerrits, C. M., Kantor, E., Bao, Y., Gupta,
807
+ S. R., and Mulani, P. M. (2010). Comparative effectiveness without head-to-head trials.
808
+ Pharmacoeconomics, 28(10):935–945.
809
+ Tan, Z. (2010). Bounded, efficient and doubly robust estimation with inverse weighting.
810
+ Biometrika, 97(3):661–682.
811
+ Tan, Z. (2020). Model-assisted inference for treatment effects using regularized calibrated
812
+ estimation with high-dimensional data. The Annals of Statistics, 48(2):811–837.
813
+ Tsiatis, A. A. (2006). Semiparametric theory and missing data. Springer.
814
+ Van der Vaart, A. W. (2000). Asymptotic statistics, volume 3. Cambridge university press.
815
+ Vermeulen, K. and Vansteelandt, S. (2015). Bias-reduced doubly robust estimation. Journal
816
+ of the American Statistical Association, 110(511):1024–1036.
817
+ Yang, S. and Ding, P. (2019). Combining multiple observational data sources to estimate
818
+ causal effects. Journal of the American Statistical Association.
819
+ Zhao, Q. and Percival, D. (2017). Entropy balancing is doubly robust. Journal of Causal
820
+ Inference, 5(1).
821
+ 17
822
+
823
+ A
824
+ Dual construction to augment OR
825
+ In analogy to our PAD estimator, to improve the efficiency our the DR estimator under the
826
+ correct PS and wrong OR models, we propose the Outcome regression Augmented Doubly
827
+ robust (OAD) estimator in the following algorithm.
828
+ Algorithm A1 Outcome regression Augmented Doubly robust (OAD) estimation
829
+ [Step 1] Solve the estimating equations in (1) to obtain �γ and �α, and obtain the conditional
830
+ variance estimator as �θ.
831
+ [Step 2] Let Φ = φ(X) with function φ(·), �g(X
832
+ T�α) = g(X
833
+ T�α) − �ET g(X
834
+ T�α) and
835
+ �Ψ = Φ −
836
+ �ET Φ�g(X
837
+ T�α)
838
+ �ET �g2(X
839
+ T�α)
840
+ �g(X
841
+ T�α).
842
+ [Step 3] Solve the restricted weighted least square (RWLS) problem:
843
+ �β = argminβ �Vµ,OAD(β),
844
+ s.t.
845
+ �ESX �Ψ
846
+ Tβ exp(X
847
+ T�γ) = 0,
848
+ (A1)
849
+ where
850
+ �Vµ,OAD(β) =n−1�
851
+ VarS[{Y − g(X
852
+ T�α) − �Ψ
853
+ T�β} exp(X
854
+ T�γ)] + N−1�
855
+ VarT {g(X
856
+ T�α) + �Ψ
857
+ T�β}
858
+ + 2�
859
+ L∗
860
+ T[N−1 �
861
+ CovT (X, �Ψ
862
+ T�β) + n−1 �
863
+ CovS{X exp(X
864
+ T�γ), �Ψ
865
+ T�β exp(X
866
+ T�γ)}],
867
+ (A2)
868
+ and �
869
+ L∗ = {�ESX exp(X
870
+ T�γ)X
871
+ T}−1�ES{Y − g(X
872
+ T�α)} exp(X
873
+ T�γ)X.
874
+ [Step 4] Obtain the OAD estimator:
875
+ �µOAD = �ES{Y − g(X
876
+ T�α) − �Ψ
877
+ T�β} exp(X
878
+ T�γ) + �ET {g(X
879
+ T�α) + �Ψ
880
+ T�β}.
881
+ To demonstrate how Algorithm A1 works, we define that
882
+ �µOAD =�ES{Y − g(X
883
+ T ¯α) − Ψ
884
+ T¯β} exp(X
885
+ T¯γ) + �ET {g(X
886
+ T ¯α) + Ψ
887
+ T¯β}
888
+ + ES{Y − g(X
889
+ T ¯α)} exp(X
890
+ T¯γ)X
891
+ T{ESX exp(X
892
+ T¯γ)X
893
+ T}−1{�ET X − �ESX exp(X
894
+ T¯γ)}.
895
+ Then similar to our analysis in Section 2.2, when the PS model is correct, �µOAD is asymptot-
896
+ ically equivalent to �µOAD, with its variance being:
897
+ Vµ,OAD(β) =n−1 VarS[{Y − g(X
898
+ T ¯α) − Ψ
899
+ T¯β} exp(X
900
+ T¯γ)] + N−1 VarT {g(X
901
+ T ¯α) + Ψ
902
+ T¯β}
903
+ + 2L∗T[N−1CovT (X, Ψ
904
+ T¯β) + n−1CovS{X exp(X
905
+ T¯γ), Ψ
906
+ T¯β exp(X
907
+ T¯γ)}],
908
+ 1
909
+
910
+ the limiting function of �Vµ,OAD(β) specified in Algorithm A1, where
911
+ L∗ = {ESX exp(X
912
+ T¯γ)X
913
+ T}−1ES{Y − g(X
914
+ T ¯α)} exp(X
915
+ T¯γ)X.
916
+ This corresponds to the objective function in equation (A2). Similar to the PAD construc-
917
+ tion, when β = 0, Vµ,OAD(β) reduces to the asymptotic variance of the standard DR estimator.
918
+ Thus, �µOAD has a smaller variance than the standard DR estimator when the PS model is
919
+ correct and the OR model is wrong, under which we typically have ¯β ̸= 0.
920
+ On the other hand, when OR is correctly specified, we have ¯α = α0, L∗ = 0, and thus
921
+ ∂Vµ,OAD(β)
922
+ ∂β
923
+ |β=0 = CovT (g(X
924
+ T ¯α), Ψ).
925
+ By definition of Ψ, we have CovT (g(X
926
+ T ¯α), Ψ) = 0. Hence, similar to the analysis in Section
927
+ 2.2, �µOAD preserved the same DR property as �µDR, i.e., being root-n consistent whenever the
928
+ PS or the OR model is correctly specified.
929
+ B
930
+ Asymptotic justification
931
+ B.1
932
+ Technical lemma
933
+ Lemma B1. Define a := ESΨ ∂g(XTα)
934
+ ∂αT
935
+ |¯α, b := ESΨ exp(X
936
+ T¯γ)v¯θ(X) + ESΨX
937
+ Tv¯θ(X)LT,
938
+ and Σ := ESΨΨ
939
+ Tv¯θ(X), under Condition 2-5, the solution of the RWLS problem (5) is
940
+ ¯β = Σ−1a(a
941
+ TΣ−1a)−1a
942
+ TΣ−1b − Σ−1b.
943
+ Proof. First we introduce Lagrange multiplier λ and write (5) as the Lagrange form:
944
+ ¯β = argminβ ES{exp(X
945
+ T¯γ)+Ψ
946
+ Tβ}2v¯θ(X)+2LESX{exp(X
947
+ T¯γ)+Ψ
948
+ Tβ}v¯θ(X)−λ
949
+ TES
950
+ ∂g(X
951
+ Tα)
952
+ ∂α
953
+ |¯αΨ
954
+ Tβ.
955
+ Then we have the partial derivative of λ and β:
956
+ ES
957
+ ∂g(X
958
+ Tα)
959
+ ∂α
960
+ |¯αΨ
961
+ Tβ = 0,
962
+ (B1)
963
+ and
964
+ 2ESΨ{exp(X
965
+ T¯γ) + Ψ
966
+ Tβ}v¯θ(X) + 2ESΨX
967
+ Tv¯θ(X)L
968
+ T − ESΨ∂g(X
969
+ Tα)
970
+ ∂αT
971
+ |¯αλ = 0.
972
+ (B2)
973
+ From (B2) we have
974
+ β = {2ESΨΨ
975
+ Tv¯θ(X)}−1{ESΨ∂g(X
976
+ Tα)
977
+ ∂αT
978
+ |¯αλ−2ESΨ exp(X
979
+ T¯γ)v¯θ(X)−2ESΨX
980
+ Tv¯θ(X)L
981
+ T},
982
+ 2
983
+
984
+ together with (B1), we have
985
+ ES
986
+ ∂g(X
987
+ Tα)
988
+ ∂α
989
+ |¯αΨ
990
+ T{ESΨΨ
991
+ Tv¯θ(X)}−1
992
+ ∗ {ESΨ∂g(X
993
+ Tα)
994
+ ∂αT
995
+ |¯αλ − 2ESΨ exp(X
996
+ T¯γ)v¯θ(X) − 2ESΨX
997
+ Tv¯θ(X)L
998
+ T} = 0.
999
+ this function can be simplified as
1000
+ a
1001
+ TΣ−1(aλ − 2b) = 0,
1002
+ and we further have
1003
+ λ = 2(a
1004
+ TΣ−1a)−1a
1005
+ TΣ−1b.
1006
+ Hence, we have
1007
+ ¯β = Σ−1a(a
1008
+ TΣ−1a)−1a
1009
+ TΣ−1b − Σ−1b,
1010
+ and we can estimate it by
1011
+ �β = �Σ
1012
+ −1�a(�a
1013
+ T �Σ
1014
+ −1�a)−1�a
1015
+ T �Σ
1016
+ −1�b − �Σ
1017
+ −1�b
1018
+ for �a := �ES �Ψ∂g(XTα)
1019
+ ∂αT
1020
+ |�α, �b := �ES �Ψ exp(X
1021
+ T�γ)v�θ(X)+�ES �ΨX
1022
+ Tv�θ(X)�LT, and �Σ := �ES �Ψ �Ψ
1023
+ Tv�θ(X)
1024
+ Lemma B2. Under Condition 3, 4 and 6, we have that �Ψ − Ψ = Op(n−1/2).
1025
+ Proof. By definition, we would have that
1026
+ �Ψ − Ψ =
1027
+ �ET {Φv�θ(X)}
1028
+ �ET v�θ(X)
1029
+ − ET {Φv¯θ(X)}
1030
+ ET v¯θ(X)
1031
+ .
1032
+ Under Condition 4 and 6, we have that
1033
+ �ET v�θ(X) − ET v¯θ(X) = �ET v¯θ(X) + �ET
1034
+ ∂vθ(X)
1035
+ ∂θ
1036
+ |�θ(�θ − ¯θ) − ET v¯θ(X) = Op(n−1/2)
1037
+ (B3)
1038
+ for �θ between �θ and ¯θ.
1039
+ By using the same techniques, we have that �ET {Φv�θ(X)} −
1040
+ ET {Φv¯θ(X)} = Op(n−1/2). And we have
1041
+ �Ψ − Ψ =
1042
+ �ET {Φv�θ(X)}ET v¯θ(X) − ET {Φv¯θ(X)}�ET v�θ(X)
1043
+ �ET v�θ(X)ET v¯θ(X)
1044
+ =
1045
+ �ET {Φv�θ(X)}ET v¯θ(X) − ET {Φv¯θ(X)}ET v¯θ(X) − [ET {Φv¯θ(X)}�ET v�θ(X) − ET {Φv¯θ(X)}ET v¯θ(X)]
1046
+ {ET v¯θ(X) + Op(n−1/2)}ET v¯θ(X)
1047
+ = Op(n−1/2)ET v¯θ(X) − ET {Φv¯θ(X)}Op(n−1/2)
1048
+ {ET v¯θ(X) + Op(n−1/2)}ET v¯θ(X)
1049
+ = Op(n−1/2).
1050
+ 3
1051
+
1052
+ Lemma B3. Under Condition 1, 2 and 4, 5, we have that �γ − ¯γ = Op(n−1/2) and �α − ¯α =
1053
+ Op(n−1/2).
1054
+ Proof. The estimation of γ has been given as
1055
+ �ESX exp(X
1056
+ T�γ) = �ET X,
1057
+ by applying Taylor series expansion, we have
1058
+ n−1
1059
+ n
1060
+
1061
+ i=1
1062
+ Xi exp(X
1063
+ T
1064
+ i ¯γ) + n−1
1065
+ n
1066
+
1067
+ i=1
1068
+ Xi exp(X
1069
+ T
1070
+ i �γ)X
1071
+ T
1072
+ i (�γ − ¯γ) = N−1
1073
+ n+N
1074
+
1075
+ i=n+1
1076
+ Xi,
1077
+ where �γ is some vector between �γ and ¯γ. According to (Van der Vaart, 2000), we have
1078
+ �γ − ¯γ = op(1). Let J represent matrix n−1 �n
1079
+ i=1 Xi exp(X
1080
+ T
1081
+ i �γ)X
1082
+ T
1083
+ i , and we have that
1084
+ J = n−1
1085
+ n
1086
+
1087
+ i=1
1088
+ Xi exp(X
1089
+ T
1090
+ i ¯γ)X
1091
+ T
1092
+ i +n−1
1093
+ n
1094
+
1095
+ i=1
1096
+ Xi exp(X
1097
+ T
1098
+ i γ∗)X
1099
+ T
1100
+ i Xi(�γ−¯γ) = ESX exp(X
1101
+ T¯γ)X
1102
+ T+op(1)
1103
+ for γ∗ between �γ and ¯γ. Hence, by central limit theorem and Slutsky theorem, we have
1104
+ that, under Condition 1 and 4
1105
+ �γ − ¯γ = J−1
1106
+
1107
+ N−1
1108
+ n+N
1109
+
1110
+ i=n+1
1111
+ Xi − n−1
1112
+ n
1113
+
1114
+ i=1
1115
+ Xi exp(X
1116
+ T
1117
+ i ¯γ)
1118
+
1119
+ =J−1
1120
+
1121
+ N−1
1122
+ n+N
1123
+
1124
+ i=n+1
1125
+ Xi − ET X + ESX exp(X
1126
+ T¯γ) − n−1
1127
+ n
1128
+
1129
+ i=1
1130
+ Xi exp(X
1131
+ T
1132
+ i ¯γ)
1133
+
1134
+ = Op(n−1/2).
1135
+ Furthermore, The estimation equation of �α is given by
1136
+ �ESS(�α) = �ESX{Y − g(X
1137
+ T�α)} = 0,
1138
+ by using Taylor series expansion, we have that
1139
+ �ESX{Y − g(X
1140
+ T ¯α)} + �ES
1141
+ ∂S(α)
1142
+ ∂αT
1143
+ ����
1144
+ �α
1145
+ (�α − ¯α) = 0
1146
+ for �α between �α and ¯α, and we have
1147
+ �α − ¯α = −�ES
1148
+ �∂S(α)
1149
+ ∂αT
1150
+ ����
1151
+ �α
1152
+ �−1
1153
+ �ESX{Y − g(X
1154
+ T ¯α)}.
1155
+ By using the same techniques as those for obtaining the asymptotic properties of �γ, under
1156
+ Condition 2, 4 and 5, we have �α − ¯α = Op(n−1/2).
1157
+ 4
1158
+
1159
+ Lemma B4. Under Condition 1-6 and Lemma A1-A3, we can obtain that �β−¯β = Op(n−1/2).
1160
+ In addition, when the PS is correctly specified, we further have ¯β = 0 and �β = Op(n−1/2).
1161
+ Proof. By using the same techniques as (B3), under Condition 2-4, we first have that
1162
+ �a−a = �ES �Ψ∂g(X
1163
+ Tα)
1164
+ ∂αT
1165
+ |�α−ESΨ∂g(X
1166
+ Tα)
1167
+ ∂αT
1168
+ |¯α = �ESΨ∂g(X
1169
+ Tα)
1170
+ ∂αT
1171
+ |¯α−ESΨ∂g(X
1172
+ Tα)
1173
+ ∂αT
1174
+ |¯α+Op(n−1/2) = Op(n−1/2).
1175
+ In addition, we can have that �b − b = Op(n−1/2) and �Σ − Σ = Op(n−1/2). Furthermore, we
1176
+ can easily have that
1177
+ �Σ
1178
+ −1 − Σ−1 = Σ−1Σ{Σ + Op(n−1/2)}−1 − Σ−1
1179
+ = Σ−1[Σ{Σ + Op(n−1/2)}−1 − {Σ + Op(n−1/2)}{Σ + Op(n−1/2)}−1] = Op(n−1/2),
1180
+ based on which we can have (�aT �Σ
1181
+ −1�a)−1−(aTΣ−1a)−1 = Op(n−1/2). Let �Ω denote �Σ
1182
+ −1�a(�aT �Σ
1183
+ −1�a)−1�aT �Σ
1184
+ −1
1185
+ and Ω denote Σ−1a(aTΣ−1a)−1aTΣ−1. We can have that �Ω − Ω = Op(n−1/2), hence, we
1186
+ have that �β − ¯β = �Ω�b − Ωb = Op(n−1/2).
1187
+ On the other hand, when the PS is correctly specified, L = 0 and ESΨ exp(X
1188
+ T¯γ)v¯θ(X) =
1189
+ ET Ψv¯θ(X) = 0, which means
1190
+ ¯β = Ωb = Ω{ESΨ exp(X
1191
+ T¯γ)v¯θ(X) + ESΨX
1192
+ Tv¯θ(X)L
1193
+ T} = Ω0 = 0.
1194
+ And at the same time, we have �β = Op(n−1/2).
1195
+ B.2
1196
+ Proof of Theorem 1
1197
+ Proof. Proof of Theorem 1 (i).
1198
+ When the OR is correctly specified, ¯α = α0. Consider �µOR where
1199
+ �µOR = �ES{Y − g(X
1200
+ T ¯α)}{exp(X
1201
+ T¯γ) + Ψ
1202
+ T¯β} + �ET g(X
1203
+ T ¯α)
1204
+ +
1205
+
1206
+ ES
1207
+ ∂g(X
1208
+ Tα)
1209
+ ∂αT
1210
+ ����
1211
+ ¯α
1212
+ exp(X
1213
+ T¯γ) − ET
1214
+ ∂g(X
1215
+ Tα)
1216
+ ∂αT
1217
+ ����
1218
+ ¯α
1219
+
1220
+ ES
1221
+ �∂S(α)
1222
+ ∂αT
1223
+ ����
1224
+ ¯α
1225
+ �−1
1226
+ �ESX{Y − g(X
1227
+ T ¯α)}.
1228
+ It is obvious that E�µOR = ET g(X
1229
+ T ¯α) = µ0. Hence, by using central limit theorem, we have
1230
+ that �µOR − µ0 = Op(n−1/2), n1/2(�µOR − µ0) weakly converges to gaussian distribution with
1231
+ mean 0. On the other hand, we have that
1232
+ �µP AD − �µOR = �ES{Y − g(X
1233
+ Tα0)}{exp(X
1234
+ T¯γ)X
1235
+ T(�γ − ¯γ) + Ψ
1236
+ T(�β − ¯β) + ( �Ψ − Ψ)
1237
+ T¯β}
1238
+
1239
+
1240
+ �ES
1241
+ ∂g(X
1242
+ Tα)
1243
+ ∂αT
1244
+ ����
1245
+ α0
1246
+ {exp(X
1247
+ T¯γ) + Ψ
1248
+ T¯β} − �ET
1249
+ ∂g(X
1250
+ Tα)
1251
+ ∂αT
1252
+ ����
1253
+ α0
1254
+
1255
+ (�α − ¯α) + op(n−1/2)
1256
+ 5
1257
+
1258
+
1259
+
1260
+ ES
1261
+ ∂g(X
1262
+ Tα)
1263
+ ∂αT
1264
+ ����
1265
+ α0
1266
+ exp(X
1267
+ T¯γ) − ET
1268
+ ∂g(X
1269
+ Tα)
1270
+ ∂αT
1271
+ ����
1272
+ α0
1273
+
1274
+ ES
1275
+ �∂S(α)
1276
+ ∂αT
1277
+ ����
1278
+ α0
1279
+ �−1
1280
+ �ESX{Y − g(X
1281
+ Tα0)},
1282
+ by using central limit theorem, along with Lemma A2-A4, we have that
1283
+ �ES{Y − g(X
1284
+ Tα0)}{exp(X
1285
+ T¯γ)X
1286
+ T(�γ − ¯γ) + Ψ
1287
+ T(�β − ¯β) + ( �Ψ − Ψ)
1288
+ T¯β}
1289
+ = [�ES{Y − g(X
1290
+ Tα0)} exp(X
1291
+ T¯γ)X
1292
+ T](�γ − ¯γ) + [�ES{Y − g(X
1293
+ Tα0)}Ψ
1294
+ T](�β − ¯β)
1295
+ + [�ES{Y − g(X
1296
+ Tα0)}¯β
1297
+ T]( �Ψ − Ψ) = Op(n−1/2)op(1) + Op(n−1/2)op(1) + Op(n−1/2)op(1) = op(n−1/2).
1298
+ On the other hand,
1299
+
1300
+
1301
+ �ES
1302
+ ∂g(X
1303
+ Tα)
1304
+ ∂αT
1305
+ ����
1306
+ α0
1307
+ {exp(X
1308
+ T¯γ) + Ψ
1309
+ T¯β} − �ET
1310
+ ∂g(X
1311
+ Tα)
1312
+ ∂αT
1313
+ ����
1314
+ α0
1315
+
1316
+ (�α − ¯α)
1317
+ =
1318
+
1319
+ �ES
1320
+ ∂g(X
1321
+ Tα)
1322
+ ∂αT
1323
+ ����
1324
+ α0
1325
+ {exp(X
1326
+ T¯γ) + Ψ
1327
+ T¯β} − �ET
1328
+ ∂g(X
1329
+ Tα)
1330
+ ∂αT
1331
+ ����
1332
+ α0
1333
+
1334
+ �ES
1335
+ �∂S(α)
1336
+ ∂αT
1337
+ ����
1338
+ ¯α
1339
+ �−1
1340
+ �ESX{Y − g(X
1341
+ T ¯α)}
1342
+ =
1343
+
1344
+ ES
1345
+ ∂g(X
1346
+ Tα)
1347
+ ∂αT
1348
+ ����
1349
+ α0
1350
+ exp(X
1351
+ T¯γ) − ET
1352
+ ∂g(X
1353
+ Tα)
1354
+ ∂αT
1355
+ ����
1356
+ α0
1357
+ + Op(n−1/2)
1358
+
1359
+
1360
+
1361
+ ES
1362
+ �∂S(α)
1363
+ ∂αT
1364
+ ����
1365
+ α0
1366
+ �−1
1367
+ + Op(n−1/2)
1368
+
1369
+ �ESX{Y − g(X
1370
+ Tα0)}.
1371
+ (B4)
1372
+ Hence, we have that
1373
+
1374
+
1375
+ �ES
1376
+ ∂g(X
1377
+ Tα)
1378
+ ∂αT
1379
+ ����
1380
+ α0
1381
+ {exp(X
1382
+ T¯γ) + Ψ
1383
+ T¯β} − �ET
1384
+ ∂g(X
1385
+ Tα)
1386
+ ∂αT
1387
+ ����
1388
+ α0
1389
+
1390
+ (�α − ¯α)
1391
+
1392
+
1393
+ ES
1394
+ ∂g(X
1395
+ Tα)
1396
+ ∂αT
1397
+ ����
1398
+ α0
1399
+ exp(X
1400
+ T¯γ) − ET
1401
+ ∂g(X
1402
+ Tα)
1403
+ ∂αT
1404
+ ����
1405
+ α0
1406
+
1407
+ ES
1408
+ �∂S(α)
1409
+ ∂αT
1410
+ ����
1411
+ α0
1412
+ �−1
1413
+ �ESX{Y − g(X
1414
+ Tα0)}
1415
+ = �ESX{Y − g(X
1416
+ Tα0)}Op(n−1/2) = op(n−1/2).
1417
+ Thus, from previous results, we have that �µP AD − �µOR = op(n−1/2). Together with Slutsky
1418
+ theorem, we futher have that �µP AD − µ0 = Op(n−1/2) and n1/2(�µP AD − µ0) weakly converges
1419
+ to gaussian distribution with mean 0.
1420
+ When the PS is correctly specified, ¯γ = γ0, we consider �µPS where
1421
+ �µPS = �ES{Y − g(X
1422
+ T ¯α)}{exp(X
1423
+ Tγ0) + Ψ
1424
+ T¯β} + �ET g(X
1425
+ T ¯α)
1426
+ + ES{Y − g(X
1427
+ T ¯α)} exp(X
1428
+ T¯γ)X
1429
+ T{ESX exp(X
1430
+ Tγ0)X
1431
+ T}−1{�ET X − �ESX exp(X
1432
+ Tγ0)}.
1433
+ Together with the results from Lemma A4, we have that E�µPS = ESY exp(X
1434
+ T¯γ) = ET Y =
1435
+ µ0. By using the central limit theorem, we have that �µPS − µ0 = Op(n−1/2), n1/2(�µPS − µ0)
1436
+ weakly converges to gaussian distribution with mean 0. On the other hand, we have that
1437
+ �µP AD − �µPS = �ES{Y − g(X
1438
+ T ¯α)}{exp(X
1439
+ T¯γ)X
1440
+ T(�γ − ¯γ) + Ψ
1441
+ T(�β − ¯β)}
1442
+ 6
1443
+
1444
+ − ES{Y − g(X
1445
+ T ¯α)} exp(X
1446
+ T¯γ)X
1447
+ T{ESX exp(X
1448
+ Tγ0)X
1449
+ T}−1{�ET X − �ESX exp(X
1450
+ Tγ0)} + op(n−1/2)
1451
+ By using the techniques from (B4), we would have
1452
+ �ES{Y − g(X
1453
+ T ¯α)} exp(X
1454
+ T¯γ)X
1455
+ T(�γ − ¯γ)
1456
+ − ES{Y − g(X
1457
+ T ¯α)} exp(X
1458
+ T¯γ)X
1459
+ T{ESX exp(X
1460
+ Tγ0)X
1461
+ T}−1{�ET X − �ESX exp(X
1462
+ Tγ0)} = op(n−1/2)
1463
+ And from Lemma A4, we have �β = Op(n−1/2). Thus, we have �µP AD − µ0 = Op(n−1/2). On
1464
+ the other hand, it is worth noticing that �β is the continuous function of �θ, �γ and �α, so
1465
+ under central limit theorem and Slutsky theorem, we would have the asymptotic normality
1466
+ of �β. Hence, we further have that n1/2(�µPS − µ0) weakly converges to gaussian distribution
1467
+ with mean 0.
1468
+ Proof. Proof of Theorem 1 (ii).
1469
+ First we denote U as
1470
+ U = VarT (E(Y |X)) + LESXX
1471
+ T Var(Y |X)L
1472
+ T
1473
+ When the OR is correctly specified, the asymptotic variance of �µP AD, Var{n−1/2(�µP AD−µ0)}
1474
+ is
1475
+ ES{exp(X
1476
+ T¯γ) + Ψ
1477
+ T¯β}2v¯θ(X) + 2LESX{exp(X
1478
+ T¯γ) + Ψ
1479
+ T¯β}v¯θ(X) + U,
1480
+ and ¯β contributes to minimizing this variance. When ¯β = 0, the function above is written
1481
+ as
1482
+ ES{exp(X
1483
+ T¯γ)}2v¯θ(X) + 2LESX{exp(X
1484
+ T¯γ)}v¯θ(X) + U,
1485
+ which is the same as the asymptotic variance of �µDR, Var{n−1/2(�µDR − µ0)}. Hence, when
1486
+ ¯β ̸= 0, �µP AD has the smaller asymptotic variance than standard doubly robust estimator
1487
+ �µDR.
1488
+ Proof. Proof of Theorem 1 (iii).
1489
+ When both the PS and OR is correctly specified, consider �µB, where
1490
+ �µB = �ES{Y − g(X
1491
+ Tα0)} exp(X
1492
+ Tγ0) + �ET g(X
1493
+ Tα0).
1494
+ By using central limit theorem, n1/2(�µB −µ0) weakly converges to gaussian distribution with
1495
+ mean 0. On the other hand, by using Taylor series expansion, we would have �µP AD − �µB =
1496
+ op(n−1/2) and �µDR − �µB = op(n−1/2). Hence, they have the same asymptotic variance.
1497
+ 7
1498
+
1499
+ This figure "DMLmse.png" is available in "png"� format from:
1500
+ http://arxiv.org/ps/2301.02162v1
1501
+
1502
+ This figure "biasdml.png" is available in "png"� format from:
1503
+ http://arxiv.org/ps/2301.02162v1
1504
+
79A0T4oBgHgl3EQfOf-b/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
79AyT4oBgHgl3EQf2_lP/content/2301.00760v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10850732eb3ed5e48e2792640b56945c82eec9a641d8ead8d7152e4c89f66e54
3
+ size 302783
79AyT4oBgHgl3EQf2_lP/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c63e3ebdb53469a95f829987addf4b1d290a77cb7211e2a3f31922c9cc9faa0f
3
+ size 267209
7dE5T4oBgHgl3EQfQA7X/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83f929c7103e400296c28f434eee7d418e579b6d8a7099c5130b4e5f6ff78736
3
+ size 182983
7tE1T4oBgHgl3EQfBwI8/content/tmp_files/2301.02855v1.pdf.txt ADDED
@@ -0,0 +1,2274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 1
2
+ An Enhanced Gradient-Tracking Bound for Distributed
3
+ Online Stochastic Convex Optimization
4
+ Sulaiman A. Alghunaim and Kun Yuan
5
+ Abstract—Gradient-tracking
6
+ (GT)
7
+ based
8
+ decentralized
9
+ methods have emerged as an effective and viable alterna-
10
+ tive method to decentralized (stochastic) gradient descent
11
+ (DSGD) when solving distributed online stochastic opti-
12
+ mization problems. Initial studies of GT methods implied
13
+ that GT methods have worse network dependent rate than
14
+ DSGD, contradicting experimental results. This dilemma
15
+ has recently been resolved, and tighter rates for GT methods
16
+ have been established, which improves upon DSGD.
17
+ In this work, we establish more enhanced rates for GT
18
+ methods under the online stochastic convex settings. We
19
+ present an alternative approach for analyzing GT methods
20
+ for convex problems and over static graphs. When compared
21
+ to previous analyses, this approach allows us to establish
22
+ enhanced network dependent rates.
23
+ Index Terms—Distributed stochastic optimization, decen-
24
+ tralized learning, gradient-tracking, adapt-then-combine.
25
+ I. Introduction
26
+ We consider the multi-agent consensus optimization prob-
27
+ lem, in which n agents work together to solve the following
28
+ stochastic optimization problem:
29
+ minimize
30
+ x∈Rd
31
+ f(x) = 1
32
+ n
33
+ n
34
+
35
+ i=1
36
+ fi(x)
37
+ fi(x) ≜ E[Fi(x; ξi)].
38
+ (1)
39
+ Here, fi : Rd → R is the private cost function held by agent
40
+ i, which is defined as the expected value of some loss function
41
+ Fi(·, ξi) over local random variable ξi (e.g., data points). An
42
+ algorithm that solves (1) is said to be a decentralized method
43
+ if its implementation requires the agents to communicate only
44
+ with agents who are directly connected to them (i.e., neighbors)
45
+ based on the given network topology/graph.
46
+ One of the most popular decentralized methods to solve prob-
47
+ lem (1) is decentralized stochastic gradient descent (DSGD)
48
+ [1]–[3]. While DSGD is communication efficient and simple to
49
+ implement, it converges slowly when the local functions/data
50
+ are heterogeneous across nodes. Furthermore, because data
51
+ heterogeneity can be amplified by large and sparse network
52
+ topologies [4], DSGD performance is significantly degraded
53
+ with these topologies.
54
+ In this work, we analyze the performance of the gradient-
55
+ tracking method [5], [6], which is another well-known decentral-
56
+ ized method that solves problem (1). To describe the algorithm,
57
+ we let wij ≥ 0 denote the weight used by agent i to scale
58
+ information received from agent j with wij = 0 if j /∈ Ni where
59
+ Ni is the neighborhood of agent i. The adapt-then-combine
60
+ gradient-tracking (ATC-GT) method [5] is described as follows:
61
+ xk+1
62
+ i
63
+ =
64
+
65
+ j∈Ni
66
+ wij(xk
67
+ j − αgk
68
+ j )
69
+ (2a)
70
+ S. A. Alghunaim ([email protected]) is with the
71
+ Department of Electrical Engineering, Kuwait University, Kuwait. K.
72
+ Yuan ([email protected]) is with the Center for Machine Learning
73
+ Research, Peking University, China.
74
+ gk+1
75
+ i
76
+ =
77
+
78
+ j∈Ni
79
+ wij
80
+
81
+ gk
82
+ j + ∇Fj(xk+1
83
+ j
84
+ ; ξk+1
85
+ j
86
+ ) − ∇Fj(xk
87
+ j ; ξk
88
+ j )�
89
+ (2b)
90
+ with initialization g0
91
+ i = ∇Fi(x0
92
+ i ; ξ0
93
+ i ) and arbitrary x0
94
+ i ∈ Rd.
95
+ Here, ∇Fi(xk
96
+ i ; ξk
97
+ i ) is the stochastic gradient and ξk
98
+ i is the data
99
+ sampled by agent i at iteration k.
100
+ Gradient-tracking can eliminate the impact of heterogeneity
101
+ between local functions [5]–[8]. In massive numerical experi-
102
+ ments reported in [9]–[12], GT can significantly outperform
103
+ DSGD in the online stochastic setting. Initial studies on the
104
+ convergence rate of GT methods are inadequate; they provide
105
+ loose convergence rates that are more sensitive to network
106
+ topology than vanilla DSGD. According to these findings, GT
107
+ will converge slower than DSGD on large and sparse networks,
108
+ which is counter-intuitive and contradicts numerical results
109
+ published in the literature. Recent works [13], [14] establish
110
+ the first convergence rates for GT that are faster than DSGD
111
+ and more robust to sparse topologies under stochastic and non-
112
+ convex settings. In this paper, we will provide additional en-
113
+ hancements for GT under convex and strongly convex settings.
114
+ A. Related works
115
+ Gradient-tracking (GT) methods, which utilize dynamic
116
+ tracking mechanisms [15] to approximate the globally averaged
117
+ gradient, have emerged as an alternative to decentralized gradi-
118
+ ent descent (DGD) [1]–[3], [16], [17] with exact convergence for
119
+ deterministic problems [5]–[8]. Since their inception, numerous
120
+ works have investigated GT methods in a variety of contexts [9],
121
+ [10], [18]–[28]. However, all of these works provide convergence
122
+ rates that can be worse than vanilla DSGD. In particular, these
123
+ results indicate that GT is less robust to sparse topologies even
124
+ if it can remove the influence of data heterogeneity. The work
125
+ [14] established refined bounds for various methods including
126
+ GT methods that improve upon DSGD under nonconvex set-
127
+ tings. Improved network dependent bounds for GT methods in
128
+ both convex and non-convex settings are also provided in [13].
129
+ In this work, we provide additional improvements over previous
130
+ works in convex and strongly convex settings – see Table I.
131
+ It should be noted that there are other methods that are
132
+ different from GT methods but have been shown to have com-
133
+ parable or superior performance – see [14], [29] and references
134
+ therein. In contrast to these other methods, GT methods have
135
+ been shown to converge in a variety of scenarios, such as
136
+ directed graphs and time-varying graphs [18], [19], [22]. We
137
+ should also mention that there are modifications to GT ap-
138
+ proaches that can improve the rate at the price of knowing addi-
139
+ tional network information and/or more computation/memory
140
+ [21]. However, the focus of this study is on basic vanilla GT
141
+ methods.
142
+ B. Contributions
143
+ • We present an alternative approach for analyzing GT
144
+ methods in convex and static graph settings, which may
145
+ arXiv:2301.02855v1 [math.OC] 7 Jan 2023
146
+
147
+ 2
148
+ TABLE I: Convergence rate to reach ϵ accuracy. The strongly convex (SC) and PL condition rates ignores iteration logarithmic factors.
149
+ The quantity λ = ρ(W − 1
150
+ n11T) ∈ (0, 1) is the mixing rate of the network where W is the network combination matrix. a0 = ∥¯x0 − x⋆∥2,
151
+ ς2
152
+ ⋆ = 1
153
+ n
154
+ �n
155
+ i=1 ∥∇fi(x⋆)∥2, ς2
156
+ 0 = 1
157
+ n
158
+ �n
159
+ i=1 ∥∇fi(x0) − ∇f(x0)∥2, x0 is the initialization for all nodes, and x⋆ is an optimal solution of (1).
160
+ Reference
161
+ Iterations to ϵ accuracy
162
+ Remark
163
+ Convex
164
+ [13]
165
+ 1
166
+ nϵ2 +
167
+ log(
168
+ 1
169
+ 1−λ )1/2
170
+ (1−λ)1/2
171
+ 1
172
+ ϵ3/2 +
173
+ log(
174
+ 1
175
+ 1−λ )(a0+ς2
176
+ 0 )
177
+ 1−λ
178
+ 1
179
+ ϵ
180
+ Rate holds only when iteration number K >
181
+ log(
182
+ 1
183
+ 1−λ )
184
+ 1−λ
185
+ Convex
186
+ Our work
187
+ 1
188
+ nϵ2 +
189
+ 1
190
+ (1−λ)1/2
191
+ 1
192
+ ϵ3/2 + (a0+ς2
193
+ ⋆)
194
+ (1−λ)
195
+ 1
196
+ ϵ
197
+
198
+ SC
199
+ [9]
200
+ 1
201
+ nϵ +
202
+ 1
203
+ (1−λ)3/2
204
+ 1
205
+ √ϵ + C
206
+ √ϵ
207
+ C depends on 1/(1 − λ)
208
+ PL∗
209
+ [10]
210
+ 1
211
+ nϵ +
212
+ 1
213
+ (1−λ)3/2
214
+ 1
215
+ √ϵ + ˜C log 1
216
+ ϵ
217
+ ˜C depends on 1/(1 − λ)
218
+ SC
219
+ [13]
220
+ 1
221
+ nϵ +
222
+ log(
223
+ 1
224
+ 1−λ )1/2
225
+ (1−λ)1/2
226
+ 1
227
+ √ϵ +
228
+ log(
229
+ 1
230
+ 1−λ )
231
+ (1−λ)
232
+ log
233
+
234
+ (a0+ς2
235
+ 0 )
236
+ (1−λ)ϵ
237
+
238
+ Rate holds only when iteration number K >
239
+ log(
240
+ 1
241
+ 1−λ )
242
+ 1−λ
243
+ PL∗
244
+ [14]
245
+ 1
246
+ nϵ +
247
+
248
+ 1
249
+ (1−λ)1/2 +
250
+ 1
251
+ (1−λ)√n
252
+ � 1
253
+ √ϵ +
254
+ 1
255
+ 1−λ log
256
+
257
+ (a0+ς2
258
+ ⋆)
259
+ ϵ
260
+
261
+ Rate holds by tuning stepsize from [14, Theorem 2]
262
+ SC
263
+ Our work
264
+ 1
265
+ nϵ +
266
+ 1
267
+ (1−λ)1/2
268
+ 1
269
+ √ϵ +
270
+ 1
271
+ 1 − λ log
272
+
273
+ (a0+ς2
274
+ ⋆)
275
+ ϵ
276
+
277
+
278
+ ∗ The PL condition is weaker than SC and can hold for nonconvex functions; any SC function satisfies the PL condition.
279
+ be useful for analyzing GT methods in other settings such
280
+ as variance-reduced gradients.
281
+ • In stochastic and convex environments, our convergence
282
+ rate improve and tighten existing GT bounds. We show,
283
+ in particular, that under convex settings, GT methods
284
+ have better dependence on network topologies than in
285
+ nonconvex settings [14]. Also, our bounds removes the
286
+ network dependent log factors in [13] – See Table I.
287
+ II. ATC-GT and Main Assumption
288
+ In this section, we describe the GT algorithm (2) in network
289
+ notation and list all necessary assumptions. We begin by defin-
290
+ ing some network quantities.
291
+ A. GT in network notation
292
+ We define xk
293
+ i ∈ Rd as the estimated value of x ∈ Rd at
294
+ agent i and iteration (time) k, and we introduce the augmented
295
+ network quantities:
296
+ xk ≜ col{xk
297
+ 1, . . . , xk
298
+ n} ∈ Rdn
299
+ f(xk) ≜
300
+ n
301
+
302
+ i=1
303
+ fi(xk
304
+ i )
305
+ ∇f(xk) ≜ col{∇f1(xk
306
+ 1), . . . , ∇fn(xk
307
+ n)}
308
+ ∇F(xk) ≜ col{∇F1(xk
309
+ 1; ξk
310
+ 1), . . . , ∇Fn(xk
311
+ n; ξk
312
+ n)}
313
+ gk ≜ col{gk
314
+ 1, . . . , gk
315
+ n} ∈ Rdn.
316
+ Here, col{·} is an operation to stack all vectors on top of each
317
+ other. In addition, we define
318
+ W ≜ [wij] ∈ Rn×n,
319
+ W ≜ W ⊗ Id,
320
+ (3)
321
+ where W is the network weight (or combination, mixing, gossip)
322
+ matrix with elements wij, and symbol ⊗ denotes the Kronecker
323
+ product operation. Using the above quantities, the ATC-GT
324
+ method (2) can be described as follows:
325
+ xk+1 = W[xk − αgk]
326
+ (4a)
327
+ gk+1 = W[gk + ∇F(xk+1) − ∇F(xk)],
328
+ (4b)
329
+ with initialization g0 = ∇F(x0) and arbitrary x0.
330
+ B. Assumptions
331
+ Here, we list the assumptions used in our analyses. Our first
332
+ assumption is on the network graph stated below.
333
+ Assumption 1 (Weight matrix). The network graph is as-
334
+ sumed to be static and, the weight matrix W to be doubly
335
+ stochastic and primitive. We further assume W to be symmetric
336
+ and positive semidefinite.
337
+
338
+ It is important to note that assuming W
339
+ to be positive
340
+ semidefinite is not restrictive; given any doubly stochastic and
341
+ symmetric ˜
342
+ W, we can easily construct a positive semidefinite
343
+ weight matrix by W = (I + ˜
344
+ W)/2. We also remark that, under
345
+ Assumption 1, the mixing rate of the network is:
346
+ λ ≜
347
+ ��W − 1
348
+ n11T�� =
349
+ max
350
+ i∈{2,...,n} |λi| < 1.
351
+ (5)
352
+ The next assumption is on the objective function.
353
+ Assumption 2 (Objective function). Each function fi :
354
+ Rd → R is L-smooth
355
+ ∥∇fi(y) − ∇fi(z)∥ ≤ L∥y − z∥,
356
+ ∀ y, z ∈ Rd
357
+ (6)
358
+ and (µ-strongly) convex for some L ≥ µ ≥ 0. As a result, the
359
+ aggregate function f(x) =
360
+ 1
361
+ n
362
+ �n
363
+ i=1 fi(x) is also L-smooth and
364
+ (µ-strongly) convex. (When µ = 0, then the objective functions
365
+ are simply convex.)
366
+
367
+ We now state our final assumption related to the gradient
368
+ noise.
369
+ Assumption 3 (Gradient noise). For all {i}n
370
+ i=1 and k =
371
+ 0, 1, . . ., we assume the following inequalities hold
372
+ E �
373
+ ∇Fi(xk
374
+ i ; ξk
375
+ i ) − ∇fi(xk
376
+ i ) | F k�
377
+ = 0,
378
+ (7a)
379
+ E �
380
+ ∥∇Fi(xk
381
+ i ; ξk
382
+ i ) − ∇fi(xk
383
+ i )∥2 | F k�
384
+ ≤ σ2,
385
+ (7b)
386
+ for some σ2 ≥ 0, where F k ≜ {x0, x1, . . . , xk} is the algorithm-
387
+ generated filtration. We further assume that conditioned on F k,
388
+ the random data {ξt
389
+ i} are independent of one another for any
390
+ {i}n
391
+ i=1 and {t}t≤k.
392
+
393
+ III. Error Recursion
394
+ To establish the convergence of (4), we will first derive
395
+ an error recursion that will be key to our enhanced bounds.
396
+
397
+ 3
398
+ Motivated by [14], the following result rewrites algorithm (4)
399
+ in an equivalent manner.
400
+ Lemma 1 (Equivalent GT form). Let x0 take any arbitrary
401
+ value and z0 = 0. Then for static graphs, the update for xk in
402
+ algorithm (4) is equivalent to following updates for k = 1, 2, . . .
403
+ xk+1 = (2W − I)xk − αW2∇F(xk) − Bzk
404
+ (8a)
405
+ zk+1 = zk + Bxk
406
+ (8b)
407
+ with initialization x1 = W(x0 − α∇F(x0)) and z1 = Bx0, and
408
+ B = I − W.
409
+ Proof. Clearly, with the above initialization, the iterates x1 are
410
+ identical for the updates (4) and (8). Now, for k ≥ 1, it holds from
411
+ (8a) that
412
+ xk+1 − xk = (2W − I)(xk − xk−1) − B(zk − zk−1)
413
+ − αW2(∇F(xk) − ∇F(xk−1)).
414
+ Substituting zk − zk−1 = Bxk−1 ((8b)) and B = I − W into
415
+ the above equation and rearranging the recursion gives
416
+ xk+1 = 2Wxk − W2xk−1 − αW2(∇F(xk) − ∇F(xk−1)).
417
+ Following the same approach, we can also describe the xk
418
+ update for the GT algorithm (4) as above – see [14], [29]. Hence,
419
+ both methods are equivalent for static graph W.
420
+ Under Assumption 1, the fixed point of recursion (8), denoted
421
+ by (x⋆, z⋆), satisfies:
422
+ 0 = αW2∇f(x⋆) + Bz⋆
423
+ 0 = Bx⋆.
424
+ (9)
425
+ where x⋆ = 1 ⊗ x⋆ and x⋆ is the optimal solution of (1). The
426
+ existence of z⋆ can be shown by using similar arguments as in
427
+ [30, Lemma 3.1] or [29, Lemma 1]. By introducing the notation
428
+ ˜xk ≜ xk − x⋆,
429
+ ˜z ≜ zk − z⋆,
430
+ (10)
431
+ using (8) and the fact (2W − I)x⋆ = x⋆, we can get the error
432
+ recursion:
433
+
434
+ ˜xk+1
435
+ ˜zk+1
436
+
437
+ =
438
+
439
+ 2W − I
440
+ −B
441
+ B
442
+ I
443
+ � �
444
+ ˜xk
445
+ ˜zk
446
+
447
+ − α
448
+
449
+ W2�
450
+ ∇f(xk) − ∇f(x⋆) + vk�
451
+ 0
452
+
453
+ ,
454
+ (11)
455
+ where vk ≜ ∇F(xk) − ∇f(xk).
456
+ Remark 1 (Alternative analysis approach). By describing
457
+ GT (4) in the alternative form (8), we are able to derive the
458
+ error recursion from the fixed point (11). This is similar to the
459
+ way Exact-diffusion/D2 is analyzed in [4], [12]. This alternative
460
+ approach allows us to derive tighter bounds compared with
461
+ existing GT works [9], [10], [13], [14].
462
+
463
+ Convergence analysis of (11) still remains difficult. We will
464
+ exploit the properties of the matrix W to transform recursion
465
+ (11) into a more suitable form for our analysis. To that end,
466
+ the following quantities are introduced:
467
+ ¯xk ≜ 1
468
+ n(1T
469
+ n ⊗ Id)xk = 1
470
+ n
471
+ n
472
+
473
+ i=1
474
+ xk
475
+ i ,
476
+ (12a)
477
+ ¯ek
478
+ x ≜ 1
479
+ n(1T
480
+ n ⊗ Id)˜xk = ¯xk − x⋆,
481
+ (12b)
482
+ ∇f(xk) ≜ 1
483
+ n(1T
484
+ n ⊗ Id)∇f(xk) = 1
485
+ n
486
+ n
487
+
488
+ i=1
489
+ ∇fi(xk
490
+ i ),
491
+ (12c)
492
+ ¯vk ≜ 1
493
+ n(1T
494
+ n ⊗ Id)vk.
495
+ (12d)
496
+ Under Assumption 1, the matrix W admits the following eigen-
497
+ decomposition:
498
+ W = UΣU−1 = �
499
+ 1 ⊗ Id
500
+ ˆU
501
+
502
+
503
+ ��
504
+
505
+ U
506
+
507
+ Id
508
+ 0
509
+ 0
510
+ Λ
511
+
512
+
513
+ ��
514
+
515
+ Σ
516
+ � 1
517
+ n1T ⊗ Id
518
+ ˆUT
519
+
520
+
521
+ ��
522
+
523
+ U−1
524
+ (13)
525
+ where Λ is a diagonal matrix with eigenvalues strictly less than
526
+ one and ˆU is an dn × d(n − 1) matrix that satisfies
527
+ ˆUT ˆU = I,
528
+ (1T ⊗ Id) ˆU = 0
529
+ (14a)
530
+ ˆU ˆUT = I − 1
531
+ n11T ⊗ Id.
532
+ (14b)
533
+ Lemma 2 (Decomposed error recursion). Under Assump-
534
+ tion 1, there exists matrices ˆV and Γ to transform the error
535
+ recursion (11) into the following form:
536
+ ¯ek+1
537
+ x
538
+ = ¯ek
539
+ x − α∇f(xk) + α¯vk,
540
+ (15a)
541
+ ˆxk+1 = Γˆxk − α ˆV−1
542
+ l
543
+ Λ2 ˆUT�
544
+ ∇f(xk) − ∇f(x⋆) + vk�
545
+ ,
546
+ (15b)
547
+ where
548
+ ˆxk ≜ ˆV−1
549
+ � ˆUT˜xk
550
+ ˆUT˜zk
551
+
552
+ ,
553
+ (16)
554
+ and ˆV−1
555
+ l
556
+ denotes the left block of ˆV−1 = [ ˆV−1
557
+ l
558
+ ˆV−1
559
+ r ]. Moreover,
560
+ the following bounds hold:
561
+ ∥ ˆV∥2 ≤ 3,
562
+ ∥ ˆV−1∥2 ≤ 9,
563
+ ∥Γ∥ ≤ 1+λ
564
+ 2 ,
565
+ (17)
566
+ where λ = maxi∈{2,...,n} λi.
567
+ Proof. See Appendix A
568
+ The preceding result will serve as the starting point for deriv-
569
+ ing the bounds that will lead us to our conclusions. Specifically,
570
+ we can derive the following bounds from the above result.
571
+ Lemma 3 (Coupled error inequality). Suppose Assump-
572
+ tions 1–2 hold. Then, if α <
573
+ 1
574
+ 4L, we have
575
+ E ∥¯ek+1
576
+ x
577
+ ∥2 ≤ (1 − µα) E ∥¯ek
578
+ x∥2 − α�
579
+ E f(¯xk) − f(x⋆)�
580
+ + 3αc2
581
+ 1L
582
+ 2n
583
+ E ∥ˆxk∥2 + α2σ2
584
+ n
585
+ ,
586
+ (18)
587
+ and
588
+ E ∥ˆxk+1∥2 ≤ γ E ∥ˆxk∥2 + α2c2
589
+ 2λ4
590
+ (1 − γ) E ∥∇f(xk) − ∇f(x⋆)∥2
591
+ + α2c2
592
+ 2λ4nσ2,
593
+ (19)
594
+ where γ ≜ ∥Γ∥, c1 ≜ ∥ ˆV∥, and c2 = ∥ ˆV−1∥.
595
+ Proof. See Appendix B.
596
+ IV. Convergence Results
597
+ In this section, we present our main convergence results in
598
+ Theorems 1 and 2. We then discuss our results and highlight
599
+ the differences with existing bounds.
600
+ Theorem 1 (Convex case). Suppose that Assumptions 1-2
601
+ are satisfied. Then, there exists a constant stepsize α such that
602
+ 1
603
+ K
604
+ K−1
605
+
606
+ k=0
607
+
608
+ E[f(¯xk) − f ⋆] + L
609
+ n E ∥xk − 1 ⊗ ¯xk∥2�
610
+ ≤ σ∥¯e0
611
+ x∥
612
+
613
+ nK
614
+ +
615
+
616
+ Lλ4σ2
617
+ 1 − λ
618
+ �1/3 �
619
+ ∥¯e0
620
+ x∥2
621
+ K
622
+ � 2
623
+ 3
624
+
625
+ 4
626
+ +
627
+
628
+ Lλ2
629
+ 1 − λ∥¯e0
630
+ x∥2 +
631
+ ς2
632
+
633
+ L(1 − λ)
634
+
635
+ C
636
+ K ,
637
+ (20)
638
+ where ¯e0
639
+ x ≜ ¯x0 − x⋆, ς2
640
+ ⋆ ≜
641
+ 1
642
+ n
643
+ �n
644
+ i=1 ∥∇fi(x⋆)∥2, and C is an
645
+ absolute constant.
646
+ Proof. See Appendix C.
647
+ Theorem 2 (Strongly-convex case). Suppose that Assump-
648
+ tions 1-2 are satisfied. Then, there exists a constant stepsize α
649
+ such that
650
+ E ∥¯eK
651
+ x ∥2 + 1
652
+ n∥xK − 1 ⊗ ¯xK∥2 ≤ ˜O
653
+
654
+ σ2
655
+ nK +
656
+ σ2
657
+ (1 − λ)K2
658
+
659
+ + ˜O
660
+
661
+ σ2
662
+ (1 − λ)2nK3 + (a0 + ς2
663
+ ⋆) exp [−(1 − λ)K]
664
+
665
+ ,
666
+ (21)
667
+ where a0 ≜ ∥¯x0 − x⋆∥2, ς2
668
+ ⋆ ≜
669
+ 1
670
+ n
671
+ �n
672
+ i=1 ∥∇fi(x⋆)∥2, and the
673
+ notation ˜O(·) ignores logarithmic factors.
674
+ Proof. See Appendix D.
675
+ In comparison to [13], our results removes the log factor
676
+ O(log(
677
+ 1
678
+ 1−λ)) and holds for any number of iteration K – see
679
+ Table I. Moreover, observe that for the strongly-convex case,
680
+ unlike [13], we do not have a network term 1/(1−λ) multiplying
681
+ the highest order exponential term exp(·).
682
+ Remark 2 (Improvement upon nonconvex GT rates). The
683
+ GT rates for convex and strongly-convex settings provided in
684
+ Theorems 1 and 2 improve upon the GT rates for non-convex
685
+ [13], [14] and PL condition [14] settings. For example, observe
686
+ from Table I that the GT rate under the PL condition [14] is
687
+ 1
688
+ nϵ +
689
+
690
+ 1
691
+ (1−λ)1/2 +
692
+ 1
693
+ (1−λ)√n
694
+
695
+ 1
696
+ √ϵ +
697
+ 1
698
+ 1−λ log
699
+
700
+ (a0+ς2
701
+ ⋆)
702
+ ϵ
703
+
704
+ , which has
705
+ an additional term
706
+ 1
707
+ (1−λ)√n
708
+ 1
709
+ √ϵ compared to our strongly-convex
710
+ rate.
711
+
712
+ Remark 3 (Comparison with Exact-diffusion/D2 [12]). For
713
+ the convex case, the difference with Exact-diffusion/D2 [12] is in
714
+ the highest order term. Exact-diffusion/D2 is
715
+
716
+ a0
717
+ (1−λ) + ς2
718
+
719
+
720
+ 1
721
+ K
722
+ while GT is
723
+
724
+ a0
725
+ (1−λ) +
726
+ ς2
727
+
728
+ (1−λ)
729
+
730
+ 1
731
+ K where GT has 1/(1 − λ) mul-
732
+ tiplied by ς2
733
+ ⋆, which is slightly worse than Exact-diffusion/D2.
734
+ A similar conclusion can be reached for the strongly-convex
735
+ scenario.
736
+
737
+ V. Simulation results
738
+ This section will present several numerical simulations that
739
+ compare Gradient-tracking with centralized SGD (CSGD) and
740
+ decentralized SGD (DSGD).
741
+ Linear regression. We consider solving a strongly-convex
742
+ problem (1) with fi(x) =
743
+ 1
744
+ 2E(aT
745
+ i x − bi)2 in which random
746
+ variable ai ∼ N(0, Id), bi = aT
747
+ i x⋆
748
+ i + ni for some local so-
749
+ lution x⋆
750
+ i ∈ Rd and ni ∼ N(0, σ2
751
+ n). The stochastic gradient
752
+ is calculated as ∇Fi(x) = ai(aT
753
+ i x − bi). Each local solution
754
+ x⋆
755
+ i = x⋆ +vi is generated using the formula x⋆
756
+ i = x⋆ +vi, where
757
+ x⋆ ∼ N(0, Id) is a randomly generated global solution while
758
+ vi ∼ N(0, σ2
759
+ vId) controls similarities between local solutions.
760
+ Generally speaking, a large σ2
761
+ v will result in local solutions
762
+ {x⋆
763
+ i }n
764
+ i=1 that are vastly different from one another. We used
765
+ d = 5, σ2
766
+ n = 0.01, and σ2
767
+ v = 1 in simulations. Experiments
768
+ are carried out on ring and exponential graphs of size n = 30,
769
+ respectively. Each algorithm’s stepsize (learning rate) is care-
770
+ fully tuned so that they all converge to the same relative mean-
771
+ square-error. Each simulation is run 30 times, with the solid line
772
+ representing average performance and the shadow representing
773
+ 0
774
+ 100
775
+ 200
776
+ 300
777
+ 400
778
+ iteration
779
+ 10
780
+ 4
781
+ 10
782
+ 3
783
+ 10
784
+ 2
785
+ 10
786
+ 1
787
+ 100
788
+ relative error
789
+ Exponential graph with 30 nodes
790
+ CSGD
791
+ DSGD
792
+ Gradient-Tracking
793
+ 0
794
+ 250
795
+ 500
796
+ 750
797
+ 1000 1250 1500 1750 2000
798
+ iteration
799
+ 10
800
+ 5
801
+ 10
802
+ 4
803
+ 10
804
+ 3
805
+ 10
806
+ 2
807
+ 10
808
+ 1
809
+ 100
810
+ relative error
811
+ Ring graph with 30 nodes
812
+ CSGD
813
+ DSGD
814
+ Gradient-Tracking
815
+ Fig. 1: Comparison between different algorithms over exponential
816
+ and ring graphs when solving distributed linear regression with
817
+ heterogeneous data distributions. The spectral gap 1 − λ is 0.33
818
+ and 0.0146 for exponential and ring graphs, respectively.
819
+ 0
820
+ 200
821
+ 400
822
+ 600
823
+ 800
824
+ iteration
825
+ 10
826
+ 4
827
+ 10
828
+ 3
829
+ 10
830
+ 2
831
+ 10
832
+ 1
833
+ 100
834
+ relative error
835
+ Exponential graph with 30 nodes
836
+ CSGD
837
+ DSGD
838
+ Gradient-Tracking
839
+ 0
840
+ 200
841
+ 400
842
+ 600
843
+ 800
844
+ 1000 1200 1400 1600
845
+ iteration
846
+ 10
847
+ 4
848
+ 10
849
+ 3
850
+ 10
851
+ 2
852
+ 10
853
+ 1
854
+ 100
855
+ relative error
856
+ Ring graph with 30 nodes
857
+ CSGD
858
+ DSGD
859
+ Gradient-Tracking
860
+ Fig. 2: Comparison between different algorithms over exponential
861
+ and ring graphs when solving distributed logistic regression.
862
+ standard deviation. The results are depicted in Fig. 1. The rela-
863
+ tive error is shown on the y-axis as 1
864
+ n
865
+ �n
866
+ i=1 E∥xk
867
+ i −x⋆∥2/∥x⋆∥2.
868
+ When running over the exponential graph which has a well-
869
+ connected topology with 1 − λ = 0.33, it is observed that
870
+ both DSGD and Gradient-tracking perform similarly to CSGD.
871
+ However, when running over the ring graph which has a badly-
872
+ connected topology with 1 − λ = 0.0146, DSGD gets far
873
+ slower than CSGD due to its sensitivity to network topology.
874
+ In contrast, Gradient-tracking just gets a little bit slower than
875
+ CSGD and performs far better than DSGD. This phenomenon
876
+ coincides with our established complexity bound in Table I
877
+ showing that GT has a much weaker dependence on network
878
+ topology (i.e., 1 − λ).
879
+ Logistic regression. We next consider the logistic regres-
880
+ sion problem, which has fi(x) = E ln(1 + exp(−yihT
881
+ i x)) where
882
+ (hi, yi) represents the training dataset stored in node i with
883
+ hi ∈ Rd as the feature vector and yi ∈ −{1, +1} as the label.
884
+ This is a convex but not strongly-convex problem. Similar to
885
+ the linear regression experiments, we will first generate a local
886
+ solution x⋆
887
+ i based on x⋆
888
+ i = x⋆ + vi using vi ∼ N(0, σ2
889
+ vId). We
890
+ can generate local data that follows distinct distributions using
891
+ x⋆
892
+ i . To this end, we generate each feature vector hi ∼ N(0, Id)
893
+ at node i. To produce the corresponding label yi, we create a
894
+ random variable zi ∼ U(0, 1). If zi ≤ 1/(1 + exp(−hT
895
+ i x⋆
896
+ i )), we
897
+ set yi = 1; otherwise yi = −1. Clearly, solution x⋆
898
+ i controls
899
+ the distribution of the labels. By adjusting σ2
900
+ v, we can easily
901
+ control data heterogeneity. The remaining parameters are the
902
+ same as in linear regression experiments. The performances
903
+ of each algorithm in logistic regression depicted in Fig. 2 are
904
+ consistent with that in linear regression, i.e., Gradient-tracking
905
+ performs well for both graphs while DSGD has a significantly
906
+ deteriorated performance over the ring graph due to its lower
907
+ robustness to network topology.
908
+ Appendix A
909
+ Decomposed Error Recursion
910
+
911
+ 5
912
+ Proof of Lemma 2
913
+ Using the decomposition (13) and B = I − W:
914
+ W2 = UΣ2U−1 = �
915
+ 1 ⊗ Id
916
+ ˆU
917
+ � �
918
+ Id
919
+ 0
920
+ 0
921
+ Λ2
922
+ � � 1
923
+ n1T ⊗ Id
924
+ ˆUT
925
+
926
+ (22a)
927
+ B = U(I − Σ)U−1 = �
928
+ 1 ⊗ Id
929
+ ˆU
930
+ � �
931
+ 0
932
+ 0
933
+ 0
934
+ I − Λ
935
+ � � 1
936
+ n1T ⊗ Id
937
+ ˆUT
938
+
939
+ ,
940
+ (22b)
941
+ with I − Λ > 0. Substituting (22) into (11) and multiplying
942
+ both sides by blkdiag{U−1, U−1} on the left, we obtain
943
+
944
+ U−1˜xk+1
945
+ U−1˜zk+1
946
+
947
+ =
948
+
949
+ 2Σ2 − I
950
+ −(I − Σ)
951
+ I − Σ
952
+ I
953
+ � �
954
+ U−1˜xk
955
+ U−1˜zk
956
+
957
+ − α
958
+
959
+ Σ2U−1�
960
+ ∇f(xk) − ∇f(x⋆) + vk�
961
+ 0
962
+
963
+ .
964
+ (23)
965
+ Since ˜zk always lies in the range space of B, we have (1T
966
+ n ⊗
967
+ Id)˜zk = 0 for all k. Using, the structure of U from (13) and the
968
+ definitions (12), we have
969
+ U−1˜xk =
970
+
971
+ ¯ek
972
+ x
973
+ ˆUT˜xk
974
+
975
+ ,
976
+ U−1˜zk =
977
+
978
+ 0
979
+ ˆUT˜zk
980
+
981
+ U−1∇f(x) =
982
+
983
+ ∇f(xk)
984
+ ˆUT∇f(x)
985
+
986
+ .
987
+ Thus, by using the structure of Σ2 and Σ2
988
+ b given in (22), we
989
+ can rewrite (23) as
990
+ ¯ek+1
991
+ x
992
+ = ¯ek
993
+ x − α�
994
+ ∇f(xk) − ∇f(x⋆)�
995
+ (24a)
996
+ � ˆUT˜xk+1
997
+ ˆUT˜zk+1
998
+
999
+ =
1000
+
1001
+ 2Λ − I
1002
+ −(I − Λ)
1003
+ I − Λ
1004
+ I
1005
+ � � ˆUT˜xk
1006
+ ˆUT˜zk
1007
+
1008
+ − α
1009
+
1010
+ Λ2 ˆUT�
1011
+ ∇f(xk) − ∇f(x⋆) + vk�
1012
+ 0
1013
+
1014
+ .
1015
+ (24b)
1016
+ Let
1017
+ G ≜
1018
+
1019
+ 2Λ − I
1020
+ −(I − Λ)
1021
+ I − Λ
1022
+ I
1023
+
1024
+ .
1025
+ (25)
1026
+ It is important to note that the matrix G is identical to the one
1027
+ studied in [14] (for nonconvex case). Therefore, following the
1028
+ same arguments used in [14, Appendix B], we can decompose it
1029
+ as G = ˆVΓ ˆV−1 for matrices ˆV and Γ satisfying the conditions
1030
+ in the lemma. Multiplying the second equation in (24) by ˆV−1,
1031
+ we arrive at (15).
1032
+ Appendix B
1033
+ Coupled Error Inequalities
1034
+ Proof of Lemma 3
1035
+ Proof of inequality (18)
1036
+ The proof adjusts the argument from [31, Lemma 8]. Using
1037
+ (15a) and Assumption 3, we have
1038
+ E[∥¯ek+1
1039
+ x
1040
+ ∥2|F k]
1041
+ = ∥¯ek
1042
+ x − α
1043
+ n
1044
+ �n
1045
+ i=1(∇fi(xk
1046
+ i ) − ∇fi(x⋆))∥2 + α2 E[∥¯vk∥2|F k]
1047
+ ≤ ∥¯ek
1048
+ x − α
1049
+ n
1050
+ �n
1051
+ i=1(∇fi(xk
1052
+ i ) − ∇fi(x⋆))∥2 + α2σ2
1053
+ n
1054
+ = ∥¯ek
1055
+ x∥2 + α2∥ 1
1056
+ n
1057
+ n�
1058
+ i=1
1059
+ (∇fi(xk
1060
+ i ) − ∇fi(x⋆))∥2
1061
+ − 2α
1062
+ n
1063
+ n�
1064
+ i=1
1065
+
1066
+ ∇fi(xk
1067
+ i ), ¯ek
1068
+ x
1069
+
1070
+ + α2σ2
1071
+ n
1072
+ ,
1073
+ (26)
1074
+ where we used �n
1075
+ i=1 ∇fi(x⋆) = 0. The second term on the right
1076
+ can be bounded as follows:
1077
+ α2∥ 1
1078
+ n
1079
+ n�
1080
+ i=1
1081
+
1082
+ ∇fi(xk
1083
+ i ) − ∇fi(¯xk) + ∇fi(¯xk) − ∇fi(x⋆)�
1084
+ ∥2
1085
+ ≤ 2α2∥ 1
1086
+ n
1087
+ n�
1088
+ i=1
1089
+ (∇fi(xk
1090
+ i ) − ∇fi(¯xk))∥2
1091
+ + 2α2∥ 1
1092
+ n
1093
+ n�
1094
+ i=1
1095
+ (∇fi(¯xk) − ∇fi(x⋆))∥2
1096
+ ≤ 2α2
1097
+ n
1098
+ n�
1099
+ i=1
1100
+ ∥∇fi(xk
1101
+ i ) − ∇fi(¯xk)∥2
1102
+ (27)
1103
+ + 2α2∥∇f(¯xk) − ∇f(x⋆)∥2
1104
+ ≤ 2α2L2
1105
+ n
1106
+ ∥xk − 1 ⊗ ¯xk∥2 + 2α2∥∇f(¯xk) − ∇f(x⋆)∥2
1107
+ ≤ 2α2L2
1108
+ n
1109
+ ∥xk − 1 ⊗ ¯xk∥2 + 4Lα2(f(¯xk) − f(x⋆)),
1110
+ (28)
1111
+ where the first two inequalities follow from Jensen’s inequal-
1112
+ ity. The third inequality follows from the Lipschitz gradient
1113
+ assumption. In the last inequality, we used the L-smoothness
1114
+ property of the aggregate function [32]:
1115
+ ∥∇f(¯xk) − ∇f(x⋆)∥2 ≤ 2L�
1116
+ f(¯xk) − f(x⋆)�
1117
+ .
1118
+ Note that for L-smooth and µ-strongly-convex function f, it
1119
+ holds that [32]:
1120
+ f(x) − f(y) − L
1121
+ 2 ∥x − y∥2 ≤ ⟨∇f(y), (x − y)⟩
1122
+ (29a)
1123
+ f(x) − f(y) + µ
1124
+ 2 ∥x − y∥2 ≤ ⟨∇f(x), (x − y)⟩.
1125
+ (29b)
1126
+ Using these inequalities, the cross term in (28) can be bounded
1127
+ by
1128
+ − 2α
1129
+ n
1130
+ n�
1131
+ i=1
1132
+ ⟨∇fi(xk
1133
+ i ), ¯ek
1134
+ x⟩
1135
+ = 2α
1136
+ n
1137
+ n�
1138
+ i=1
1139
+
1140
+ − ⟨∇fi(xk
1141
+ i ), ¯xk − xk
1142
+ i ⟩ − ⟨∇fi(xk
1143
+ i ), xk
1144
+ i − x⋆⟩�
1145
+ ≤ 2α
1146
+ n
1147
+ n�
1148
+ i=1
1149
+
1150
+ − fi(¯xk) + fi(xk
1151
+ i ) + L
1152
+ 2 ∥¯xk − xk
1153
+ i ∥2
1154
+ − µ
1155
+ 2 ∥xk
1156
+ i − x⋆∥2 − fi(xk
1157
+ i ) + fi(x⋆)
1158
+
1159
+ ≤ −2α�
1160
+ f(¯xk) − f(x⋆)�
1161
+ + Lα
1162
+ n
1163
+ n�
1164
+ i=1
1165
+ ∥¯xk − xk
1166
+ i ∥2 − µα∥¯xk − x⋆∥2
1167
+ = −2α�
1168
+ f(¯xk) − f(x⋆)�
1169
+ + Lα
1170
+ n ∥xk − 1 ⊗ ¯xk∥2 − µα∥¯ek
1171
+ x∥2,
1172
+ (30)
1173
+ where the last inequality holds due to − 1
1174
+ n
1175
+ �n
1176
+ i=1 ∥xk
1177
+ i − x⋆∥2 ≤
1178
+ −∥ 1
1179
+ n
1180
+ �n
1181
+ i=1(xk
1182
+ i −x⋆)∥2. Substituting (28) and (30) into (26) and
1183
+ taking expectation, we obtain:
1184
+ E ∥¯ek+1
1185
+ x
1186
+ ∥2 ≤ (1 − µα) E ∥¯ek
1187
+ x∥2 − 2α(1 − 2Lα) E �
1188
+ f(¯xk) − f(x⋆)�
1189
+ + αL
1190
+ n (1 + 2αL) E ∥xk − 1 ⊗ ¯xk∥2 + α2σ2
1191
+ n
1192
+ ≤ (1 − µα) E ∥¯ek
1193
+ x∥2 − α�
1194
+ E f(¯xk) − f(x⋆)�
1195
+ + 3Lα
1196
+ 2n E ∥xk − 1 ⊗ ¯xk∥2 + α2σ2
1197
+ n
1198
+ ,
1199
+ (31)
1200
+ where the last step uses α
1201
+
1202
+ 1
1203
+ 4L. Using (14), we have
1204
+ ∥ ˆUT˜xk∥2 = ∥ ˆUT ˆU˜xk∥2 = ∥xk − 1 ⊗ ¯xk∥2. Hence,
1205
+ ∥xk − 1 ⊗ ¯xk∥2 (16)
1206
+ = ∥ ˆVˆxk∥2 − ∥ ˆUT˜zk∥2 ≤ ∥ ˆV∥2∥ˆxk∥2.
1207
+ (32)
1208
+ Substituting the above into (31) yields (18).
1209
+
1210
+ 6
1211
+ Proof of inequality (19)
1212
+ From (15b), we have
1213
+ E[∥ˆxk+1∥2|F k]
1214
+ = E
1215
+ ���Γˆxk − α ˆV−1
1216
+ l
1217
+ Λ2 ˆUT�
1218
+ ∇f(xk) − ∇f(x⋆) + vk�
1219
+ |F k���
1220
+ 2
1221
+ (7a)
1222
+ =
1223
+ ���Γˆxk − α ˆV−1
1224
+ l
1225
+ Λ2 ˆUT�
1226
+ ∇f(xk) − ∇f(x⋆)����
1227
+ 2
1228
+ + α2 E
1229
+ ��� ˆV−1
1230
+ l
1231
+ Λ2 ˆUTvk��F k���
1232
+ 2
1233
+ (7b)
1234
+
1235
+ ���Γˆxk − α ˆV−1
1236
+ l
1237
+ Λ2 ˆUT�
1238
+ ∇f(xk) − ∇f(x⋆)����
1239
+ 2
1240
+ + α2∥ ˆV−1
1241
+ l
1242
+ ∥2∥Λ2∥2∥ ˆUT∥2nσ2.
1243
+ Now, for any vectors a and b, it holds from Jensen’s inequality
1244
+ that ∥a + b∥2 ≤ (1/θ)∥a∥2 + (1/(1 − θ))∥b∥2 for any θ ∈ (0, 1). Utilizing
1248
+ this bound with θ = γ ≜ ∥Γ∥ on the first term of the previous
1249
+ inequality, we get
1250
+ E[∥ˆxk+1∥2|F k]
1251
+ ≤ γ∥ˆxk∥2 +
1252
+ α2∥ ˆ
1253
+ V−1
1254
+ l
1255
+ ∥2∥Λ2∥2∥ ˆ
1256
+ UT∥2
1257
+ (1−γ)
1258
+ ∥∇f(xk) − ∇f(x⋆)∥2
1259
+ + α2∥ ˆV−1
1260
+ l
1261
+ ∥2∥Λ2∥2∥ ˆUT∥2nσ2.
1262
+ Taking expectation and using ∥ ˆUT∥ ≤ 1, ∥ ˆV−1
1263
+ l
1264
+ ∥2 ≤ ∥ ˆV−1∥2,
1265
+ and ∥Λ2∥2 ≤ λ4 yield our result (19).
1266
+ Appendix C
1267
+ Proof of Theorem 1
1268
+ Using similar argument to (28) and (32), it holds that
1269
+ ∥∇f(xk) − ∇f(x⋆)∥2
1270
+ ≤ 2∥∇f(1 ⊗ ¯xk) − ∇f(x⋆)∥2 + 2∥∇f(xk) − ∇f(1 ⊗ ¯xk)∥2
1271
+ ≤ 4nL[f(¯xk) − f(x⋆)] + 2c2
1272
+ 1L2∥ˆxk∥2.
1273
+ Plugging the above bound into (19) gives
1274
+ E ∥ˆxk+1∥2 ≤
1275
+
1276
+ γ +
1277
+ 2α2c2
1278
+ 1c2
1279
+ 2L2λ4
1280
+ (1−γ)
1281
+
1282
+ E ∥ˆxk∥2
1283
+ +
1284
+ 4α2c2
1285
+ 2Lλ4n
1286
+ (1−γ)
1287
+ E ˜f(¯xk) + α2c2
1288
+ 2λ4nσ2
1289
+ ≤ ¯γ E ∥ˆxk∥2 +
1290
+ 4α2c2
1291
+ 2Lλ4n
1292
+ (1−γ)
1293
+ E ˜f(¯xk) + α2c2
1294
+ 2λ4nσ2,
1295
+ where ˜f(¯xk) ≜ f(¯xk)−f(x⋆), ¯γ ≜ 1+γ
1296
+ 2 , and the last inequiality
1297
+ holds when γ +
1298
+ 2α2c2
1299
+ 1c2
1300
+ 2L2λ4
1301
+ (1−γ)
1302
+ ≤ 1+γ
1303
+ 2 , which is satisfied for
1304
+ α ≤
1305
+ 1 − λ
1306
+ 4c1c2Lλ2 .
1307
+ (33)
1308
+ Iterating the last recursion (for any k = 1, 2, . . . ) gives
1309
+ E ∥ˆxk∥2 ≤ ¯γk∥ˆx0∥2 +
1310
+ 4α2c2
1311
+ 2Lλ4n
1312
+ (1−γ)
1313
+ k−1
1314
+
1315
+ ℓ=0
1316
+ ¯γk−1−ℓ E ˜f(¯xℓ)
1317
+ +
1318
+ k−1
1319
+
1320
+ ℓ=0
1321
+ ¯γk−1−ℓ �
1322
+ α2c2
1323
+ 2λ4nσ2�
1324
+ ≤ ¯γk∥ˆx0∥2 +
1325
+ 4α2c2
1326
+ 2Lλ4n
1327
+ (1−γ)
1328
+ k−1
1329
+
1330
+ ℓ=0
1331
+ ¯γk−1−ℓ E ˜f(¯xℓ)
1332
+ +
1333
+ α2c2
1334
+ 2λ4nσ2
1335
+ 1−¯γ
1336
+ .
1337
+ (34)
1338
+ In the last inequality we used �k−1
1339
+ ℓ=0 ¯γk−1−ℓ ≤
1340
+ 1
1341
+ 1−¯γ . Averaging
1342
+ over k = 1, 2 . . . , K and using ¯γ = 1+γ
1343
+ 2 , it holds that
1344
+ 1
1345
+ K
1346
+ K
1347
+
1348
+ k=1
1349
+ E ∥ˆxk∥2
1350
+
1351
+ 2∥ˆx0∥2
1352
+ (1−γ)K +
1353
+ 4α2c2
1354
+ 2Lλ4n
1355
+ (1−γ)K
1356
+ K
1357
+
1358
+ k=1
1359
+ k−1
1360
+
1361
+ ℓ=0
1362
+ � 1+γ
1363
+ 2
1364
+ �k−1−ℓ E ˜f(¯xℓ) +
1365
+ 2α2c2
1366
+ 2λ4nσ2
1367
+ 1−γ
1368
+
1369
+ 2∥ˆx0∥2
1370
+ (1−γ)K +
1371
+ 8α2c2
1372
+ 2Lλ4n
1373
+ (1−γ)2K
1374
+ K−1
1375
+
1376
+ k=0
1377
+ E ˜f(¯xk) +
1378
+ 2α2c2
1379
+ 2λ4nσ2
1380
+ 1−γ
1381
+ .
1382
+ (35)
1383
+ It follows that
1384
+ 1
1385
+ K
1386
+ K−1
1387
+
1388
+ k=0
1389
+ E ∥ˆxk∥2 ≤
1390
+ 3∥ˆx0∥2
1391
+ (1 − γ)K +
1392
+ 8α2c2
1393
+ 2Lλ4n
1394
+ (1−γ)2K
1395
+ K−1
1396
+
1397
+ k=0
1398
+ E ˜f(¯xk)
1399
+ + 2α2c2
1400
+ 2λ4nσ2
1401
+ 1 − γ
1402
+ .
1403
+ (36)
1404
+ where we added
1405
+ ∥ˆx0∥2
1406
+ (1−γ)K and used ∥ˆx0∥2
1407
+ K
1408
+
1409
+ ∥ˆx0∥2
1410
+ (1−γ)K . Now when
1411
+ µ = 0, we can rearrange (18) to get
1412
+ E(f(¯xk) − f(x⋆)) ≤ 1
1413
+ α
1414
+
1415
+ E ∥¯ek
1416
+ x∥2 − E ∥¯ek+1
1417
+ x
1418
+ ∥2�
1419
+ + 3c2
1420
+ 1L
1421
+ 2n E ∥ˆxk∥2 + ασ2
1422
+ n .
1423
+ (37)
1424
+ Averaging over k = 0, . . . , K − 1 (K ≥ 1), it holds that
1425
+ 1
1426
+ K
1427
+ K−1
1428
+
1429
+ k=0
1430
+ E ˜f(¯xk) ≤
1431
+ ∥¯e0
1432
+ x∥2
1433
+ αK
1434
+ +
1435
+ 3c2
1436
+ 1L
1437
+ 2nK
1438
+ K−1
1439
+
1440
+ k=0
1441
+ E ∥ˆxk∥2 + ασ2
1442
+ n .
1443
+ (38)
1444
+ Multiplying inequality (36) by 2 ×
1445
+ 3c2
1446
+ 1L
1447
+ 2n , adding to (38), and
1448
+ rearranging we obtain
1449
+
1450
+ 1 −
1451
+ 24α2c2
1452
+ 1c2
1453
+ 2L2λ4
1454
+ (1−γ)2
1455
+ � 1
1456
+ K
1457
+ K−1
1458
+
1459
+ k=0
1460
+ E ˜f(¯xk) +
1461
+ 3c2
1462
+ 1L
1463
+ 2nK
1464
+ K−1
1465
+
1466
+ k=0
1467
+ E ∥ˆxk∥2
1468
+ ≤ ∥¯e0
1469
+ x∥2
1470
+ αK
1471
+ + 9c2
1472
+ 1L∥ˆx0∥2
1473
+ (1 − γ)nK + ασ2
1474
+ n
1475
+ + 6α2c2
1476
+ 1c2
1477
+ 2Lλ4σ2
1478
+ 1 − γ
1479
+ .
1480
+ (39)
1481
+ Notice from (16) that
1482
+ ∥ˆx0∥2 ≤ ∥ ˆV−1∥2 �
1483
+ ∥ ˆUT˜x0∥2 + ∥ ˆUT˜z0∥2�
1484
+ .
1485
+ (40)
1486
+ If we start from consensual initialization x0 = 1 ⊗ x0 and use
1487
+ the fact z0 = 0, the above reduces to
1488
+ ∥ˆx0∥2 ≤ ∥ ˆV−1∥2∥ ˆUTz⋆∥2 ≤ α2c2
1489
+ 2λ4
1490
+ (1 − λ)2 ∥ ˆUT∇f(x⋆)∥2,
1491
+ (41)
1492
+ where the last step holds by using (9) and (22), which implies
1493
+ that ˆUTz⋆ = α(I − Λ)−1Λ2 ˆUT∇f(x⋆). Plugging the previous
1494
+ inequality into (39) and setting 1
1495
+ 2 ≤ 1 −
1496
+ 24α2c2
1497
+ 1c2
1498
+ 2L2λ4
1499
+ (1−γ)2
1500
+ , i.e.,
1501
+ α ≤
1502
+ 1 − λ
1503
+ 4
1504
+
1505
+ 6c1c2Lλ2 ,
1506
+ (42)
1507
+ gives
1508
+ 1
1509
+ K
1510
+ K−1
1511
+
1512
+ k=0
1513
+ Ek ≤ ∥¯e0
1514
+ x∥2
1515
+ αK
1516
+ + a1α + a2α2
1517
+
1518
+ ��
1519
+
1520
+ ≜ΨK
1521
+ +a⋆α2
1522
+ K
1523
+ ,
1524
+ (43)
1525
+ where we defined Ek ≜ 1
1526
+ 2 E ˜f(¯xk) +
1527
+ 3c2
1528
+ 1L
1529
+ 2n E ∥ˆxk∥2 and
1530
+ a⋆ ≜ 18c2
1531
+ 1c2
1532
+ 2Lλ4∥ ˆUT∇f(x⋆)∥2
1533
+ (1 − λ)3n
1534
+ (44a)
1535
+ a1 ≜ σ2
1536
+ n
1537
+ a2 ≜ 12c2
1538
+ 1c2
1539
+ 2Lλ4σ2
1540
+ 1 − λ
1541
+ .
1542
+ (44b)
1543
+ We now select the stepsize α to arrive at our result in a manner
1544
+ similar to [31]. First note that the previous inequality holds for
1545
+ α ≤ 1
1546
+ α ≜ min
1547
+
1548
+ 1
1549
+ 4L,
1550
+ 1 − λ
1551
+ 4
1552
+
1553
+ 6c1c2Lλ2
1554
+
1555
+ .
1556
+ (45)
1557
+
1558
+ 7
1559
+ Setting α = min
1560
+ ��
1561
+ ∥¯e0
1562
+ x∥2
1563
+ a1K
1564
+ � 1
1565
+ 2 ,
1566
+
1567
+ ∥¯e0
1568
+ x∥2
1569
+ a2K
1570
+ � 1
1571
+ 3 , 1
1572
+ α
1573
+
1574
+
1575
+ 1
1576
+ α we have
1577
+ three cases: i) If α = 1
1578
+ α, which is smaller than both
1579
+
1580
+ ∥¯e0
1581
+ x∥2
1582
+ a1K
1583
+ � 1
1584
+ 2
1585
+ and
1586
+
1587
+ ∥¯e0
1588
+ x∥2
1589
+ a2K
1590
+ � 1
1591
+ 3 , then
1592
+ ΨK = α∥¯e0
1593
+ x∥2
1594
+ K
1595
+ + a1
1596
+ α + a2
1597
+ α2
1598
+ ≤ α∥¯e0
1599
+ x∥2
1600
+ K
1601
+ +
1602
+
1603
+ a1∥¯e0
1604
+ x∥2
1605
+ K
1606
+ � 1
1607
+ 2
1608
+ + a
1609
+ 1
1610
+ 3
1611
+ 2
1612
+
1613
+ ∥¯e0
1614
+ x∥2
1615
+ K
1616
+ � 2
1617
+ 3
1618
+ ;
1619
+ ii) If α =
1620
+
1621
+ ∥¯e0
1622
+ x∥2
1623
+ a1K
1624
+ � 1
1625
+ 2 <
1626
+
1627
+ ∥¯e0
1628
+ x∥2
1629
+ a2K
1630
+ � 1
1631
+ 3 , then
1632
+ ΨK ≤ 2
1633
+
1634
+ a1∥¯e0
1635
+ x∥2
1636
+ K
1637
+ � 1
1638
+ 2
1639
+ + a2
1640
+
1641
+ ∥¯e0
1642
+ x∥2
1643
+ a1K
1644
+
1645
+ ≤ 2
1646
+
1647
+ a1∥¯e0
1648
+ x∥2
1649
+ K
1650
+ � 1
1651
+ 2
1652
+ + a
1653
+ 1
1654
+ 3
1655
+ 2
1656
+
1657
+ ∥¯e0
1658
+ x∥2
1659
+ K
1660
+ � 2
1661
+ 3
1662
+ ;
1663
+ iii) If α =
1664
+
1665
+ ∥¯e0
1666
+ x∥2
1667
+ a2K
1668
+ � 1
1669
+ 3 <
1670
+
1671
+ ∥¯e0
1672
+ x∥2
1673
+ a1K
1674
+ � 1
1675
+ 2 , then
1676
+ ΨK ≤ 2a
1677
+ 1
1678
+ 3
1679
+ 2
1680
+
1681
+ ∥¯e0
1682
+ x∥2
1683
+ K
1684
+ � 2
1685
+ 3
1686
+ + a1
1687
+
1688
+ ∥¯e0
1689
+ x∥2
1690
+ a2K
1691
+ � 1
1692
+ 3
1693
+ ≤ 2a
1694
+ 1
1695
+ 3
1696
+ 2
1697
+
1698
+ ∥¯e0
1699
+ x∥2
1700
+ K
1701
+ � 2
1702
+ 3
1703
+ +
1704
+
1705
+ a1∥¯e0
1706
+ x∥2
1707
+ K
1708
+ � 1
1709
+ 2
1710
+ .
1711
+ Combining the above cases, we have
1712
+ ΨK ≤ 2
1713
+
1714
+ a1∥¯e0
1715
+ x∥2
1716
+ K
1717
+ � 1
1718
+ 2
1719
+ + 2a1/3
1720
+ 2
1721
+
1722
+ ∥¯e0
1723
+ x∥2
1724
+ K
1725
+ � 2
1726
+ 3
1727
+ + α∥¯e0
1728
+ x∥2
1729
+ K
1730
+ .
1731
+ Therefore, substituting into (43) we conclude that
1732
+ 1
1733
+ K
1734
+ K−1
1735
+
1736
+ k=0
1737
+ Ek ≤ 2
1738
+
1739
+ a1∥¯e0
1740
+ x∥2
1741
+ K
1742
+ � 1
1743
+ 2 + 2a
1744
+ 1
1745
+ 3
1746
+ 2
1747
+
1748
+ ∥¯e0
1749
+ x∥2
1750
+ K
1751
+ � 2
1752
+ 3
1753
+ +
1754
+ (α∥¯e0
1755
+ x∥2 + a⋆
1756
+ α2 )
1757
+ K
1758
+ .
1759
+ Plugging the constants (44) and the upper bound for α in (45),
1760
+ and using ς2
1761
+ ⋆ = 1
1762
+ n∥ ˆUT∇f(x⋆)∥2 = 1
1763
+ n
1764
+ �n
1765
+ i=1 ∥∇fi(x⋆)−∇f(x⋆)∥2
1766
+ yields our rate (20).
1767
+ Appendix D
1768
+ Proof of Theorem 2
1769
+ Substituting the bound
1770
+ ∥∇f(xk) − ∇f(x⋆)∥2 ≤ L2∥xk − x⋆∥2
1771
+ ≤ 2L2∥xk − 1 ⊗ ¯xk∥2 + 2L2∥1 ⊗ ¯xk − x⋆∥2
1772
+ ≤ 2L2c2
1773
+ 1∥ˆxk∥2 + 2nL2∥¯ek
1774
+ x∥2,
1775
+ into (19), we get
1776
+ E ∥ˆxk+1∥2
1777
+
1778
+
1779
+ γ +
1780
+ 2α2c2
1781
+ 1c2
1782
+ 2L2λ4
1783
+ (1−γ)
1784
+
1785
+ E ∥ˆxk∥2 +
1786
+ 2α2c2
1787
+ 2L2λ4n
1788
+ (1−γ)
1789
+ ∥¯ek
1790
+ x∥2 + α2c2
1791
+ 2λ4nσ2
1792
+
1793
+ �1 + γ
1794
+ 2
1795
+
1796
+ E ∥ˆxk∥2 +
1797
+ 2α2c2
1798
+ 2L2λ4n
1799
+ (1−γ)
1800
+ ∥¯ek
1801
+ x∥2 + α2c2
1802
+ 2λ4nσ2,
1803
+ (46)
1804
+ where we used condition (33) in the last inequality. Using
1805
+ −α�
1806
+ E f(¯xk) − f(x⋆)�
1807
+ ≤ 0 in (18) and combining with above,
1808
+ it holds that
1809
+ � E ∥¯ek+1
1810
+ x
1811
+ ∥2
1812
+ c2
1813
+ 1
1814
+ n E ∥ˆxk+1∥2
1815
+
1816
+
1817
+
1818
+ 1 − µα
1819
+ 3
1820
+ 2αL
1821
+ 2α2c2
1822
+ 1c2
1823
+ 2L2λ4
1824
+ (1−γ)
1825
+ 1+γ
1826
+ 2
1827
+
1828
+
1829
+ ��
1830
+
1831
+ ≜A
1832
+ � E ∥¯ek
1833
+ x∥2
1834
+ c2
1835
+ 1
1836
+ n E ∥ˆxk∥2
1837
+
1838
+ +
1839
+
1840
+ α2σ2
1841
+ n
1842
+ α2c2
1843
+ 1c2
1844
+ 2λ4σ2
1845
+
1846
+
1847
+ ��
1848
+
1849
+ ≜b
1850
+ .
1851
+ (47)
1852
+ The spectral radius of the matrix A can be upper bounded by:
1853
+ ρ(A) ≤ ∥A∥1 = max
1854
+
1855
+ 1 − µα +
1856
+ 2c2
1857
+ 1c2
1858
+ 2α2L2λ4
1859
+ (1−γ)
1860
+ ,
1861
+ 1+γ
1862
+ 2
1863
+ + 3
1864
+ 2Lα
1865
+
1866
+ ≤ 1 − µα
1867
+ 2 ,
1868
+ (48)
1869
+ where the last inequality holds under the stepsize condition:
1870
+ α ≤ min
1871
+
1872
+ µ(1 − γ)
1873
+ 4c2
1874
+ 1c2
1875
+ 2L2λ4 , 1 − γ
1876
+ 3L + µ
1877
+
1878
+ .
1879
+ (49)
1880
+ Since ρ(A) < 1, we can iterate inequality (47) to get
1881
+ � E ∥¯ek
1882
+ x∥2
1883
+ c2
1884
+ 1
1885
+ n E ∥ˆxk∥2
1886
+
1887
+ ≤ Ak
1888
+ � E ∥¯e0
1889
+ x∥2
1890
+ c2
1891
+ 1
1892
+ n E ∥ˆx0∥2
1893
+
1894
+ +
1895
+ k−1
1896
+
1897
+ ℓ=0
1898
+ Aℓb
1899
+ ≤ Ak
1900
+ � E ∥¯e0
1901
+ x∥2
1902
+ c2
1903
+ 1
1904
+ n E ∥ˆx0∥2
1905
+
1906
+ + (I − A)−1b.
1907
+ (50)
1908
+ Taking the (induced) 1-norm, using the sub-multiplicative
1909
+ properties of matrix induced norms, it holds that
1910
+ E ∥¯ek
1911
+ x∥2 +
1912
+ c2
1913
+ 1
1914
+ n E ∥ˆxk∥2 ≤ ∥Ak∥1˜a0 +
1915
+ ��(I − A)−1b
1916
+ ��
1917
+ 1
1918
+ ≤ ∥A∥k
1919
+ 1˜a0 +
1920
+ ��(I − A)−1b
1921
+ ��
1922
+ 1 .
1923
+ (51)
1924
+ where ã₀ = E∥x̄⁰ − x⋆∥² + (c₁²/n) E∥x̂⁰∥². We now bound the last
+ term by noting that
+ (I − A)⁻¹ b = (1/det(I − A)) [ (1−γ)/2 , (3/2)αL ; 2α²c₁²c₂²L²λ⁴/(1−γ) , µα ] b
+ = ( 1 / ( αµ(1−γ)( 1/2 − 3α²c₁²c₂²L³λ⁴/((1−γ)²µ) ) ) ) [ (1−γ)/2 , (3/2)αL ; 2α²c₁²c₂²L²λ⁴/(1−γ) , µα ] [ α²σ²/n ; α²c₁²c₂²λ⁴σ² ]
+ ⪯ ( 4/(αµ(1−γ)) ) [ (1−γ)α²σ²/(2n) + (3/2)c₁²c₂²α³Lλ⁴σ² ; 2α⁴c₁²c₂²L²λ⁴σ²/(n(1−γ)) + α³c₁²c₂²µλ⁴σ² ],
1991
+ where det(·) denotes the determinant operation. In the last step
+ we used 1/2 − 3c₁²c₂²α²L³λ⁴/((1−γ)²µ) ≥ 1/4, which holds for α ≤ √µ(1−γ)/(2√3 c₁c₂L^(3/2)λ²). Therefore,
2004
+ from (51),
+ E∥ē_x^k∥² + (c₁²/n) E∥x̂^k∥²
+ ≤ (1 − αµ/2)^k ã₀ + ∥(I − A)⁻¹ b∥₁
+ ≤ (1 − αµ/2)^k ã₀ + (2σ²/(µn)) α + ( (6c₁²c₂²(L/µ)λ⁴σ² + 4c₁²c₂²λ⁴σ²)/(1−γ) ) α² + ( 8c₁²c₂²L²λ⁴σ²/(µn(1−γ)²) ) α³.   (52)
2032
+ Using (1 − αµ/2)^K ≤ exp(−(αµ/2)K) and (41), it holds that
+ E∥ē_x^K∥² + (c₁²/n) E∥x̂^K∥²
+ ≤ exp(−(αµ/2)K)(a₀ + α²a⋆) + a₁α + a₂α² + a₃α³,   (53)
+ where
+ a₀ ≜ E∥x̄⁰ − x⋆∥²,   a⋆ ≜ ( c₁²c₂²λ⁴/((1−λ)²n) ) ∥Û^T ∇f(x⋆)∥²   (54a)
+ a₁ ≜ 2σ²/(µn),   a₂ ≜ 10c₁²c₂²Lλ⁴σ²/(µ(1−γ))   (54b)
+ a₃ ≜ 8c₁²c₂²L²λ⁴σ²/(µn(1−γ)²).   (54c)
2063
+
2064
+ 8
2065
+ Note that by combining all stepsize conditions, it is sufficient
+ to require
+ α ≤ 1/ᾱ ≜ min{ (1−λ)/(8L) , µ(1−λ)/(8c₁²c₂²L²λ⁴) , √µ(1−λ)/(4√3 c₁c₂L^(3/2)λ²) }.   (55)
2082
+ We now select
2083
+ α = min
2084
+
2085
+ ln
2086
+
2087
+ max
2088
+
2089
+ 2, µ2(a0 + a⋆
2090
+ α2 ) K
2091
+ a1
2092
+ ��
2093
+ /µK, 1
2094
+ α
2095
+
2096
+ ≤ 1
2097
+ α. (56)
2098
+ Under this choice the exponential term in (53) can be upper
2099
+ bounded as follows. i) If α =
2100
+ ln(max{1,µ2(a0+a⋆/α2)K/a1})
2101
+ µK
2102
+ ≤ 1
2103
+ α
2104
+ then
2105
+ exp(− αµ
2106
+ 2 K)(a0 + α2a⋆)
2107
+ ≤ ˜O
2108
+
2109
+ (a0 + a⋆
2110
+ α2 ) exp
2111
+
2112
+ − ln
2113
+
2114
+ max
2115
+
2116
+ 1, µ2(a0 + a⋆
2117
+ α2 )K/a1
2118
+ ����
2119
+ = O
2120
+
2121
+ a1
2122
+ µK
2123
+
2124
+ ;
2125
+ ii) Otherwise α = 1
2126
+ α ≤
2127
+ ln(max{1,µ2(a0+a⋆/α2)K/a1})
2128
+ µK
2129
+ and
2130
+ exp(− αµ
2131
+ 2 K)(a0 + α2a⋆) = exp
2132
+
2133
+ − µK
2134
+
2135
+
2136
+ (a0 + a⋆
2137
+ α2 ).
2138
+ Therefore, under the stepsize condition (56) it holds that
2139
+ E ∥¯eK
2140
+ x ∥2 +
2141
+ c2
2142
+ 1
2143
+ n E ∥ˆxK∥2
2144
+ ≤ exp(− αµ
2145
+ 2 K)(a0 + α2a⋆) + a1α + a2α2 + a3α3
2146
+ ≤ ˜O
2147
+
2148
+ a1
2149
+ µK +
2150
+ a2
2151
+ µ2K2 +
2152
+ a3
2153
+ µ3K3 + (a0 + a⋆
2154
+ α2 ) exp
2155
+
2156
+ − K
2157
+ α
2158
+ ��
2159
+ .
2160
+ Plugging the constants (54) into the above inequality, using
2161
+ (55) and (32) yields our rate (21).
2162
+ References
2163
+ [1] C. G. Lopes and A. H. Sayed, “Diffusion least-mean squares
2164
+ over adaptive networks: Formulation and performance analy-
2165
+ sis,” IEEE Transactions on Signal Processing, vol. 56, no. 7,
2166
+ pp. 3122–3136, 2008.
2167
+ [2] S. S. Ram, A. Nedic, and V. V. Veeravalli, “Distributed stochas-
2168
+ tic subgradient projection algorithms for convex optimization,”
2169
+ J. Optim. Theory Appl., vol. 147, no. 3, pp. 516–545, 2010.
2170
+ [3] F. S. Cattivelli and A. H. Sayed, “Diffusion LMS strategies for
2171
+ distributed estimation,” IEEE Trans. Signal Process, vol. 58,
2172
+ no. 3, p. 1035, 2010.
2173
+ [4] K. Yuan, S. A. Alghunaim, B. Ying, and A. H. Sayed, “On
2174
+ the influence of bias-correction on distributed stochastic opti-
2175
+ mization,” IEEE Transactions on Signal Processing, vol. 68,
2176
+ pp. 4352–4367, 2020.
2177
+ [5] J. Xu, S. Zhu, Y. C. Soh, and L. Xie, “Augmented distributed
2178
+ gradient methods for multi-agent optimization under uncoordi-
2179
+ nated constant stepsizes,” in Proc. 54th IEEE Conference on
2180
+ Decision and Control (CDC), (Osaka, Japan), pp. 2055–2060,
2181
+ 2015.
2182
+ [6] P. Di Lorenzo and G. Scutari, “Next: In-network nonconvex
2183
+ optimization,” IEEE Transactions on Signal and Information
2184
+ Processing over Networks, vol. 2, no. 2, pp. 120–136, 2016.
2185
+ [7] A. Nedic, A. Olshevsky, and W. Shi, “Achieving geometric con-
2186
+ vergence for distributed optimization over time-varying graphs,”
2187
+ SIAM Journal on Optimization, vol. 27, no. 4, pp. 2597–2633,
2188
+ 2017.
2189
+ [8] G. Qu and N. Li, “Harnessing smoothness to accelerate dis-
2190
+ tributed optimization,” IEEE Transactions on Control of Net-
2191
+ work Systems, vol. 5, pp. 1245–1260, Sept. 2018.
2192
+ [9] S. Pu and A. Nedi´c, “Distributed stochastic gradient tracking
2193
+ methods,” Mathematical Programming, vol. 187, no. 1, pp. 409–
2194
+ 457, 2021.
2195
+ [10] R. Xin, U. A. Khan, and S. Kar, “An improved convergence
2196
+ analysis for decentralized online stochastic non-convex opti-
2197
+ mization,” IEEE Transactions on Signal Processing, vol. 69,
2198
+ pp. 1842–1858, 2021.
2199
+ [11] S. Lu, X. Zhang, H. Sun, and M. Hong, “Gnsd: A gradient-
2200
+ tracking based nonconvex stochastic algorithm for decentralized
2201
+ optimization,” in 2019 IEEE Data Science Workshop (DSW),
2202
+ pp. 315–321, IEEE, 2019.
2203
+ [12] K. Yuan and S. A. Alghunaim, “Removing data heterogeneity
2204
+ influence enhances network topology dependence of decentral-
2205
+ ized SGD,” arXiv preprint:2105.08023, 2021.
2206
+ [13] A. Koloskova, T. Lin, and S. U. Stich, “An improved analysis of
2207
+ gradient tracking for decentralized machine learning,” Advances
2208
+ in Neural Information Processing Systems, vol. 34, pp. 11422–
2209
+ 11435, 2021.
2210
+ [14] S. A. Alghunaim and K. Yuan, “A unified and refined con-
2211
+ vergence analysis for non-convex decentralized learning,” IEEE
2212
+ Transactions on Signal Processing, vol. 70, pp. 3264–3279, June
2213
+ 2022. (ArXiv preprint:2110.09993).
2214
+ [15] M. Zhu and S. Martinez, “Discrete-time dynamic average con-
2215
+ sensus,” Automatica, vol. 46, no. 2, pp. 322–329, 2010.
2216
+ [16] A. Nedic and A. Ozdaglar, “Distributed subgradient methods
2217
+ for multi-agent optimization,” IEEE Transactions on Automatic
2218
+ Control, vol. 54, no. 1, pp. 48–61, 2009.
2219
+ [17] K. Yuan, Q. Ling, and W. Yin, “On the convergence of decentral-
2220
+ ized gradient descent,” SIAM Journal on Optimization, vol. 26,
2221
+ no. 3, pp. 1835–1854, 2016.
2222
+ [18] C. Xi, V. S. Mai, R. Xin, E. H. Abed, and U. A. Khan, “Linear
2223
+ convergence in optimization over directed graphs with row-
2224
+ stochastic matrices,” IEEE Transactions on Automatic Control,
2225
+ vol. 63, no. 10, pp. 3558–3565, 2018.
2226
+ [19] S. Pu, W. Shi, J. Xu, and A. Nedi´c, “Push–pull gradient meth-
2227
+ ods for distributed optimization in networks,” IEEE Transac-
2228
+ tions on Automatic Control, vol. 66, no. 1, pp. 1–16, 2020.
2229
+ [20] A. Daneshmand, G. Scutari, and V. Kungurtsev, “Second-order
2230
+ guarantees of distributed gradient algorithms,” SIAM Journal
2231
+ on Optimization, vol. 30, no. 4, pp. 3029–3068, 2020.
2232
+ [21] Y. Sun, G. Scutari, and A. Daneshmand, “Distributed opti-
2233
+ mization based on gradient tracking revisited: Enhancing con-
2234
+ vergence rate via surrogation,” SIAM Journal on Optimization,
2235
+ vol. 32, no. 2, pp. 354–385, 2022.
2236
+ [22] G. Scutari and Y. Sun, “Distributed nonconvex constrained
2237
+ optimization over time-varying digraphs,” Mathematical Pro-
2238
+ gramming, vol. 176, no. 1-2, pp. 497–544, 2019.
2239
+ [23] F. Saadatniaki, R. Xin, and U. A. Khan, “Decentralized op-
2240
+ timization over time-varying directed graphs with row and
2241
+ column-stochastic matrices,” IEEE Transactions on Automatic
2242
+ Control, vol. 65, no. 11, pp. 4769–4780, 2020.
2243
+ [24] Y. Tang, J. Zhang, and N. Li, “Distributed zero-order algorithms
2244
+ for nonconvex multiagent optimization,” IEEE Transactions on
2245
+ Control of Network Systems, vol. 8, no. 1, pp. 269–281, 2020.
2246
+ [25] R. Xin, U. A. Khan, and S. Kar, “A fast randomized incremental
2247
+ gradient method for decentralized non-convex optimization,”
2248
+ IEEE Transactions on Automatic Control, vol. to appear, 2021.
2249
+ [26] R. Xin, U. A. Khan, and S. Kar, “Fast decentralized non-
2250
+ convex finite-sum optimization with recursive variance reduc-
2251
+ tion,” SIAM Journal on Optimization, to appear, 2021.
2252
+ [27] B. Li, S. Cen, Y. Chen, and Y. Chi, “Communication-efficient
2253
+ distributed optimization in networks with gradient tracking and
2254
+ variance reduction,” in International Conference on Artificial
2255
+ Intelligence and Statistics, pp. 1662–1672, PMLR, 2020.
2256
+ [28] H. Sun, S. Lu, and M. Hong, “Improving the sample and com-
2257
+ munication complexity for decentralized non-convex optimiza-
2258
+ tion: Joint gradient estimation and tracking,” in International
2259
+ Conference on Machine Learning, pp. 9217–9228, PMLR, 2020.
2260
+ [29] S. A. Alghunaim, E. K. Ryu, K. Yuan, and A. H. Sayed,
2261
+ “Decentralized proximal gradient algorithms with linear conver-
2262
+ gence rates,” IEEE Transactions on Automatic Control, vol. 66,
2263
+ pp. 2787–2794, June 2021.
2264
+ [30] W. Shi, Q. Ling, G. Wu, and W. Yin, “EXTRA: An exact
2265
+ first-order algorithm for decentralized consensus optimization,”
2266
+ SIAM Journal on Optimization, vol. 25, no. 2, pp. 944–966,
2267
+ 2015.
2268
+ [31] A. Koloskova, N. Loizou, S. Boreiri, M. Jaggi, and S. Stich,
2269
+ “A unified theory of decentralized SGD with changing topology
2270
+ and local updates,” in International Conference on Machine
2271
+ Learning, pp. 5381–5393, 2020.
2272
+ [32] Y. Nesterov, Introductory Lectures on Convex Optimization: A
2273
+ Basic Course, vol. 87. Springer, 2013.
2274
+
7tE1T4oBgHgl3EQfBwI8/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8dAzT4oBgHgl3EQfgfzo/content/tmp_files/2301.01471v1.pdf.txt ADDED
@@ -0,0 +1,726 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Freeform Islamic Geometric Patterns
2
+ Rebecca Lin1,2 and Craig S. Kaplan3
3
+ 1 Massachusetts Institute of Technology, Cambridge, MA, USA
4
+ 2 University of British Columbia, Vancouver, BC, Canada
5
+ 3 University of Waterloo, Waterloo, ON, Canada
6
+ Abstract. Islamic geometric patterns are a rich and venerable orna-
7
+ mental tradition. Many classic designs feature periodic arrangements of
8
+ rosettes: star shapes surrounded by rings of hexagonal petals. We present
9
+ a new technique for generating ‘freeform’ compositions of rosettes: finite
10
+ designs that freely mix rosettes of unusual sizes while retaining the aes-
11
+ thetics of traditional patterns. We use a circle packing as a scaffolding
12
+ for developing a patch of polygons and fill each polygon with a motif
13
+ based on established constructions from Islamic art.
14
+ Keywords: Islamic geometric patterns · modular design · circle packing
15
+ 1
16
+ Introduction
17
+ Islamic geometric patterns are a rich and venerable ornamental tradition [6]. The
18
+ most iconic of these patterns involve periodic arrangements of star shapes, with
19
+ gaps between them filled by additional polygons with less symmetry (Fig. 2).
20
+ Often, the concave corners of stars are filled with rings of petal-shaped hexagons,
21
+ yielding composite shapes called rosettes [25] (Fig. 1).
22
+ We refer to the number of arms in a star or rosette as its order. These motifs
23
+ appear in patterns in several standard orders: multiples of three and four are the
24
+ most common due to their compatibility with rotations in periodic symmetry
25
+ groups. It is challenging to create patterns that incorporate unusual orders or
26
+ unusual combinations of orders. For example, considerable geometric sleight-of-
27
+ hand is required for orders such as 11 and 13 (Fig. 2b), which are incompatible
28
+ with crystallographic symmetries [3, Pg. 484].
29
+ Whether designers employ stars and rosettes of standard or unusual orders,
30
+ they typically construct periodic compositions repeating in two directions in the
31
+ plane. Repetition is one of the hallmarks of ornamentation: surface decoration on
32
+ walls and floors, clothing, and objects should be appealing but not distracting.
33
+ When presented with a periodic pattern, we visually ‘factor’ for a non-repeating
34
+ kernel and a rule for filling the plane with copies of that kernel. Thus the eye
35
+ may casually appreciate the pattern without being overwhelmed by it. Accord-
36
+ ing to Gombrich [18], ‘aesthetic delight lies somewhere between boredom and
37
+ confusion’, a sentiment echoed by many others [21,2,11]. In decorative contexts,
38
+ a measure of boredom helps a pattern recede from conscious attention.
39
+ arXiv:2301.01471v1 [cs.GR] 4 Jan 2023
40
+
41
+ 2
42
+ R. Lin and C.S. Kaplan
43
+ Fig. 1. Freeform designs composed of rosettes of many different orders.
44
+ On the other hand, art benefits from a larger dose of confusion. An artwork
45
+ like a painting is a finite composition that rewards careful study, and so every
46
+ part of that composition can bear some measure of novelty. In contrast, an
47
+ infinite Islamic pattern that pleases the eye when elaborated over a wall might
48
+ lose its appeal if cropped, framed, and hung on that same wall. As an artwork,
49
+ it would have no natural boundary—no broad composition to guide the eye.
50
+ This article presents a technique for constructing ‘freeform’ Islamic geomet-
51
+ ric patterns: finite, non-repetitive arrangements of rosettes intended as self-
52
+ contained compositions rather than as ornamental textures. A few sample com-
53
+ positions appear in Fig. 1. Our freeform designs give us significant flexibility to
54
+ mix and match unusual rosette orders. We move along Gombrich’s continuum,
55
+ away from the boredom of ornamentation and towards the confusion of art. The
56
+ resulting visual experiment allows us to reimagine the canonical motifs of Islamic
57
+ geometric patterns in a highly non-traditional setting.
58
+ We construct motifs based on an initial polygonal patch using a hybrid of
59
+ standard techniques (Sec. 3). We define the overall arrangement of rosettes from
60
+ a circle packing derived from a triangulation (Sec. 4). We show how any circle
61
+ packing can be converted into a patch of connected polygons (Sec. 4.3). We then
62
+ inscribe a motif in each polygon and join the motifs together to form the final
63
+ pattern (Sec. 4.4). This technique is robust over a wide range of rosette orders.
64
+ The designer can control the final pattern by starting with a triangulation of
65
+ their choosing. We support a few additional special effects via ‘gadgets’ that
66
+ perform local surgery on the computed circle packing (Sec. 5). We also adapt
67
+ our technique to periodic patterns via toroidal circle packings (Sec. 6).
68
+
69
+ rosetteFreeform Islamic Geometric Patterns
70
+ 3
71
+ 2
72
+ Related Work
73
+ Artists and mathematicians use many strategies to disrupt the potential monoto-
74
+ ny of ornamental Islamic patterns. For example, an artist often introduces mild
75
+ variations in colours, decorative fills, or calligraphic inscriptions in periodic pat-
76
+ terns of otherwise identical stars or rosettes. They also sometimes alter the geom-
77
+ etry at the centres of selected rosettes while maintaining outward compatibility
78
+ with the rest of the pattern.
79
+ As the practice of Islamic geometric patterns grew in sophistication, artists
80
+ sought to incorporate stars or rosettes of unusual orders into their work. The
81
+ Topkapı scroll, a 15th-century visual guide to the drawing of Islamic ornament,
82
+ included a number of patterns with unusual combinations of stars. Cromwell [12]
83
+ analyzed these patterns and articulated rules for their construction. Later, he
84
+ presented a robust method for assembling patterns from irregular stars with dif-
85
+ ferent numbers of points [13]. That work demonstrated the wheel construction
86
+ (Sec. 3), which we will detail and use in our method. More recently, Gailiunas [16]
87
+ studied the amount of geometric error that accumulates when juxtaposing oth-
88
+ erwise incompatible stars.
89
+ Bonner [3] presented a comprehensive treatment of the modular construction
90
+ of Islamic patterns. His polygonal technique, also known as polygons-in-contact
91
+ (PIC) after Hankin [20], builds a motif in every tile of a polygonal tiling (Fig. 2).
92
+ Bonner’s book includes a vast collection of patterns with different combinations
93
+ of stars, including some ‘non-systematic’ patterns that feature stars or rosettes
94
+ with unusual orders, such as 7, 9, 11, 13, and 14.
95
+ Fig. 2. Illustration of polygons-in-contact, with examples of rosettes highlighted. (a) A
96
+ classical Islamic geometric pattern derived from a tiling by regular decagons, pentagons,
97
+ and barrel-shaped hexagons. (b) A non-systematic pattern with an underlying tiling
98
+ by regular 11-gons, 13-gons, and irregular pentagons.
99
+ Another means of achieving irregularity is to move away from the Euclidean
100
+ plane. In Islamic architecture, domes are often decorated with specialized geo-
101
+
102
+ (a)
103
+ (b)4
104
+ R. Lin and C.S. Kaplan
105
+ metric patterns adjusted to varying curvature [6]. Kaplan and Salesin [24] demon-
106
+ strated adapting PIC to produce patterns on the sphere and in the hyperbolic
107
+ plane. While repetitive in the mathematical sense, hyperbolic patterns are nec-
108
+ essarily distorted when projected into the Euclidean plane. Kaplan [22] later
109
+ presented a more general method for mapping planar patterns with sufficient
110
+ symmetry, including many Islamic patterns, onto arbitrary surfaces in 3D.
111
+ A Moroccan zellij design typically features a large central star surrounded
112
+ by radially symmetric constellations of smaller modules [7]. These modules are
113
+ formed from a standard set of individual tile shapes derived from an 8-pointed
114
+ star. The result is a monumental work containing substantial visual novelty
115
+ and appeal. The puzzle of creating such designs is more combinatorial than
116
+ geometric: the artist seeks new discrete configurations of a fixed set of shapes.
117
+ Recently, Kaplan [23] presented an algorithm for the procedural generation of
118
+ small zellij compositions, which shares some aesthetic goals with our work.
119
+ Modern mathematics allows us to produce patterns that are orderly without
120
+ being periodic. Many techniques have been proposed that use substitution tilings
121
+ or quasiperiodicity to guide the placement of Islamic motifs [4,27,8,9,29]. Some
122
+ researchers have even credited ancient designers with an explicit understanding
123
+ of quasiperiodicity [1,26], though such claims are controversial [14]. Non-periodic
124
+ patterns with long-range organization occupy an aesthetic sweet spot: they ad-
125
+ vertise global structure, but the precise nature of that structure is not trivially
126
+ unravelled by the eye.
127
+ In the broader world of computer graphics, researchers have explored some
128
+ interactive and automated techniques for laying out small motifs to create or-
129
+ namental patterns [17]. Practical numerical algorithms for constructing circle
130
+ packings are relatively new [10], so circle packings have not received much at-
131
+ tention as an organizing tool for pattern design. A notable exception is the work
132
+ of Hamekasi and Samavati [19], who use circle packings to guide the placement of
133
+ motifs in Persian floral designs. Most recently, Brewer et al. derived circle pack-
134
+ ings from k-uniform tilings and used them as a framework in which to inscribe
135
+ Islamic motifs [5]. Their technique overlaps somewhat with ours, though they
136
+ are restricted to arrangements that can arise naturally from the vertex types
137
+ and polygon orders of the tilings they use as a starting point.
138
+ 3
139
+ Modular Motif Construction
140
+ Many standard techniques for constructing Islamic patterns are modular: they
141
+ decompose the canvas into disjoint regions such as disks or polygons and define
142
+ a procedure for filling every region with a motif. This section summarizes two
143
+ motif construction techniques that will form the basis of our method.
144
+ In the polygons-in-contact technique (PIC), the canvas is subdivided into
145
+ polygons that meet edge-to-edge. We choose a contact angle θ ∈ (0, π/2). For
146
+ every edge of a polygon P in the subdivision, we construct the two rays that grow
147
+ from the edge’s midpoint towards the interior of P, rotated by ±θ relative to
148
+ the edge. A motif is formed by truncating these rays where they meet rays from
149
+
150
+ Freeform Islamic Geometric Patterns
151
+ 5
152
+ other edges. In simple cases, we need only compute intersections with rays from
153
+ neighbouring edges (Fig. 3a), or from two edges away (Fig. 3b). A more robust
154
+ construction requires heuristics to decide how to truncate rays, such as minimiz-
155
+ ing the total length of the motif’s line segments [3, Sec. 4.4.2]. Fig. 2 shows two
156
+ patterns created by constructing motifs for every polygon in a subdivision.
157
+ In the wheel construction, the modules are circles, each tangent to neigh-
158
+ bouring circles in a larger pattern. The construction inscribes a star in every
159
+ circle. Given a circle C of radius r, we first identify a set of points S on its
160
+ boundary, including the points where C meets its neighbours. We also choose a
161
+ smaller circle C′ with radius αr for a given α ∈ (0, 1), lying in the interior of
162
+ C. Let p and q be two points in S. We construct the perpendicular bisector of
163
+ chord pq and find the intersection of that bisector with C′. Then we draw line
164
+ segments from p and q to the intersection. Fig. 3 shows stars that emerge when
165
+ this process is repeated for all pairs of p and q in S that are consecutive (c) or
166
+ non-consecutive (d). Here we can control the sharpness of the star by varying
167
+ the scaling ratio α between the radii of the outer and inner circles.
168
+ Fig. 3. Regular 9-pointed stars constructed using PIC (left) and the wheel construction
169
+ (right). Using PIC, we truncate rays at their first (a) or second (b) intersections with a
170
+ contact angle of 2π/5. Using the wheel construction, we draw zig-zag paths connecting
171
+ consecutive points (c) or every other point (d) on the outer circle.
172
+ Both of these constructions can produce symmetric n-pointed stars. For PIC,
173
+ a symmetric star is produced when P is regular; for the wheel construction, we
174
+ require the points in S to be distributed evenly around C, and for C and C′ to
175
+ be concentric. We can convert between PIC’s θ and the wheel construction’s α
176
+ in this case. Stars (a) and (c) in Fig. 3 are related by
177
+ α = 1 −
178
+ sin (π/n) sin θ
179
+ sin (π(n + 2)/2n − θ)
180
+ (1)
181
+ and stars (b) and (d) are related by
182
+ α = 1 − 2 sin (π/n) sin (π(n − 2)/2n) sin (θ − π/n)
183
+ sin (π/2 + 2π/n − θ)
184
+ .
185
+ (2)
186
+ Empirically, the wheel construction works well when forming a star whose
187
+ points lie on a common circle: it degrades gracefully as the point distribution
188
+
189
+ (a)
190
+ (b)
191
+ (d)6
192
+ R. Lin and C.S. Kaplan
193
+ becomes uneven. PIC is a better choice for small polygons whose irregularity
194
+ is harder to characterize. We shall use both in our method. Note that neither
195
+ of these techniques explicitly constructs rosettes. Although explicit rosette con-
196
+ structions exist [25], we will allow rosettes to emerge as a by-product of the
197
+ polygonal decompositions we use as a basis for motif construction, as in the
198
+ examples of Fig. 2.
199
+ 4
200
+ Freeform Designs
201
+ In this section, we present the steps that make up our main technique for con-
202
+ structing finite, freeform compositions of rosettes. The steps are visualized in
203
+ Fig. 4. We begin with an arbitrary simplicial complex (a), which induces a circle
204
+ packing (b). Based on the circle packing, we construct a polygonal patch (c),
205
+ comprising large cyclic polygons separated by smaller irregular pentagons. Fi-
206
+ nally, we use a combination of PIC and the wheel construction to define motifs
207
+ for each polygon (d), and optionally render the design (e). In the following sub-
208
+ sections, we describe each of these steps in detail.
209
+ Fig. 4. Our method takes a complex (a) and computes a circle packing (b). Then it
210
+ forms a freeform patch of polygons (c), from which it develops motifs that form a
211
+ seamless constellation (d) that may be styled (e).
212
+
213
+ (a)
214
+ (b)
215
+ (e)
216
+ (d)
217
+ cFreeform Islamic Geometric Patterns
218
+ 7
219
+ 4.1
220
+ Complex
221
+ The main input to our technique is a complex, more formally a planar, simply
222
+ connected, pure simplicial 2-complex K. In simpler terms, we may regard K as a
223
+ collection of non-overlapping triangles in the plane, meeting edge-to-edge. The
224
+ union of the triangles defines a region of the plane—a simple polygon. We will
225
+ refer to the vertices, edges, and faces of the complex. We distinguish between its
226
+ boundary vertices, which lie on the simple polygon, and interior vertices, which
227
+ lie interior to the polygon.
228
+ We may construct input complexes in numerous ways. It is easy to author
229
+ them manually by placing and connecting vertices. They can also be generated
230
+ procedurally, such as by computing the Delaunay triangulation of a point set.
231
+ 4.2
232
+ Circle Packing
233
+ Let K be a complex with n vertices. A circle packing for K is a collection of
234
+ non-overlapping circles {C1, . . . , Cn} whose tangencies echo the combinatorial
235
+ structure of K. Each circle Ci corresponds with vertex vi of the complex, and
236
+ two circles Ci and Cj are externally tangent if and only if vi and vj are con-
237
+ nected by an edge in K. The Discrete Uniformization Theorem guarantees that a
238
+ circle packing exists for any given complex K [28]. Although the circle packing’s
239
+ connectivity will be identical to that of its complex, they will generally not be
240
+ equivalent geometrically: the locations and sizes of the circles are not directly
241
+ related to the locations of the vertices in the complex, or to the shapes of its
242
+ triangles.
243
+ Collins and Stephenson [10] describe a simple numerical algorithm that com-
244
+ putes circle packings through iterative adjustments of an initial assignment of
245
+ radii to the Ci. The radii of boundary circles must be further constrained with
246
+ additional boundary conditions. The simple Python script by Eppstein [15] ac-
247
+ cepts explicit values for boundary radii. Given a boundary vertex of degree n, our
248
+ implementation chooses a radius r for a circle that would be perfectly surrounded
249
+ by 2n − 2 unit circles, giving r = (1 − sin φ)/ sin φ, where φ = π/(2n − 2).
250
+ 4.3
251
+ Polygonal Patch
252
+ A patch is a finite set of polygons with disjoint interiors whose union is a topo-
253
+ logical disk. Given a circle packing, we construct a patch that has a large cyclic
254
+ polygon (i.e., a polygon whose vertices lie on a common circle) associated with
255
+ each circle, separated from other cyclic polygons by haloes of pentagonal ‘filler
256
+ polygons’. By design, these polygons can serve as scaffolding for building motifs
257
+ typical in Islamic geometric patterns.
258
+ Let C be an interior circle in a circle packing, and let k be the degree of the
259
+ vertex associated with C in the complex. As illustrated in Fig. 5a, we construct
260
+ a cyclic 2k-gon P in the interior of C. To begin, we set the vertices of P to be the
261
+ k points of tangency between C and its neighbours, together with the midpoints
262
+ of the minor arcs of C connecting adjacent tangency points. Now let τ ∈ (0, 1)
263
+
264
+ 8
265
+ R. Lin and C.S. Kaplan
266
+ be a user-selected scaling factor. Scale P relative to the centre of C by a factor
267
+ of τ, and add the scaled polygon to the patch. By default, we use τ = 0.8, a
268
+ choice that we discuss in Sec. 7.
269
+ The gaps between circles in the packing are triangular regions bounded by
270
+ arcs of three mutually tangent circles. Let Ci, Cj, and Ck be one such trio
271
+ of circles. We divide the space between their cyclic polygons into three new
272
+ pentagons, as shown in Fig. 5b, by drawing edges connecting vertices of cyclic
273
+ polygons. Three outer line segments pass through the pairwise tangencies of
274
+ the circles. Three inner segments connect arc midpoints to a new point o, the
275
+ incentre of the triangle formed from the centres of Ci, Cj, and Ck.
276
+ Fig. 5. Constructing a polygonal patch from a circle packing: we create a cyclic polygon
277
+ for every circle (a), and fill the gaps between three mutually tangent circles with trios
278
+ of irregular pentagons (b).
279
+ 4.4
280
+ Motif Construction
281
+ The final step in our process is to construct a motif for every polygon in the
282
+ patch produced in the previous step. Here we apply both the wheel construction
283
+ and PIC, depending on the type of polygon being decorated. Our large cyclic
284
+ polygons yield motifs that garner attention. We safeguard the quality of these
285
+ motifs by exploiting the robustness of the wheel construction in their develop-
286
+ ment (Fig. 6a). We then use PIC for the more unpredictable filler pentagons
287
+ (Fig. 6b). Optionally, we remove motif segments around the boundary of the
288
+ resulting composition, paring it down to a core of whole rosettes (Fig. 6c). Our
289
+ construction depends on a single global contact angle θ, as described in Sec. 3.
290
+ By default, we use θ = 2π/5, the angle for which PIC would inscribe a perfect
291
+ pentacle in a regular pentagon.
292
+ Let C be an interior circle in the packing with centre o and k points of
293
+ tangency. Let P be the cyclic 2k-gon associated with C in the patch. We use the
294
+ wheel construction to build a star centred at o whose outer points lie at the edge
295
+
296
+ (a)
297
+ (b)Freeform Islamic Geometric Patterns
298
+ 9
299
+ Fig. 6. To construct a design from a patch, we use the wheel construction to create a
300
+ star in every cyclic polygon (a) and apply PIC to build motifs for filler polygons (b).
301
+ Optionally, we remove the outer layers of geometry to extract an arrangement of whole
302
+ rosettes (c).
303
+ midpoints of P. Generally, these midpoints do not lie on a common circle, but the
304
+ wheel construction is tolerant of small deviations in their distances from o. Let rC
305
+ be the radius of C, and define r to be rC cos(π/2k). The value r approximates the
306
+ radius of an inscribed circle meeting P’s edge midpoints, an approximation that
307
+ converges on the correct value when P is regular. Now compute α by plugging
308
+ the user-supplied contact angle θ into Eqn. 2, and let C′ be a circle with center
309
+ o and radius αr. The radius of C′ is chosen to ensure that the contact angles at
310
+ the points of the star approximate θ. We apply the wheel construction using the
311
+ edge midpoints of P and the inner circle C′, connecting every other star point
312
+ as in Fig. 3d.
313
+ It remains to build motifs for the filler pentagons. Let Q be one such pen-
314
+ tagon. As in PIC, construct a pair of rays emanating from the midpoint of every
315
+ edge of Q, and truncate them where they intersect rays growing from neigh-
316
+ bouring edges. If an edge e of Q is adjacent to a cyclic polygon, then we choose
317
+ contact angles that yield rays parallel to the star edges meeting across e (Fig. 6b,
318
+ red). These angles may not be symmetric across the perpendicular bisector of e,
319
+ but the discrepancy is small in practice. If e is adjacent to another pentagon, on
320
+ the other hand, then we use θ as the contact angle for its rays (Fig. 6b, purple).
321
+ In summary, our method uses a patch to construct a constellation of localized
322
+ motifs that combine to form familiar visual elements: rosettes. By our application
323
+ of the Discrete Uniformization Theorem, each rosette corresponds to a vertex in
324
+ the triangulation K, and two rosettes are adjacent if and only if their vertices
325
+ share an edge in K. The order of a rosette is twice the degree of its associated
326
+ vertex.
327
+ 5
328
+ Gadgets
329
+ The basic technique of the previous section can produce a wide variety of freeform
330
+ designs with combinations of rosettes of different orders. However, some config-
331
+ urations found in traditional Islamic geometric patterns remain out of reach,
332
+
333
+ (a)
334
+ (b)
335
+ C10
336
+ R. Lin and C.S. Kaplan
337
+ most obviously because we define only one way to fill the triangular gaps be-
338
+ tween cyclic polygons. In this section, we introduce two gadgets that help us
339
+ recover some of that variety, increasing the visual intrigue of our designs. Gad-
340
+ gets are small subgraphs with labelled vertices that can be incorporated into a
341
+ complex. These vertices then determine local clusters of polygons during patch
342
+ construction, overriding the polygons of Sec. 4.3.
343
+ A square gadget is a 5-vertex subgraph with a central vertex a of degree 4,
344
+ as shown in Fig. 7a. Given a complex containing a copy of the square gadget,
345
+ we obtain a circle packing containing a cluster of circles like the one shown in
346
+ Fig. 7b, where circle A is associated with vertex a. When building the patch,
347
+ we remove A from the circle packing and tile the hole left behind with four
348
+ pentagons, as shown in Fig. 7c. The new point o is the mean of vertices i, j, k,
349
+ and ℓ. Our motif construction will produce a squarish region surrounded by four
350
+ rosettes containing a central octagon.
351
+ Fig. 7. The square gadget (a) produces a circle packing (b) from which we derive four
352
+ filler pentagons (c).
353
+ Fig. 8. The bowtie gadget (a) produces a circle packing (b) from which we derive a
354
+ cluster (c) of four filler pentagons and a barrel-shaped hexagon.
355
+ A bowtie gadget is a 6-vertex subgraph with two central vertices a and b
356
+ of degree 4, as shown in Fig. 8a. As with the square gadget, we remove the
357
+ corresponding circles A and B from the circle packing and fill the void with a
358
+ new configuration of tiles. First, when constructing a cyclic polygon for circle D
359
+ associated with vertex d, we divide the minor arc between the tangencies with
360
+ C and E into three equal pieces instead of the usual two, yielding vertices j and
361
+
362
+ C
363
+ a
364
+ A
365
+ D
366
+ B
367
+ h
368
+ d
369
+ E
370
+ (a)
371
+ (b)
372
+ Cd
373
+ D
374
+ 10
375
+ a
376
+ E
377
+ A
378
+ B
379
+ e
380
+ F
381
+ (a)
382
+ (b)
383
+ cFreeform Islamic Geometric Patterns
384
+ 11
385
+ j′ in Fig. 8c. Similarly, we divide F’s arc into three, which gives us vertices ℓ
386
+ and ℓ′. We then construct a bowtie-shaped arrangement of four pentagons and
387
+ one barrel-shaped hexagon, as illustrated in Fig. 8c, where o is the mean of i, j,
388
+ and ℓ and o′ is the mean of j′, k, and ℓ′.
389
+ When the barrel-shaped hexagon in the centre of the
390
+ bowtie gadget becomes too thin, it can produce a motif
391
+ with a small region of overlap at its centre, as shown in red
392
+ in the inset. When these overlaps occur, we replace the
393
+ red segments with a perfect ‘X’, shown in blue. The blue
394
+ segments alter the contact angles with the edges of the hexagon; we propagate
395
+ any changes to the hexagon’s four neighbouring pentagons.
396
+ Recall that without gadgets, our construction was limited to
397
+ rosettes of even orders. But when a bowtie gadget appears in
398
+ a complex, vertices d and f each contribute three edges to their
399
+ corresponding cyclic polygons. Therefore, if a complex vertex acts
400
+ as d or f in one such gadget, as in the central vertex of the
401
+ subgraph in the inset, that vertex will yield a rosette of odd order. More generally,
402
+ we may hang any odd number of suitably oriented bowtie gadgets from a vertex
403
+ to obtain an odd-order rosette.
404
+ Fig. 9 shows a freeform design constructed from a random arrangement of
405
+ bowtie and square gadgets. In future work, we hope to explore gadgets beyond
406
+ these two.
407
+ Fig. 9. A composition based on a square grid, where every square is randomly subdi-
408
+ vided with a diagonal, a square gadget, or a bowtie gadget.
409
+
410
+ 12
411
+ R. Lin and C.S. Kaplan
412
+ 6
413
+ Periodic Patterns
414
+ While the focus of our technique is the creation of finite, freeform compositions,
415
+ we have also examined its ability to produce more orderly designs. For example, a
416
+ finite subset of a periodic arrangement of bowtie gadgets (Fig. 10, left) yields an
417
+ approximation of the decagonal design in Fig. 2a (Fig. 10, right). Other periodic
418
+ arrangements of triangles and gadgets can reproduce different classic designs.
419
+ However, because of flexibility in the circle packing algorithm, these freeform
420
+ designs could contain rosettes of continuously varying scales.
421
+ We can extend our technique to generate truly periodic patterns by gener-
422
+ alizing the Discrete Uniformization Theorem beyond the Euclidean plane. In
423
+ particular, if K is embedded on a torus, then the theorem guarantees the ex-
424
+ istence of a circle packing in the torus’s intrinsic metric [28, Ch. 9]. The circle
425
+ packing algorithm is, in some sense, even simpler in this case because there is no
426
+ longer any need for explicit boundary conditions: every circle is completely sur-
427
+ rounded. The torus can then be cut open and unrolled, yielding a finite collection
428
+ of circles that can be stamped out to produce an infinite periodic packing.
429
+ Fig. 11 gives an example of a periodic pattern generated from a simplicial
430
+ complex embedded on a torus. The light grey disks in Fig. 11b should be inter-
431
+ preted as translated copies of the dark grey disks with the same indices. The
432
+ numerical circle packing algorithm yields a layout that tiles the plane by trans-
433
+ lation (Fig. 11c), from which we can create a periodic pattern with rosettes of
434
+ orders 10, 12, 14, and 16.
435
+ Future work could explore the analogous extensions of this technique to
436
+ other spaces, such as the sphere and the Poincar´e disk model of the hyperbolic
437
+ plane [24].
438
+ Fig. 10. A periodic arrangement of bowtie gadgets (left) can be used to generate a
439
+ freeform version of the pattern in Fig. 2a (right).
440
+
441
+ Freeform Islamic Geometric Patterns
442
+ 13
443
+ Fig. 11. A triangulation drawn on a square with periodic boundary conditions (a) is
444
+ used to generate a circle packing (b) that covers the plane through translation (c). We
445
+ construct motifs to obtain a periodic Islamic geometric pattern (d).
446
+ 7
447
+ Discussion
448
+ In this section, we discuss some of the details of our technique, including al-
449
+ ternative approaches that we considered during its development. Some of these
450
+ alternatives may offer opportunities for future work.
451
+ Selecting interior circles. We typically do not generate a motif for every
452
+ circle in the packing. Boundary circles, and circles adjacent to them, can dif-
453
+ fer substantially in size from their neighbours. These variations can propagate
454
+ through the rest of the construction and produce unacceptably distorted motifs,
455
+ such as uneven rosette petals (Fig. 12). Future work could consider ways to op-
456
+ timize the geometry of the circle packing to serve patch and motif construction.
457
+ For now, we omit outer layers of circles in our final designs. Note that this ap-
458
+ proach may separate the design into multiple connected components, in which
459
+ case we simply keep the largest component.
460
+ Beyond these technicalities, we can be selective for aesthetic reasons. Hav-
461
+ ing the freedom to craft the shape of a design provides opportunities to create
462
+ interesting compositions (Fig. 15).
463
+
464
+ (b)
465
+ a
466
+ (d)14
467
+ R. Lin and C.S. Kaplan
468
+ Fig. 12. Circles near the boundary of a packing can lead to distorted stars and rosettes
469
+ (right). We discard outer circles, which can sometimes partition a design into multiple
470
+ connected components (left).
471
+ Choosing a scale factor. Recall that the
472
+ parameter τ controls the scale of each cyclic
473
+ polygon relative to its circle, which in turn affects the shapes of filler pentagons.
474
+ As shown in the inset, the quality of a motif generated within a filler pentagon
475
+ decreases as that pentagon deviates from regularity. Thus we seek to choose τ
476
+ to minimize the total deviation across a design.
477
+ To gauge the deviation of a pentagon Q from regularity, we adopt a contin-
478
+ uous symmetry measure by Zabrodsky et al. [30], which quantifies the minimal
479
+ distance that the vertices of Q must travel to form a regular pentagon. Let the
480
+ error of a freeform patch be the average deviation of its pentagons from regular-
481
+ ity. We can compute this error for a range of closely-spaced τ values and choose
482
+ the one with minimal error (Fig. 13a). Over a range of circle packings, we see
483
+ significant deviation outside the range (0.7, 0.9) and find that τ = 0.8 produces
484
+ satisfactory results, as shown throughout this work.
485
+ Fig. 13. A patch with pentagons coloured by their deviations (red) from regularity
486
+ (green). In (a), cyclic polygons are scaled by τ = 0.7, 0.8, and 0.9, showing that 0.8
487
+ produces good quality overall. In (b), they are offset by a fixed amount, with less
488
+ consistent results.
489
+
490
+ a)
491
+ b
492
+ RFreeform Islamic Geometric Patterns
493
+ 15
494
+ In the future, we hope to investigate other measurements of polygon quality
495
+ in order to produce patches that are closer to ideal. For example, PIC can often
496
+ produce a satisfactory motif in a polygon that has lower-order symmetries while
497
+ not being fully regular.
498
+ As an alternative to treating τ as a scaling factor, we also considered offsetting
499
+ cyclic polygons by a constant inward distance τ. However, we found that this
500
+ approach was not as successful in producing high-quality pentagons (Fig. 13b).
501
+ With either interpretation of τ, the quality is the poorest for pentagons adjacent
502
+ to two cyclic polygons of widely different radii. Hamekasi and Samavati note this
503
+ issue as well [19], and mitigate it by avoiding complexes containing neighbouring
504
+ vertices of widely varying degrees. In future work, we would like to develop a
505
+ global optimization that chooses a different scaling factor for every cyclic polygon
506
+ so as to maximize the overall quality of all filler pentagons.
507
+ Cyclic vs. regular polygons. It is tempt-
508
+ ing to construct regular polygons in place of
509
+ cyclic polygons, as these would yield perfectly
510
+ symmetric stars as motifs. Using the aforemen-
511
+ tioned regularity measurement [30], we fit a reg-
512
+ ular polygon ˆP to each cyclic polygon P gen-
513
+ erated in Sec. 4.3, and centre ˆP on the circum-
514
+ centre of P. The result for τ = 0.8 is shown in
515
+ the inset. This approach prioritizes the quality of large, prominent stars. How-
516
+ ever, it yields distorted pentagons whose motifs self-intersect. By choosing cyclic
517
+ polygons rather than regular polygons, our algorithm sacrifices some quality in
518
+ large stars for the sake of creating feasible connections between them.
519
+ Fig. 14. Generative designs constructed from Delaunay triangulations of random point
520
+ sets, without (left) and with (right) bowtie gadgets.
521
+
522
+ orders
523
+ 18
524
+ 17
525
+ 16
526
+ 15
527
+ 14
528
+ 13
529
+ 12
530
+ 11
531
+ 10
532
+ 9
533
+ 816
534
+ R. Lin and C.S. Kaplan
535
+ 8
536
+ Results
537
+ We demonstrate the versatility of our technique by presenting a range of freeform
538
+ designs. For stylized results such as the filled composition in Fig. 14 and the
539
+ interlaced design in Fig. 17, we adapt the rendering algorithms described by
540
+ Kaplan in Bonner’s text [3, Sec. 4.5].
541
+ Our method places no constraints on the input complex, giving users consid-
542
+ erable control over the output design. Fully generative designs can be created
543
+ using Delaunay triangulations of random point sets, leading to arrangements of
544
+ rosettes with various orders (Fig. 14, left). We can further increase the number
545
+ of possible charges and broaden the expressiveness of our technique by insert-
546
+ ing random gadgets (Fig. 14, right). Of course, an artist may select a subset of
547
+ rosettes in a generative design to craft a desired high-level composition (Fig. 15).
548
+ Fig. 15. A generative design in which the user has manually chosen to keep a subset
549
+ of rosettes from an initial arrangement, producing a more dynamic composition with
550
+ an irregular boundary and internal voids.
551
+ On the other hand, we can begin with a highly structured complex and obtain
552
+ a repetitive final design (Figs. 10 and 17), or use a toroidal complex to produce a
553
+ truly periodic pattern (Fig. 11d). In principle, these approaches could be used to
554
+ produce exactly or approximately periodic drawings of many historical Islamic
555
+ geometric patterns. However, we have not attempted to catalogue exactly which
556
+ ones are possible because existing construction techniques are much better suited
557
+ to the task of drawing them.
558
+ In between the extremes of full control and generative randomness, we can in-
559
+ sert carefully constructed subgraphs into a complex to create a single high-order
560
+ rosette (Fig. 16a), or create appealing local arrangements of rosettes (Fig. 16b,c).
561
+
562
+ Freeform Islamic Geometric Patterns
563
+ 17
564
+ Another way to balance order and chaos is to place random gadgets within an
565
+ otherwise ordered grid (Fig. 9).
566
+ Fig. 16. A tuned design with a high-order rosette (a), for which τ = 0.96 and α = 0.75,
567
+ and a composition (c) incorporating multiple instances of a web-like sub-complex (b).
568
+ The high-order rosette in Fig. 16a is a special case, in that it requires hand-
569
+ tuning. Recall that adjacent circles with widely varying radii can produce dis-
570
+ torted motifs (Fig. 12). To produce a satisfactory large rosette, we manually set
571
+ the τ and α values for its cyclic polygon for a better fit with the surrounding
572
+ geometry.
573
+ Close examination of many of our results reveals small geometric discrepan-
574
+ cies of the kind illustrated starkly in Fig. 12. When rosettes have arms that vary
575
+ too dramatically in width or length, they disrupt the elegance of a pattern and
576
+ the feeling of ‘inevitability’ in its construction. There are several places in our
577
+ work where we choose global constants like τ that produce acceptable results in
578
+ general without adapting to the detailed geometry of local parts of individual
579
+ designs. The large rosette in Fig. 16a gives one clear example of where local ad-
580
+ justments can improve a design. In future work, we would like to explore more
581
+ fine-grained constructions that can enhance the quality of every rosette based
582
+ on the configuration of the circle packing in its immediate neighbourhood.
583
+ 9
584
+ Conclusion
585
+ We presented a robust method for constructing freeform Islamic geometric pat-
586
+ terns comprising rosettes of unusual orders. Our technique relies on the theory
587
+ of circle packings, giving us a principled geometric scaffolding from which to
588
+ develop a polygonal patch and then motifs. The user controls the initial com-
589
+ plex and any gadgets in it, allowing for significant creative freedom in the design
590
+
591
+ (b)
592
+ (a)18
593
+ R. Lin and C.S. Kaplan
594
+ process. Our results are more organic and less repetitive than existing patterns
595
+ and suggest many ideas for further exploration. We believe they communicate
596
+ the aesthetic of Islamic geometric patterns while also interpreting them in a non-
597
+ traditional context. They still manage to convey the ‘aesthetic delight’ that Gom-
598
+ brich discussed [18], but with slightly less boredom and more confusion, paving
599
+ the way for more artistic applications of these designs. The work enhances our
600
+ understanding of traditional patterns and reveals new opportunities—freeform
601
+ or otherwise—for both ornamentation and art-making.
602
+ Fig. 17. A highly structured composition (right) generated from a finite subset of a
603
+ conceptually periodic complex (left). Although the circle packing is not constructed in
604
+ a toroidal domain as in Sec. 6, the resulting composition appears close to periodic.
605
+ Acknowledgements
606
+ This research was supported by the Natural Sciences and Engineering Research
607
+ Council of Canada and the Cheriton School of Computer Science at the Univer-
608
+ sity of Waterloo.
609
+ References
610
+ 1. R.A. Al Ajlouni, The global long-range order of quasi-periodic patterns in islamic
611
+ architecture, Acta Crystallographica Section A 68 (2012), pp. 235–243. Available
612
+ at https://doi.org/10.1107/S010876731104774X.
613
+ 2. R. Arnheim, Art and Visual Perception: A Psychology of the Creative Eye, Uni-
614
+ versity of California Press, Berkeley and Los Angeles, California, 1974.
615
+ 3. J. Bonner, Islamic Geometric Patterns: Their Historical Development and Tradi-
616
+ tional Methods of Construction, Springer-Verlag, New York, 2017.
617
+
618
+ Freeform Islamic Geometric Patterns
619
+ 19
620
+ 4. J. Bonner and M. Pelletier, A 7-Fold System for Creating Islamic Geometric Pat-
621
+ terns Part 1: Historical Antecedents, in Proceedings of Bridges 2012: Mathemat-
622
+ ics, Music, Art, Architecture, Culture, R. Bosch, D. McKenna, and R. Sarhangi,
623
+ eds., Phoenix, Arizona. Tessellations Publishing, 2012, pp. 141–148. Available at
624
+ http://archive.bridgesmathart.org/2012/bridges2012-141.html.
625
+ 5. S.G. Brewer, M. Zha, and S. Neno, Generating Families of Islamic Star Rosette
626
+ Patterns Based on k-Uniform Tilings, in Proceedings of Bridges 2022: Mathematics,
627
+ Art, Music, Architecture, Culture, D. Reimann, D. Norton, and E. Torrence, eds.,
628
+ Phoenix, Arizona. Tessellations Publishing, 2022, pp. 391–394. Available at http:
629
+ //archive.bridgesmathart.org/2021/bridges2022-391.html.
630
+ 6. E. Broug, Islamic Geometric Design, Thames & Hudson, London, 2013.
631
+ 7. J.M. Castera, Arabesques: Decorative Art in Morocco, ACR Edition, Paris, 1999.
632
+ 8. J.M. Castera, From the Angle of Quasicrystals, in Proceedings of Bridges 2010:
633
+ Mathematics, Music, Art, Architecture, Culture, G.W. Hart and R. Sarhangi, eds.,
634
+ Phoenix, Arizona. Tessellations Publishing, 2010, pp. 215–222. Available at http:
635
+ //archive.bridgesmathart.org/2010/bridges2010-215.html.
636
+ 9. J.M. Castera, Another look at Pentagonal Persian Patterns, in Proceedings of
637
+ Bridges 2016: Mathematics, Music, Art, Architecture, Education, Culture, E. Tor-
638
+ rence, B. Torrence, C. S´equin, D. McKenna, K. Fenyvesi, and R. Sarhangi,
639
+ eds., Phoenix, Arizona. Tessellations Publishing, 2016, pp. 325–330. Available at
640
+ http://archive.bridgesmathart.org/2016/bridges2016-325.html.
641
+ 10. C.R. Collins and K. Stephenson, A circle packing algorithm, Computational Ge-
642
+ ometry 25 (2003), pp. 233–256.
643
+ 11. P. Cromwell, Looking at islamic patterns i: The perception of order, PsyArXiv
644
+ (2021). Available at psyarxiv.com/qhg3f.
645
+ 12. P.R. Cromwell, Islamic geometric designs from the topkapı scroll i: unusual ar-
646
+ rangements of stars, Journal of Mathematics and the Arts 4 (2010), pp. 73–85.
647
+ Available at https://doi.org/10.1080/17513470903311669.
648
+ 13. P.R.
649
+ Cromwell,
650
+ On
651
+ irregular
652
+ stars
653
+ in
654
+ islamic
655
+ geometric
656
+ patterns,
657
+ 2013.
658
+ Available at https://girih.wordpress.com/on-irregular-stars-in-islamic-
659
+ geometric-patterns/, accessed 12 August 2021.
660
+ 14. P.R. Cromwell, Cognitive bias and claims of quasiperiodicity in traditional islamic
661
+ patterns, Math Intelligencer 37 (2015), pp. 30–44.
662
+ 15. D. Eppstein, CirclePack.py
663
+ (2012). Available at https://www.ics.uci.edu/
664
+ ~eppstein/PADS/CirclePack.py.
665
+ 16. P. Gailiunas, Near-miss Star Patterns, in Proceedings of Bridges 2020: Mathemat-
666
+ ics, Art, Music, Architecture, Education, Culture, C. Yackel, R. Bosch, E. Torrence,
667
+ and K. Fenyvesi, eds., Phoenix, Arizona. Tessellations Publishing, 2020, pp. 27–34.
668
+ Available at http://archive.bridgesmathart.org/2020/bridges2020-27.html.
669
+ 17. L. Gieseke, P. Asente, R. Mˇech, B. Benes, and M. Fuchs, A survey of
670
+ control mechanisms for creative pattern generation, Computer Graphics Forum 40
671
+ (2021), pp. 585–609. Available at https://onlinelibrary.wiley.com/doi/abs/
672
+ 10.1111/cgf.142658.
673
+ 18. E. Gombrich, The Sense of Order: A study in the psychology of decorative art,
674
+ second revised ed., Phaidon Press, Oxford, 1994.
675
+ 19. N. Hamekasi and F. Samavati, Designing Persian Floral Patterns using Circle
676
+ Packing, in Proceedings of the International Conference on Computer Graphics
677
+ Theory and Applications and International Conference on Information Visual-
678
+ ization Theory and Applications (GRAPP/IVAPP), P. Richard, M. Kraus, R.S.
679
+ Laramee, and J. Braz, eds., Feb 24-26. SciTePress, 2012, pp. 135–142. Available at
680
+ http://dx.doi.org/10.5220/0003850101350142.
681
+
682
+ 20
683
+ R. Lin and C.S. Kaplan
684
+ 20. E.H. Hankin, The Drawing of Geometric Patterns in Saracenic Art, Memoirs of
685
+ the Archaeological Society of India Vol. 15, Government of India, Calcutta, 1925.
686
+ 21. F.
687
+ Hutcheson,
688
+ An
689
+ inquiry
690
+ into
691
+ the
692
+ original
693
+ of
694
+ our
695
+ ideas
696
+ of
697
+ beauty
698
+ and
699
+ virtue, Printed by J. Darby in Bartholomew Close, London, 1725, Avail-
700
+ able at https://oll.libertyfund.org/title/leidhold-an-inquiry-into-the-
701
+ original-of-our-ideas-of-beauty-and-virtue-1726-2004#lf1458_head_013.
702
+ 22. C.S. Kaplan, Semiregular patterns on surfaces, in NPAR ’09: Proceedings of the
703
+ 7th international symposium on Non-photorealistic animation and rendering, New
704
+ York. ACM Press, 2009, pp. 35–39.
705
+ 23. C.S. Kaplan, Generative Zellij, in Proceedings of Bridges 2022: Mathematics,
706
+ Art, Music, Architecture, Culture, D. Reimann, D. Norton, and E. Torrence,
707
+ eds., Phoenix, Arizona. Tessellations Publishing, 2022, pp. 285–288. Available at
708
+ http://archive.bridgesmathart.org/2022/bridges2022-285.html.
709
+ 24. C.S. Kaplan and D.H. Salesin, Islamic star patterns in absolute geometry, ACM
710
+ Transactions on Graphics 23 (2004), pp. 97–119.
711
+ 25. A. Lee, Islamic star patterns, Muqarnas 4 (1987), pp. 182–197.
712
+ 26. P.J. Lu and P.J. Steinhardt, Decagonal and quasi-crystalline tilings in medieval
713
+ islamic architecture, Science 315 (2007), pp. 1106–1110.
714
+ 27. M. Pelletier and J. Bonner, A 7-Fold System for Creating Islamic Geometric Pat-
715
+ terns Part 2: Contemporary Expression, in Proceedings of Bridges 2012: Mathemat-
716
+ ics, Music, Art, Architecture, Culture, R. Bosch, D. McKenna, and R. Sarhangi,
717
+ eds., Phoenix, Arizona. Tessellations Publishing, 2012, pp. 149–156. Available at
718
+ http://archive.bridgesmathart.org/2012/bridges2012-149.html.
719
+ 28. K. Stephenson, Introduction to circle packing: The theory of discrete analytic func-
720
+ tions, Cambridge University Press, New York, 2005.
721
+ 29. B. Wichmann and J. Rigby, A penrose-type islamic interlacing pattern, Vi-
722
+ sual Mathematics 9 (2007). Avaiable at http://symmetry-us.com/Journals/
723
+ wichmann2007/penrose.html, accessed 12 August 2021.
724
+ 30. H. Zabrodsky, S. Peleg, and D. Avnir, Continuous symmetry measures, Journal of
725
+ the American Chemical Society 114 (1992), pp. 7843–7851.
726
+
8dAzT4oBgHgl3EQfgfzo/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8tFRT4oBgHgl3EQfpzcC/content/tmp_files/2301.13614v1.pdf.txt ADDED
@@ -0,0 +1,1117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Synchronized states in dissipatively coupled harmonic oscillator networks
2
+ Juan N. Moreno,1, ∗ Christopher W. W¨achtler,1, 2, † and Alexander Eisfeld1, 3, ‡
3
+ 1Max Planck Institut f¨ur Physik komplexer Systeme, N¨othnitzer Str. 38, 01187 Dresden, Germany
4
+ 2Department of Physics, University of California, Berkeley, California 94720, USA
5
+ 3Universit¨at Potsdam, Institut f¨ur Physik und Astronomie,
6
+ Karl-Liebknecht-Str. 24-25, 14476 Potsdam, Deutschland
7
+ The question under which conditions oscillators with slightly different frequencies synchronize
8
+ appears in various settings.
9
+ We show that synchronization can be achieved even for harmonic
10
+ oscillators that are bilinearly coupled via a purely dissipative interaction. By appropriately tuned
11
+ gain/loss stable dynamics may be achieved where for the cases studied in this work all oscillators
12
+ are synchronized. These findings are interpreted using the complex eigenvalues and eigenvectors of
13
+ the non-Hermitian matrix describing the dynamics of the system.
14
+ I.
15
+ INTRODUCTION
16
+ Synchronization is a fascinating phenomenon, which
17
+ can be interpreted as a display of cooperative behavior
18
+ appearing in many complex systems [1, 2]. Since the first
19
+ observation by Huygens in the late 1600s [3], it has been
20
+ studied in diverse communities, where it plays an im-
21
+ portant role in our understanding for example in electric
22
+ networks in engineering, circadian rhythms in biology,
23
+ pattern formation in statistical mechanics, and chemical
24
+ reactions in chemistry [4–6]. By now, it is seen as a uni-
25
+ versal phenomenon that is important both in fundamen-
26
+ tal studies and in technical applications, ranging from
27
+ laser networks [7], to phase-locked loops [8], Josephson
28
+ junction arrays [9, 10], spin-torque resonators [11], and
29
+ power grids [12].
30
+ Even today, the originally observed
31
+ phenomenon of clock synchronization remains a crucial
32
+ application for modern communication networks [13, 14].
33
+ Typically synchronization is viewed in terms of the ad-
34
+ justment of rhythms of autonomous oscillators, which at-
35
+ tain stable periodic orbits without active regulation from
36
+ the outside [15] and thus require nonlinearities in the
37
+ governing equations of motion. Far less common is the
38
+ investigation of synchronization in models that are lin-
39
+ ear in both the oscillators and the couplings. Without
40
+ dissipation, coupled harmonic oscillators form collective
41
+ eigenmodes, where the individual oscillators perform mo-
42
+ tion with a fixed phase relation. However, a system not
43
+ initialized in an eigenmode usually stays in a superposi-
44
+ tion of several eigenmodes with different eigenfrequencies
45
+ resulting in a beating pattern. Moreover, if the number
46
+ of coupled oscillators is large, the system dynamics does
47
+ not need to exhibit perfect revivals in general and syn-
48
+ chronized motion is absent.
49
+ Hence in a closed system
50
+ of oscillators, only for an eigenmode as initial condition
51
+ one obtains a time-independent phase relation between
52
+ the oscillators. However if the system is not closed, but
53
+ subject to gain and loss, the open system dynamics allow
54
55
56
57
+ for a situation where all eigenmodes but one are damped.
58
+ Then, synchronization is possible as long as the respec-
59
+ tive eigenstate is present in the initial state. However, in
60
+ order to achieve a situation where all but one mode are
61
+ damped, one needs to carefully balance gain and loss.
62
+ In contrast to a self-sustained system where the non-
63
+ linearity counteracts the dissipation (or gain) in order to
64
+ stabilize periodic orbits, a single linear harmonic oscilla-
65
+ tor only exhibits the following dynamics in the absence
66
+ of periodic driving: Either the dissipation exceeds the
67
+ gain, such that the amplitude of the dissipative systems
68
+ shrinks and eventually reaches a single point in phase
69
+ space, or the other way around, where the gain exceeds
70
+ the dissipation, the oscillation amplitude infinitely grows.
71
+ In the special case where both are equivalent the system
72
+ is effectively described by closed system dynamics with
73
+ infinitely many closed orbits in phase space depending
74
+ on the initial energy of system. However, when coupling
75
+ between linear oscillators are introduced, many more so-
76
+ lutions are possible.
77
+ Here, we investigate a network of linear harmonic os-
78
+ cillators subject to gain and loss. Generally, one would
79
+ consider each oscillator to couple to its own environment
80
+ and direct coupling between two or more entities in the
81
+ network.
82
+ However, a purely dissipative coupling leads
83
+ to intriguing phenomena also for self-sustained oscilla-
84
+ tors like for example oscillator death [1]. In our model
85
+ of linear oscillators, it allows for the emergence of dissi-
86
+ pation free subspaces in parameter space. Within these
87
+ subspaces we find periodic motion of all oscillators in the
88
+ network, that is starting from a (nearly) arbitrary initial
89
+ state the system reaches a regime during time propaga-
90
+ tion in which all oscillators exhibit synchronized motion
91
+ for a long time. At this point, let us specify the notion
92
+ of synchronization we use throughout this work:
93
+ — With ’long time’ we mean times long compared to the
94
+ eigenfrequencies of the individual oscillators and we fo-
95
+ cus on the case where all oscillators have small deviations
96
+ from a common ’mean frequency’. In the ideal case they
97
+ oscillate forever.
98
+ — With ’synchronized’ we mean that the oscillators
99
+ have a fixed phase relation.
100
+ Ideally we want that all
101
+ oscillators have the same amplitude. If this is the case,
102
+ arXiv:2301.13614v1 [nlin.CD] 30 Jan 2023
103
+
104
+ 2
105
+ then we denote it as full synchronization. If the system
106
+ is not in a fully synchronized state, we will characterize
107
+ its degree of synchronization by a suitable measure.
108
+ — With ’arbitrary’ initial state we mean that for most
109
+ initial states synchronization is achieved, yet there exist
110
+ some special initial conditions that do not lead to syn-
111
+ chronization.
112
+ We note that within the above definitions for
120
+ uncoupled oscillators one only finds synchronization,
121
+ when there is no gain and loss and all oscillators have
122
+ the same frequency.
123
+ The remainder of the paper is organized as follows: In
124
+ Sec. II A we summarize some general considerations of
125
+ synchronization for linearly coupled harmonic oscillators
126
+ important for our work, followed by the specific model
127
+ under investigation in Sec. II B. In the subsequent Sec. III
128
+ we discuss our results, which includes the special case of
129
+ two coupled oscillators in Sec. III A and the more general
130
+ case of many oscillators in Sec. III B. Finally, we conclude
131
+ in Sec. IV.
132
+ II.
133
+ MODEL AND BASIC FORMALISM
134
+ A.
135
+ General considerations of synchronization in
136
+ linear oscillator models
137
+ To introduce the basic concepts and notation, we con-
138
+ sider N harmonic oscillators in a network, each labeled
139
+ by a subscript n = 1,...,N. The motional state of each
140
+ oscillator is characterized by a time dependent complex
141
+ amplitude an(t) = ∣an(t)∣eiφn(t). If all oscillators in the
142
+ network oscillate with a common real frequency ωsyn
143
+ while their relative amplitudes remain constant, we will
144
+ refer to it as synchronization. Using a vector notation
145
+ ⃗a(t) = [a1(t),...,aN(t)]⊺, such synchronized motion may
146
+ be expressed as
147
+ ⃗a(t) = f(t)⃗asyne−iωsynt,
148
+ (1)
149
+ where f(t) is a real function that takes into account the
150
+ possibility that the amplitudes decay (or grow) over time,
151
+ which we will discuss in Sec. II B in more detail. In the
152
+ case of f(t) = 1 the motion represents a periodic steady
153
+ state, which we refer to as ideal synchronized motion.
154
+ The above notion is not sufficient to fully characterize
155
+ synchronized motion as for example a single oscillatory
156
+ site in the network (while all other oscillators are at rest)
157
+ also fulfills Eq. (1). It is thus necessary to also quan-
158
+ tify the degree of synchronization of a vector ⃗a, which we
159
+ denote by S(⃗a). To this end, we use the inverse partici-
160
+ pation ratio [16]
161
+ S(\vec{a}) = \frac{1}{\sum_{n=1}^{N} |a_n|^4},
165
+ (2)
166
+ which takes values between 1 and N. Here, a value of
167
+ S = 1 corresponds to the aforementioned case of a single
168
+ oscillator in motion, whereas a value of S = N indicates
169
+ FIG. 1.
170
+ Illustration of potentially attainable synchronized
171
+ motion in a network of N = 3 oscillators. The inverse partici-
172
+ pation ratio S(⃗a) increases from top to bottom in accordance
173
+ with the transition from partially to fully synchronized mo-
174
+ tion.
175
+ fully synchronized motion, i.e. all nodes have the same
176
+ amplitude (without phase). Values of S = ˜N < N cor-
177
+ respond to partial synchronization of approximately ˜N
178
+ oscillators. In Fig. 1, we illustrate different degrees of
179
+ synchronization and their respective dynamics in a net-
180
+ work of three oscillators.
181
+ The time evolution of a linearly coupled network of
182
+ harmonic oscillators in the presence of gain and loss is
183
+ generally expressed as
184
+ d
185
+ dt⃗a = −iW ⃗a,
186
+ (3)
187
+ where we assume the non-Hermitian matrix W to be
188
+ time-independent. Then, the state of the system at time
189
+ t is simply given by
190
+ ⃗a(t) = e−iW t⃗a(0),
191
+ (4)
192
+ where ⃗a(0) denotes the initial state at time t = 0. Thus,
193
+ the dynamics of the network is fully characterized by the
194
+ matrix W, in particular by its eigenvalues and eigenvec-
195
+ tors. Since W is (in general) non-Hermitian, there exist
196
+ right and left eigenvectors defined via
197
+ W ⃗cj =wj⃗cj
198
+ and
199
+ ⃗z†
200
+ jW = ⃗z†
201
+ jwj.
202
+ (5)
203
+
204
+ 0003
205
+ Here, † indicates the complex conjugated and transpose,
206
+ and the eigenvectors are normalized according to
207
+ ⃗c†
208
+ j⃗cj = 1
209
+ and
210
+ ⃗z†
211
+ j′⃗cj = δj′j.
212
+ (6)
213
+ Note, that in general ⃗c†
214
+ j ≠ ⃗z†
215
+ j. The matrix W can now be
216
+ expressed as W = ∑j wj⃗cj ⃗z†
217
+ j, such that the time evolution
218
+ of Eq. (4) is conveniently given by
219
+ ⃗a(t) = ∑
220
+ j
221
+ ⃗cje−iwjt ⃗z†
222
+ j⃗a(0),
223
+ (7)
224
+ where ⃗z†
225
+ j⃗a(0) is the initial weight of the eigenstate j.
226
+ While the real part of the complex eigenvalue wj deter-
227
+ mines the oscillation frequency of eigenmode j, the imag-
228
+ inary part Im[wj] determines whether the oscillatory mo-
229
+ tion is damped (Im[wj] < 0), growing (Im[wj] > 0) or
230
+ oscillates forever (Im[wj] = 0).
231
+ In order to obtain a time evolution of the form of
232
+ Eq. (1) with f(t) = 1 after some initial transient time, i.e.
233
+ dynamically reach the eigenstate with Im[wsync] = 0, the
234
+ initial state needs to have non-vanishing overlap with the
235
+ synchronized eigenstate [\vec{z}_{\mathrm{sync}}^{\dagger}\,\vec{a}(0) ≠ 0]. Furthermore, all
237
+ other eigenstates present in the initial state need to have
238
+ Im[wj] < 0, such that they are damped. In the following,
239
+ we will therefore search for conditions and parameters
240
+ under which one eigenstate fulfills Im[wsync] = 0 while
241
+ all other eigenstates fulfill Im[wj] < 0. Subsequently, we
242
+ will characterize the degree of synchronization of the re-
243
+ sulting state in terms of S; cf. Eq. (2).
244
+ B.
245
+ Linear oscillators with purely dissipative
246
+ coupling
247
+ After
248
+ the general considerations of the previous
249
+ Sec. II A, let us now specify the network of interest
250
+ throughout the remainder of this work: The individual
251
+ oscillators have frequencies Ωn ∈ R and are arranged on
252
+ a ring. Each oscillator is subject to gain/loss mediated
253
+ via the rate γ ∈ R and interacts with its two nearest
254
+ neighbors via a purely dissipative coupling v ∈ R. For
255
+ simplicity we assume that the coupling and dissipation
256
+ is equal for all oscillators; we are interested in the pos-
257
+ sibility of synchronization when the frequency of each
258
+ oscillator is different, which corresponds to the notion of
259
+ synchronization as an adjustment of rhythms due to the
260
+ presence of interactions. The equation of motion of the
261
+ n-th oscillator is then given by
262
+ d
263
+ dtan =(−iΩn − γ)an − v(an+1 + an−1),
264
+ (8)
265
+ with a0 ≡ aN and aN+1 ≡ a1 to fulfill periodic boundary
266
+ conditions. Note that positive values of γ represent loss
267
+ whereas negative values correspond to gain. To simplify
268
+ notation we express all energies in units of v and take v
269
+ to be positive (the case of negative v will be discussed
270
+ later), i.e. ωn = Ωn/v, g = γ/v and τ = tv. Furthermore,
271
+ we parameterize the frequencies as ωn = ¯ω + ∆n. Then,
272
+ Eq. (8) becomes
273
+ d
274
+ dτ an =[−i(¯ω + ∆n) − g]an − (an+1 + an−1).
275
+ (9)
276
+ Our goal in the following is to determine the values of
277
+ g for a given set of frequency differences ∆n, such that
278
+ the oscillators perform synchronized motion in the sense
279
+ discussed in Sec. II A.
280
+ As the term (−i¯ω − g) is independent of the oscilla-
281
+ tor index n, it only trivially contributes to the overall
282
+ dynamics; specifically oscillations with frequency ¯ω and
283
+ damping/growing with rate g. In matrix representation,
284
+ Eq. (9) can be written in the form of Eq. (3) with t → τ
285
+ and W = (¯ω − ig)I + M, where
286
+ M =
287
+
288
+ ⎜⎜⎜⎜⎜
289
+
290
+ ∆1
291
+ −i
292
+ 0
293
+ ...
294
+ −i
295
+ −i
296
+ ∆2
297
+ −i
298
+ ...
299
+ 0
300
+ 0
301
+ −i
302
+
303
+ −i
304
+ 0
305
+ ...
306
+ −i ∆N
307
+
308
+ ⎟⎟⎟⎟⎟
309
+
310
+ (10)
311
+ Note, that the (left and right) eigenvectors of W and
312
+ M are identical and their eigenvalues are simply shifted,
313
+ i.e., if M⃗cj = λj⃗cj then W ⃗cj = wj⃗cj with
314
+ wj = ¯ω + Re[λj] + i(−g + Im[λj]),
315
+ v > 0.
316
+ (11)
317
+ Moreover, as M only depends on ∆n, the eigenvectors
318
+ and thus the degree of synchronization S(⃗c) is indepen-
319
+ dent of g.
320
+ Let us summarize the general conditions of the pre-
321
+ vious Sec. II A for synchronized motion tailored to the
322
+ specifics of our system discussed here:
323
+ (i) There exists a single eigenstate ⃗csync of W with
324
+ purely real eigenvalue. This corresponds to a state
325
+ ⃗csync that fulfills −g+Im[λsync] = 0, where M⃗csync =
326
+ λsync⃗csync.
327
+ (ii) All other eigenstates of W have negative imaginary
328
+ part for the set of parameters determined in (i).
329
+ That corresponds to −g+Im[λj] < 0 for all j ≠ sync.
330
+ (iii) The synchronization measure S(⃗csync) should be as
331
+ large as possible. Ideally S(⃗csync) = N.
332
+ So far, we have taken v to be positive. For negative
333
+ values of v we define the scaled energies in terms of −v
334
+ such that ωn = ¯ω + ∆n = −Ωn/v, g = −γn/v, and τ = −tv.
335
+ Then, Eq. (9) becomes
336
+ d
337
+ dτ an = [−i(¯ω + ∆n) − g]an + (an+1 + an−1),
338
+ (12)
339
+ where the first term remains identical while the sign
340
+ changes in front of the oscillator couplings. As a result,
341
+ the eigenvalues of W [cf. Eqs. (10) and (11)] are given
342
+ by
343
+ wj = ¯ω + Re[λj] + i(−g − Im[λj]),
344
+ v < 0.
345
+ (13)
346
+
347
+ 4
348
+ Here, the real part of the eigenvalues (as well as the cor-
349
+ responding eigenstates and thus the measure S) remains
350
+ unchanged, while the imaginary part simply changes its
351
+ sign. Thus, eigenstates that are decaying for v > 0, are
352
+ growing for v < 0 and vice versa.
353
+ III.
354
+ RESULTS
355
+ In the following we first discuss the case of N = 2 in
356
+ Sec. III A, which provides a clear picture of the basic
357
+ mechanism underlying synchronization of linear oscilla-
358
+ tors interacting via dissipative couplings. Subsequently
359
+ in Sec. III B, we consider a ring of N > 2 oscillators and
360
+ show that also in this case synchronized motion may be
361
+ achieved and follows similar arguments as before.
362
+ A.
363
+ Two coupled oscillators (N = 2)
364
+ Without loss of generality, we may choose the scaled
365
+ frequency differences of the two oscillators to be ∆1 =
366
+ +∆ and ∆2 = −∆, such that matrix M governing the
367
+ dynamics [cf. Eq. (10)] is given by
368
+ M = (∆
369
+ −i
370
+ −i −∆)
371
+ (14)
372
+ Here, we have chosen v > 0. However, from the discussion
373
+ in Sec. II B we know that a negative value of v simply
374
+ results in a change of sign of the imaginary part of the
375
+ eigenvalues. The two eigenvalues and corresponding right
376
+ eigenvectors of M are given by
377
+ \lambda_{\pm} = \pm\sqrt{\Delta^2 - 1},
+ (15)
+ \vec{c}_{\pm} = \frac{1}{\sqrt{1 + |\Delta \pm \sqrt{\Delta^2 - 1}|^2}} \begin{pmatrix} i(\Delta \pm \sqrt{\Delta^2 - 1}) \\ 1 \end{pmatrix}.
+ (16)
393
+ If ∣∆∣ < 1 ( ∣∆∣ > 1) the eigenvalues λ± are both purely
394
+ imaginary (real) and non-degenerate.
395
+ In contrast, for
396
+ ∆ = ±1 not only are the eigenstates degenerate but also
397
+ the corresponding eigenvectors coalesce, i.e., these values
398
+ of ∆ correspond to exceptional points. The impact of
399
+ exceptional points on synchronization goes beyond the
400
+ scope of the present work and we will focus in the follow-
401
+ ing on the cases ∣∆∣ > 1 and ∣∆∣ < 1.
402
+ a.
403
+ Overview:
404
+ As discussed in Sec. II B, the eigenen-
405
+ ergies w± = ¯ω+Re[λ±]+i(−g+Im[λ±]) describe the overall
406
+ possibility of long lasting synchronized motion in terms
407
+ of oscillation frequency and damping, while S quantifies
408
+ the degree of synchronization. Let us start by consid-
409
+ ering the imaginary part of the eigenenergies w± given
410
+ by Im[w±] = −g + Im[λ±], which determines the (expo-
411
+ nential) damping or growing. In Figs. 2 (a) and (b) we
412
+ show Im(w−) and Im(w+), respectively, as a function of
413
+ the frequency difference ∆ and the dissipation strength
414
+ g. Note, that ∆ as well as g can take on positive and
415
+ FIG. 2. Top row: Density plots of the imaginary part Im(w±)
416
+ as a function of the frequency difference ∆ and the dissipation
417
+ strength g: (a) w− and (b) w+. Dissipation-free synchroniza-
418
+ tion is found along the white line. Middle row: Correspond-
419
+ ing real part (c) Re(w−) and (d) Re(w+) as a function of ∆,
420
+ which corresponds to the oscillation frequency of the respec-
421
+ tive eigenvector. Last row: Degree of synchronization S as
422
+ function of ∆ of the eigenvalue (e) ⃗c− and (f) ⃗c+. The largest
423
+ value is found for ∣∆∣ < 1 corresponding to fully synchronized
424
+ motion.
425
+ negative values. The red areas in Fig. 2(a) and (b) indi-
426
+ cate positive values corresponding to amplitude growth
427
+ whereas the blue areas indicate negative values and thus
428
+ amplitude damping. The two regions are separated by
429
+ a white region, where amplitudes neither increase nor
430
+ decrease. We discuss this most relevant region for dissi-
431
+ pation free synchronization in more detail below.
432
+ As expected from the discussion above, quite different
433
+ behavior of Im[w±] is observed depending on whether
434
+ ∣∆∣ > 1 or ∣∆∣ < 1.
435
+ Similarly, a pronounced difference
436
+ is found in the behavior of the real part Re[w±] = ¯ω +
437
+ Re[λ±], which describes the oscillation frequency of the
438
+ eigenmodes and is shown in Fig. 2(c) and (d). For ∣∆∣ < 1
439
+ the frequency remains unchanged and both eigenstates
440
+ oscillate with the mean frequency ¯ω. However, for ∣∆∣ > 1
441
+ the frequency of the − state [cf. Fig. 2(c)] is decreasing,
442
+ while that of the + state [cf. Fig. 2(d)] is increasing. Both
443
+ follow the functional form of a square-root with opposite
444
+ sign, cf. Eq. (15). Lastly, in Fig. 2(e) and (f) we show
445
+ the degree of synchronization S as function of ∆, which
446
+
447
+ -state
448
+ +state
449
+ 2
450
+ 2.0
451
+ (a)
452
+ (b)
453
+ 1
454
+ 1.0
455
+ Im(W=)
456
+ 0
457
+ 0.0
458
+ -1
459
+ -1.0
460
+ -2
461
+ -2.0
462
+ 2
463
+ (c)
464
+ (d)
465
+ 0
466
+ -2
467
+ +
468
+ t
469
+ 2.5
470
+ (e)
471
+ (f)
472
+ 2.0
473
+ 1.5
474
+ 1.0
475
+ -2
476
+ -1
477
+ 0
478
+ 1
479
+ 2
480
+ -2
481
+ -1
482
+ 0
483
+ 1
484
+ 2
485
+ V
486
+ V5
487
+ is given by [cf. Eq. (16)]
488
+ S(\vec{c}_{\pm}, \Delta) = \begin{cases} 2, & |\Delta| < 1 \\ \frac{2\Delta^2}{2\Delta^2 - 1}, & |\Delta| > 1 \end{cases}.
494
+ (17)
495
+ As expected, the maximum value lies within the range
496
+ of ∣∆∣ < 1 and rapidly decreases as ∣∆∣ increases, indi-
497
+ cating the absence of synchronization. After this broad
498
+ overview we will in the following discuss in more detail
499
+ the potential of synchronized motion in the system of
500
+ N = 2 oscillators, focusing on the three criteria (i)–(iii)
501
+ formulated in Sec. II B.
502
+ b.
503
+ Detailed discussion of the regime ∣∆∣ > 1:
504
+ In this
505
+ case, the eigenvalues λ± become purely real [cf. Eq. (15)],
506
+ such that the eigenenergies take the simple form w± =
507
+ (¯ω±
508
+
509
+ ∆2 − 1)−ig. Most importantly, the imaginary part
510
+ is solely given by −g for both states and is independent
511
+ of ∆, which can also be seen in Figs. 2(a) and (b). Thus,
512
+ both eigenstates show the same dynamical response to
513
+ dissipation, i.e., either both are dissipation free (g = 0) or
514
+ the amplitudes decay/increase with the same rate given
515
+ by −g. Although there exists a dissipation free subspace
516
+ for g = 0, and thus requirement (i) is fulfilled, require-
517
+ ment (ii) cannot be fulfilled simultaneously. The reasons
518
+ is that both states have different oscillation frequencies
519
+ ¯ω ±
520
+
521
+ ∆2 − 1 and none of them is decaying, resulting in a
522
+ beating pattern. We show an example of such a time evo-
523
+ lution of the real amplitudes Re(an) governed by Eq. (9)
524
+ in Fig. 3(a) for ∆ = 1.1 and g = 0.
525
+ c.
526
+ Detailed discussion of the regime ∣∆∣ < 1:
527
+ Af-
528
+ ter we have ruled out the possibility of synchroniza-
529
+ tion [according to our conditions (i)–(iii)] in the previ-
530
+ ous regime, we now discuss the case of ∣∆∣ < 1, where
531
+ dissipation free synchronized motion is indeed possible.
532
+ For ∣∆∣ < 1 the eigenvalues λ± are purely imaginary [cf.
533
+ Eq. (15)] and dissipation free states are determined by
534
+ 0 = −g ±
535
+
536
+ ∣1 − ∆2∣, such that condition (i) may be ful-
537
+ filled. In contrast to the previous case, we need to differ-
538
+ entiate between the two states: Dissipation vanishes for
539
+ the + state if g = g+ ≡
540
+
541
+ ∣1 − ∆2∣, and for the − state if
542
+ g = g− ≡ −
543
+
544
+ ∣1 − ∆2∣. Each of these solutions describes a
545
+ half circle with radius one, cf. Figs. 2(a) and (b).
546
+ We now examine whether condition (ii) is also ful-
547
+ filled in this regime.
548
+ When the − state is dissipation
549
+ free, the amplitude of the + state is growing exponen-
550
+ tially as Im[w+(g−)] = −g− +
551
+
552
+ 1 − ∆2 = 2
553
+
554
+ 1 − ∆2 > 0.
555
+ This is also verified by Fig. 2: Along the white region
556
+ in panel (a) within the regime ∣∆∣ < 1, the area in panel
557
+ (b) is red. In contrast, along the white region in panel
558
+ (b), the area in panel (a) is blue, i.e. while the + state
559
+ is dissipation free, the − state is damped. Specifically,
560
+ Im[w−(g+)] = −g+ −
561
+
562
+ 1 − ∆2 = −2
563
+
564
+ 1 − ∆2 < 0.
565
+ Thus,
566
+ synchronized motion for ∣∆∣ < 1 is found whenever the
567
+ condition g =
568
+
569
+ 1 − ∆2 is fulfilled. Moreover, this state
570
+ has a degree of synchronization of S = 2 and is therefore
571
+ fully synchronized for all ∣∆∣ < 1.
572
+ In Fig. 3(b) we show the dynamics for the parameters
573
+ ∆ = 0.6 and g = 0.8 when starting in the initial state
574
+ FIG. 3. Examples of different dissipation free dynamics found
575
+ for the case of N = 2 oscillators. We plot the real amplitude
576
+ Re(an(τ)) of the first oscillator in red (n = 1) and the second
577
+ one in blue (n = 2).
578
+ (a) For ∆ = 1.1 and g = 0, the pres-
579
+ ence of two oscillation frequencies within the dissipation free
580
+ subspace leads to beating. (b) For ∆ = 0.6 and g = 0.8, only
581
+ a single eigenstate with its respective oscillation frequency is
582
+ dissipation free, while the other is damped leading to a pe-
583
+ riodic steady state of both oscillators, i.e., synchronization.
584
+ Parameters:
585
+ ¯ω = 10, ⃗a(0) = (1, 0)⊺.
586
+ These results are ob-
587
+ tained by direct integration of the differential equation.
588
+ It
589
+ agrees perfectly with the results obtained via diagonalization.
590
+ ⃗a(0) = (1,0)⊺.
591
+ As discussed previously, we expect to
592
+ find synchronized motion for these parameters. Indeed,
593
+ after a short transient time of τ ≳ 2 a stationary oscil-
594
+ latory motion emerges where both oscillators have the
595
+ same amplitude. Note the phase shift between the two
596
+ oscillators, which may be understood as follows: Consid-
597
+ ering the + state ⃗c+ [cf. Eq. (16)], the long time dynamics
598
+ is given by ⃗async(t) = ⃗c+ exp[−iω+t]; cf. Eq. (7). Then,
599
+ Re[⃗async(t)] =N (cos(ω+t + φ)
600
+ cos(ω+t) ),
601
+ (18)
602
+ where the phase difference φ fulfills tan(φ) = −
603
+
604
+ 1 − ∆2/∆
605
+ and N = (1 + ∣∆ +
606
+
607
+ ∆2 − 1∣2)−1/2 is the normalization
608
+ constant from Eq. (16).
609
+ B.
610
+ Many coupled oscillators on a ring
611
+ In this section, we generalize our results from the pre-
612
+ vious Sec. III A for the case of two coupled oscillators to
613
+ large numbers of oscillators arranged on a ring. Also for
614
+ the case of N oscillators, the dynamics is governed by
615
+ Eqs. (9)–(11). In the following we will first discuss the
616
+ case of equal frequencies of all oscillators. Afterwards, we
617
+ discuss the more relevant case of frequency differences.
618
+
619
+ 5.0
620
+ (a)
621
+ Re(ai)
622
+ Re(a2)
623
+ 2.5
624
+ Re(ai)
625
+ 0.0
626
+ -2.5
627
+ -5.0
628
+ 2
629
+ (b)
630
+ Re(ai)
631
+ Re(a2)
632
+ 1
633
+ Re(ai)
634
+ 188888888888888
635
+ 0
636
+ -1
637
+ -2
638
+ 0
639
+ 2
640
+ 4
641
+ 6
642
+ 8
643
+ 10
644
+ 26
645
+ 1.
646
+ Identical frequencies of all oscillators
647
+ To gain a basic understanding of the eigenstates and
648
+ eigenvector structure we now consider the case when all
649
+ frequencies are identical, i.e. ∆n = ∆. Then, the eigen-
650
+ values and (right) eigenvectors of W are given by
651
+ wj = (¯ω + ∆) − i(g ± 2cos(2πj
652
+ N )),
653
+ v ≷ 0,
654
+ (19)
655
+ ⃗cj =
656
+ 1
657
+
658
+ N
659
+ N
660
+
661
+ n=1
662
+ ei 2π
663
+ N jn⃗en,
664
+ (20)
665
+ where ⃗en is the nth unit-vector. As all eigenstates are
666
+ independent of ∆ or g, one sees that most eigenstates
667
+ are degenerate.
668
+ For even N only the eigenstates with
669
+ j = N and j = N/2 are not degenerate; for odd N only
670
+ the state with j = N is not degenerate. Moreover, the real
671
+ part of the eigenenergies wj, i.e. the oscillation frequen-
672
+ cies, is simply shifted by ∆ for all eigenstates. However,
673
+ the imaginary part of wj, which dictates the dissipation
674
+ and more importantly the possibility of dissipation free
675
+ dynamics, requires a more careful analysis.
676
+ a.
677
+ Positive v:
678
+ The imaginary part of the jth eigen-
679
+ value Im[wj] = 0 if g = gj ≡ −2cos(2πj/N). Then, all
680
+ other eigenvalues wj′ with j′ ≠ j have imaginary part
681
+ given by
682
+ Im[wj′(gj)] = 2cos(2πj
683
+ N ) − 2cos(2πj′
684
+ N ).
685
+ (21)
686
+ Furthermore, we need to distinguish the two cases of
687
+ odd and even N:
688
+ For an odd number of oscillators
689
+ and j ≠ (N ± 1)/2 there is always at least one j′ with
690
+ Im[wj′(gj)] > 0, and thus condition (ii) is not fulfilled.
691
+ On the other hand, if j = (N ± 1)/2 all other eigenstates
692
+ are damped except for j′ = j ∓ 1. Yet, this state is also
693
+ dissipation free and condition (ii) cannot be fulfilled. For
694
+ even N, however, there exists a non-degenerate eigen-
695
+ state j = N/2 that fulfills (i) and (ii). Then, g = 2 and
696
+ ⃗csyn ≡ ⃗cN/2 =
697
+ 1
698
+
699
+ N (−1,1...,−1,1)⊺, which corresponds
700
+ to anti-phase synchronization between nearest neighbors
701
+ with the same frequency ¯ω + ∆.
702
+ b.
703
+ Negative v:
704
+ In contrast to the previous case, the
705
+ imaginary part of the jth eigenstate now is equal to zero
706
+ if g = gj ≡ +2cos(2πj/N) and thus Eq. (21) becomes
707
+ Im[wj′(gj)] = −2cos(2πj
708
+ N ) + 2cos(2πj′
709
+ N )
710
+ (22)
711
+ for all other eigenvalues wj′ with j′ ≠ j. Here, only if
712
+ j = N are all other states damped and conditions (i)
713
+ and (ii) fulfilled. The corresponding eigenstate is ⃗csyn ≡
714
+ ⃗cN =
715
+ 1
716
+
717
+ N (1,...,1)⊺, i.e., in-phase synchronization of all
718
+ oscillators with frequency ¯ω + ∆.
719
+ 2.
720
+ Oscillators with different frequencies
721
+ In this section, we discuss the case of arbitrary fre-
722
+ quency differences ∆n for each oscillator on the ring. In
723
+ this case, the matrix M [cf.
724
+ Eq. (10)] can no longer
725
+ be diagonalized analytically. Therefore, we discuss the
726
+ basic behavior along a few examples of ∆n and solve
727
+ the eigenvalue problem numerically. Yet, these examples
728
+ demonstrate that dissipation free synchronized motion
729
+ also exists in such a general setup.
730
+ A convenient way to investigate how the properties
731
+ of synchronization are affected by changes of ∆n, is to
732
+ parametrize the frequency difference according to
733
+ ∆n = sn∆,
734
+ (23)
735
+ and analyze the behavior of the eigenvalues and eigen-
736
+ vectors of W as a function of ∆ for a given (and fixed)
737
+ set of sn. Furthermore, we choose v to be negative, such
738
+ that for ∆ = 0 there exists a fully synchronized eigenstate
739
+ if g = 2 (see the discussion in Sec. III B 1b). Note that a
740
+ negative value of v implies gj = Im[λj].
741
+ In the following we consider as example the case of
742
+ N = 5 oscillators and show in Fig. 4 the results of the nu-
743
+ merical diagonalization of the matrix M for three differ-
744
+ ent realizations of ⃗s = (s1,...,s5) (different columns). We
745
+ choose the largest difference between neighboring values
746
+ of sn to be equal to one, i.e. max[sn−sn+1] = 1. Then, for
747
+ ∆ < 1 all frequency differences between neighboring os-
748
+ cillators are always smaller than the dissipative coupling
749
+ between them (which has magnitude one).
750
+ The case of N = 2 in our network of oscillators allows us
751
+ to represent the full parameter space as shown in Fig. 2
752
+ and identify the dissipation free subspaces and synchro-
753
+ nization within. However, for larger system sizes (as con-
754
+ sidered now) a representation similar to Fig. 2 becomes
755
+ very space consuming. Yet, a dissipation free subspace is
756
+ always necessary for synchronization, which corresponds
757
+ to the white lines in Figs. 2(a) and (b).
758
+ Thus, in or-
759
+ der to determine whether conditions (i)–(iii) are fulfilled,
760
+ it is sufficient to only search along the parameters for
761
+ which each eigenstate becomes dissipation free. In par-
762
+ ticular, the relevant information of Fig. 2(a) and (b) may
763
+ be conveniently combined to contain only g± = Im[λ±] as
764
+ function of ∆. Accordingly, the top row of Fig. 4 shows
765
+ the imaginary part of all eigenvalues Im[λj] as function
766
+ of the parameter ∆ and the middle row shows the re-
767
+ spective real parts Re[λj].
768
+ Lastly, in the bottom row
769
+ we plot the degree of synchronization S of each eigen-
770
+ vector also as function of ∆. The eigenvalues of M are
771
+ sorted in descending order of their imaginary parts, i.e.
772
+ Im[λ1] > Im[λ2] > ⋅⋅⋅ > Im[λN].
773
+ In the following we discuss different regimes of ∆ and
774
+ its impact on the possibility of synchronized motion in
775
+ accordance with conditions (i)–(iii).
776
+ We focus on the
777
+ eigenstate ⃗c1 with largest imaginary part Im[λ1] (high-
778
+ lighted as thick blue lines in Fig. 4). The reason is that
779
+ for g = Im[λ1] the eigenstate ⃗c1 becomes dissipation free
780
+ while all other eigenstates are simultaneously damped. In
781
+ contrast, if we would choose g such that another eigen-
782
+ state ⃗cj≠1 would become dissipation free, there is at least
783
+ one eigenstate that is exponentially growing. It is thus
784
+
785
+ 7
786
+ 2
787
+ 1
788
+ 0
789
+ 1
790
+ 2
791
+ Im( i)
792
+ 1
793
+ 2
794
+ 3
795
+ 4
796
+ 5
797
+ 10
798
+ 5
799
+ 0
800
+ 5
801
+ 10
802
+ Re( i)
803
+ 0
804
+ 1
805
+ 2
806
+ 3
807
+ 4
808
+ 5
809
+ 1
810
+ 2
811
+ 3
812
+ 4
813
+ 5
814
+ (ci)
815
+ s=[0.14, 0.2, 1.2, -0.46, -1.1]
816
+ 1
817
+ 2
818
+ 3
819
+ 4
820
+ 5
821
+ 0
822
+ 1
823
+ 2
824
+ 3
825
+ 4
826
+ 5
827
+ s=[-0.24, -0.07, 0.93, -0.08, -0.54]
828
+ 1
829
+ 2
830
+ 3
831
+ 4
832
+ 5
833
+ 0
834
+ 1
835
+ 2
836
+ 3
837
+ 4
838
+ 5
839
+ s=[-0.04, 0.22, 1.22, -0.77, -0.63]
840
+ FIG. 4. Examples of dissipation free and (fully) synchronized dynamics in a ring of N = 5 oscillators with random frequency
841
+ disorder. The three different columns correspond to three different set of (scaled) frequency realizations ⃗s. The value of v is
842
+ taken to be negative. In the top row we show the imaginary part Im[λj] of the eigenvalues λj of the matrix M as a function
843
+ ∆. The middle row shows the corresponding real part Re[λj] and the bottom row the degree of synchronization S(⃗cj) of the
844
+ corresponding eigenstates ⃗cj. For all three considered realizations, there exists an eigenstate (blue) with the maximum value of
845
+ S (bottom row) for small values of ∆ ≲ 1. This eigenstate also has the largest imaginary part of its associated eigenvalue (top
846
+ row), which allows the tuning g in such a way that it becomes dissipation free while all other eigenstates are damped.
847
+ sufficient to only analyze the possibility of synchroniza-
848
+ tion of ⃗c1 in the following.
849
+ a.
850
+ No frequency difference (∆ = 0):
851
+ This means
852
+ that there are no variations in the oscillator frequen-
853
+ cies and the situation is exactly the same as discussed
854
+ in Sec. III B 1b. Consequently, the eigenvalues of W are
855
+ given by Eq. (19). From the discussion in Sec. III B 1b,
856
+ we know that if g = 2 = Im[λsyn] there exists a dissi-
857
+ pation free synchronized state ⃗csyn ≡
858
+ 1
859
+
860
+ 5(1,...,1)⊺ with
861
+ associated real eigenvalue wsyn = ¯ω, i.e. all oscillators are
862
+ in phase and oscillate with frequency ¯ω. This is exactly
863
+ what we observe in Fig. 4: the eigenvalue with largest
864
+ imaginary part has imaginary part Im[λ1] = 2 (blue thick
865
+ lines in the top row). Note that Im[λ2] = Im[λ3] and
866
+ Im[λ4] = Im[λ5]. Furthermore, Re[λj] = 0 (middle row)
867
+ which implies an oscillation frequency of ¯ω.
868
+ b.
869
+ Small frequency differences (0 < ∆ < 1):
870
+ In this
871
+ regime, the disorder in the frequency differences between
872
+ nearest neighboring oscillators always remains smaller
873
+ than the coupling between them (which is 1). We thus
874
+ expect that the degree of synchronization also remains
875
+ large [S(⃗c1) ≈ N], i.e. the full delocalization of the eigen-
876
+ state ⃗c1 persists. In the bottom row of Fig. 4 we observe
877
+ exactly this behavior of the thick blue line correspond-
878
+ ing to ⃗c1: For small values of ∆, S(⃗c1) is maximal and
879
+ slowly decreases as ∆ approaches the value of 1. Thus,
880
+ the synchronized state remains close to be fully synchro-
881
+ nized within this regime [condition (iii)]. Note, that the
882
+ values for which S(⃗c1) starts to decrease depends on the
883
+ specific realization of disorder ⃗s.
884
+ The imaginary part of the corresponding eigenvalue
885
+ (top row) continues to be the largest value of all eigen-
886
+ values (blue thick line), Im[λ1] > Im[λj≠1].
887
+ Thus, for
888
+ g = Im[λ1] the eigenstate ⃗c1 becomes dissipation free
889
+ while all other eigenstates are damped, i.e. conditions (i)
890
+ and (ii) are fulfilled. As ∆ increases, Im[λ1] decreases
891
+ resulting from the larger amount of frequency disorder.
892
+ Simultaneously, the real part Re[λ1] remains close to 0
893
+ such that the oscillation frequency of the synchronized
894
+ state ⃗c1 also continues to be close ¯ω. Note, the value of
895
+ Re[λ1] only affects the oscillation frequency.
896
+ c.
897
+ Large frequency differences (∆ ≥ 1):
898
+ As ∆ is
899
+ increased further, the frequency difference exceeds the
900
+ nearest neighbor interaction such that – similar to (An-
901
+ derson) localization in finite systems [17] – the degree of
902
+ synchronization S(⃗c1) of the previously delocalized eigen-
903
+ state ⃗c1 rapidly decreases as ∆ increases; see blue thick
904
+
905
+ 8
906
+ lines in the bottom row of Fig. 4. Hence, only partial
907
+ synchronization is possible in this regime and condition
908
+ (iii) is not fulfilled.
909
+ At the same time, the largest imaginary value Im[λ1]
910
+ continues to decrease as function of ∆.
911
+ Yet, close to
912
+ ∆ = 1 it remains well separated from the second largest
913
+ imaginary value Im[λ2] such that a suitable choice of g
914
+ still allows for dissipation free dynamics with a single os-
915
+ cillation frequency. However, Im[λ1] may coalesce with
916
+ Im[λ2] for larger values of ∆ depending on the specific
917
+ realization of ⃗s. An example of such a degeneracy is ob-
918
+ served for ∆ ≈ 1.6 in the top right panel of Fig. 4. As a re-
919
+ sult, both eigenstates would be dissipation free resulting
920
+ in the beating pattern discussed previously in Sec. III A.
921
+ However, as mentioned above, only partial synchroniza-
922
+ tion is possible in this regime anyway.
923
+ d.
924
+ Very large frequency differences (∆ ≫ 1):
925
+ In the
926
+ regime of very large frequency differences, we expect that
927
+ the degree of synchronization takes its minimum value
928
+ S(⃗cj) = 1 for all eigenstates j since the scaling follows
929
+ ∆ ≫ v. This implies that the values ∆n = ∆sn are much
930
+ larger than the dissipative coupling strength v. Then, M
931
+ is approximately diagonal with eigenvectors ⃗cj nearly lo-
932
+ calized. Note that in this limit there is no synchronized
933
+ state.
934
+ We have checked numerically that for ∆ larger
935
+ than the smallest difference between the sn the synchro-
936
+ nization measure of all eigenstates approaches one, as
937
+ expected (not shown here).
938
+ Lastly, to demonstrate that the dynamics of the sys-
939
+ tem of oscillators is consistent with our discussion of
940
+ the different regimes above (obtained from analyzing the
941
+ eigenvectors and eigenfrequencies), we show in Fig. 5 ex-
942
+ amples of Re[an(τ)] as a function of the scaled time τ
943
+ for ⃗s = (1.14,0.20,1.20,−0.46,−1.1) (corresponding to the
944
+ first column of Fig. 4) for three different values of ∆. In
945
+ all cases, we choose the initial state ⃗a0 = (1,1,2,−1,−1).
946
+ Panel (a) corresponds to the case of vanishing fre-
947
+ quency difference, i.e. ∆ = 0. We choose the dissipation
948
+ g = 2 such that only the eigenstate with largest imagi-
949
+ nary part is dissipation free. As expected after a short
950
+ transient time of τ ≈ 2.5 all oscillators are in-phase syn-
951
+ chronized.
952
+ In panel (b), we increase the frequency difference to
953
+ be ∆ = 0.5. Hence, the synchronized state is dissipation
954
+ free for g = 1.91.
955
+ Analogous to the previous case (a),
956
+ all oscillators are synchronized after a transient time of
957
+ τ ≈ 2.5, yet with a small phase shift. Importantly, all
958
+ oscillators have the same amplitude consistent with the
959
+ finding of Fig. 4 that the degree of synchronization is
960
+ maximal [S(⃗c1) = 5 for this value of ∆].
961
+ Contrarily, in panel (c) where ∆ = 1.1 (and g = 1.51
962
+ to match the condition of dissipation free dynamics) the
963
+ amplitudes vary among the oscillators. This is in accor-
964
+ dance with S(⃗c1) < 5. However, still only a single oscil-
965
+ lation frequency is present (after some transient time).
966
+ This is an example of partial synchronization.
967
+ FIG. 5. Dynamical behavior of Re(ai(τ)) given by Eq.(14)
968
+ for different values of the scaling factor ∆.
969
+ In all three
970
+ cases the mean frequency of the oscillators is ¯ω = 10 and
971
+ the disorder is the same as in the first panel of Fig. (4), namely
972
+ ⃗s = (1.14, 0.20, 1.20, −0.46, −1.1). The coupling strength v
973
+ is taken to be negative and all frequencies are given in units
974
+ of ∣v∣. The initial condition is ⃗a0 = (1, 1, 2, −1, −1). Panels (a)
975
+ and (b) show fully synchronized motion, while panel (c) is an
976
+ example of partial synchronization.
977
+ IV.
978
+ CONCLUSIONS
979
+ In this work we have investigated the possibility of
980
+ long-lived synchronized motion in networks of harmonic
981
+ oscillators, which are subject to gain/loss and interact
982
+ via nearest neighbor dissipative couplings. In this con-
983
+ text, we refer to synchronization as the existence of a
984
+ single eigenstate of the dynamical matrix, which is dis-
985
+ sipation free.
986
+ Furthermore, if it attains the maximum
987
+ value of the (inverse) participation ratio we refer to it
988
+ as ‘fully synchronized’. We find that in the case of only
989
+ two coupled oscillators, synchronization may always be
990
+ achieved by tuning the gain appropriately as long as the
991
+ frequency difference between the two oscillators is smaller
992
+ than their interaction strength.
993
+ A similar behavior may be observed in larger net-
994
+ works, i.e. many oscillators arranged on a ring with near-
995
+ est neighbor interactions, yet the possibility of synchro-
996
+ nization then depends on the specifics of the system at
997
+ hand: If all oscillators are identical, synchronized col-
998
+
999
+ 1
1000
+ 2
1001
+ 3
1002
+ 4
1003
+ 5
1004
+ 2
1005
+ (a)
1006
+ A=0 g=2
1007
+ Re(ai)
1008
+ 2
1009
+ (b)
1010
+ A=0.5
1011
+ Re(
1012
+ 3
1013
+ A=1.1 g=1.51
1014
+ 2
1015
+ Re(ai)
1016
+ 0
1017
+ -2
1018
+
1019
+ 0
1020
+ 2
1021
+ 4
1022
+ 6
1023
+ 8
1024
+ 10
1025
+ 12
1026
+ 149
1027
+ lective motion may be achieved for an even number of
1028
+ sites with repulsive dissipative couplings (v positive) or
1029
+ an odd number of sites with attractive dissipative in-
1030
+ teractions (v negative). For small frequency differences
1031
+ compared to the coupling between the oscillators, this
1032
+ behavior remains, which we show specifically for the case
1033
+ of N = 5, yet it should also hold for larger networks.
1034
+ However, as the number of coupled oscillators increases,
1035
+ it becomes increasingly difficult to achieve full synchro-
1036
+ nization and may only be observed for very small fre-
1037
+ quency differences. For larger frequency differences, the
1038
+ (inverse) participation ratio decreases significantly such
1039
+ that only partial synchronization may be achieved. This
1040
+ is in accordance with Anderson localization, where on-
1041
+ site disorder results in localized eigenstates.
1042
+ However,
1043
+ as the dynamical matrix in this work is non-Hermitian,
1044
+ Anderson localization is not directly applicable.
1045
+ Here,
1046
+ future work is needed to study the interplay of synchro-
1047
+ nization and localization, in particular in the thermody-
1048
+ namic limit and arbitrary small frequency perturbations.
1049
+ Synchronization as discussed in this work is intimately
1050
+ related to the existence of dissipation free dynamics and
1051
+ thus isolated points/submanifolds in parameter space.
1052
+ Hence, they require a very precise tuning of gain and
1053
+ loss in order to obtain periodic steady states.
1054
+ This is
1055
+ however hard to achieve in any realistic experiment and
1056
+ the synchronized state will experience some gain or loss.
1057
+ We can relax the condition Im[wj] = 0 by solely requir-
1058
+ ing ∣Im[wj]∣ ≪ ∣Re[wj]∣, which means that the change of
1059
+ amplitude of oscillation is small over many oscillations.
1060
+ In addition, we then require Im[wj] ≪ Im[wsync], which
1061
+ means that all other eigenstates decay much faster than
1062
+ the ’synchronized’ one. In principle, one may relax the
1063
+ condition even further and demand that there exists only
1064
+ one state with Im[wj] > 0, while all other states fulfill
1065
+ Im[wi] ≤ 0.
1066
+ Then the synchronized state would grow
1067
+ while all other states are exponentially damped.
1068
+ ACKNOWLEDGMENTS
1069
+ C.W.W. acknowledges support from the Max-Planck
1070
+ Gesellschaft via the MPI-PKS Next Step fellowship and is
1071
+ financially supported by the Deutsche Forschungsgemein-
1072
+ schaft (DFG, German Research Foundation) – Project
1073
+ No. 496502542 (WA 5170/1-1). A.E. acknowledges sup-
1074
+ port from the DFG via a Heisenberg fellowship (Grant
1075
+ No EI 872/10-1).
1076
+ [1] A. Pikovsky, J. Kurths, M. Rosenblum, and J. Kurths,
1077
+ Synchronization: a universal concept in nonlinear sci-
1078
+ ences (Cambridge university press, Cambridge Univer-
1079
+ sity Press, 2003).
1080
+ [2] S. H. Strogatz, Nonlinear dynamics and chaos: with ap-
1081
+ plications to physics, biology, chemistry, and engineering
1082
+ (CRC Press, 2018).
1083
+ [3] M. Bennett, M. F. Schatz, H. Rockwood, and K. Wiesen-
1084
+ feld, Proc. R. Soc. Lond. A 458, 563 (2002).
1085
+ [4] S. H. Strogatz and I. Stewart, Sci. Am. 269, 102 (1993).
1086
+ [5] M. Rosenblum and A. Pikovsky, Contemp. Phys. 44, 401
1087
+ (2003).
1088
+ [6] A. Arenas, A. D´ıaz-Guilera, J. Kurths, Y. Moreno, and
1089
+ C. Zhou, Physics reports 469, 93 (2008).
1090
+ [7] K. Thornburg, M. M¨oller, R. Roy, T. Carr, R.-D. Li, and
1091
+ T. Erneux, Phys. Rev. E 55, 3865 (1997).
1092
+ [8] J. J. Lynch and R. A. York, IEEE Microw. Guide Wave
1093
+ Lett. 5, 213 (1995).
1094
+ [9] A.
1095
+ Cawthorne,
1096
+ P.
1097
+ Barbara,
1098
+ S.
1099
+ Shitov,
1100
+ C.
1101
+ Lobb,
1102
+ K. Wiesenfeld, and A. Zangwill, Phys. Rev. B 60, 7575
1103
+ (1999).
1104
+ [10] R. Fazio and H. Van Der Zant, Phys. Rep. 355, 235
1105
+ (2001).
1106
+ [11] A. Slavin, Nature Nanotech. 4, 479 (2009).
1107
+ [12] T. Nishikawa and A. E. Motter, New J. Phys. 17, 015012
1108
+ (2015).
1109
+ [13] J. C. Bellamy, IEEE Commun. Mag. 33, 70 (1995).
1110
+ [14] L. Narula and T. E. Humphreys, IEEE J. Sel. Top. Signal
1111
+ Process. 12, 749 (2018).
1112
+ [15] A. Jenkins, Phys. Rep. 525, 167 (2013).
1113
+ [16] B. Kramer and A. MacKinnon, Rep. Prog. Phys. 56, 1469
1114
+ (1993).
1115
+ [17] S. M¨obius, S. Vlaming, V. Malyshev, J. Knoester, and
1116
+ A. Eisfeld, arXiv:1404.4475 [cond-mat.dis-nn] (2014).
1117
+
8tFRT4oBgHgl3EQfpzcC/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
CdFAT4oBgHgl3EQftB4M/content/tmp_files/2301.08661v1.pdf.txt ADDED
@@ -0,0 +1,1412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Emergence of complex network topologies from flow-weighted optimization of network
2
+ efficiency
3
+ Sebastiano Bontorin,1, 2 Giulia Cencetti,1 Riccardo Gallotti,1 Bruno Lepri,1 and Manlio De Domenico3, 4, ∗
4
+ 1Fondazione Bruno Kessler, Via Sommarive 18, 38123 Povo (TN), Italy
5
+ 2Department of Physics, University of Trento, Via Sommarive 14, 38123 Povo (TN), Italy
6
+ 3University of Padua, Via Francesco Marzolo 8, 35131, Padua, Italy
7
+ 4Padua Center for Network Medicine, University of Padua
8
+ (Dated: January 23, 2023)
9
+ Transportation and distribution networks are a class of spatial networks that have been of interest
10
+ in recent years. These networks are often characterized by the presence of complex structures such
11
+ as central loops paired with peripheral branches, which can appear both in natural and man-made
12
+ systems, such as subway and railway networks.
13
+ In this study, we investigate the conditions for
14
+ the emergence of these non-trivial topological structures in the context of human transportation
15
+ in cities. We propose a minimal model for spatial networks generation, where a network lattice
16
+ acts as a spatial substrate and edge velocities and distances define an effective temporal distance
17
+ which quantifies the efficiency in exploring the urban space. Complex network topologies can be
18
+ recovered from the optimization of joint network paths and we study how the interplay between a flow
19
+ probability between two nodes in space and the associated travel cost influences the resulting optimal
20
+ network. In the perspective of urban transportation we simulate these flows by means of human
21
+ mobility models to obtain Origin-Destination matrices. We find that when using simple lattices, the
22
+ obtained optimal topologies transition from tree-like structures to more regular networks, depending
23
+ on the spatial range of flows. Remarkably, we find that branches paired to large loops structures
24
+ appear as optimal structures when the network is optimized for an interplay between heterogeneous
25
+ mobility patterns of small range travels and longer range ones typical of commuting. Finally, we
26
+ show that our framework is able to recover the statistical spatial properties of the Greater London
27
+ Area subway network.
28
+ I.
29
+ INTRODUCTION
30
+ Cities represent one of the most fascinating man-made
31
+ complex systems, exhibiting complex features ranging
32
+ on different scales: from their structure and dynamical
33
+ behavior, up to the scaling of socio-economic factors
34
+ with their size [1–5]. These features represent a strong
35
+ hint
36
+ towards the existence of universal
37
+ underlying
38
+ mechanics behind apparently very different cities [6–8].
39
+ Out of these structural properties, one of the most
40
+ relevant, as it plays a fundamental role mediating the
41
+ complex interplay between human dynamics [9, 10] and
42
+ mobility in urban context, are transportation networks
43
+ [11–15]. These networks are a class of spatial networks
44
+ whose properties have been investigated in the literature
45
+ during the last two decades [14, 16].
46
+ In particular,
47
+ they have been studied under the lens of optimality
48
+ conditions and minimization of cost-based functionals
49
+ [16], in order to identify specific features behind efficient
50
+ networks.
51
+ The concept of optimal networks [2] and
52
+ energy-like minimization [17] has its natural under-
53
+ standing in the physics language.
54
+ States of a system
55
+ which minimize a functional defining trade-offs between
56
+ system’s observables (e.g., free energy) represent the
57
+ most likely to be observed states of many real world
58
+ systems. While in some complex systems, such as cities,
59
+ these physical variables can not be derived from first
60
+ ∗ Corresponding author: [email protected]
61
+ principles, these analogies and concepts can still offer
62
+ a valid perspective and provide an embedding of these
63
+ systems in a space where the interplay between their
64
+ structure and dynamics can be unfolded and better
65
+ understood. Simple laws have been studied [16, 18] to
66
+ better understand the emergence of hierarchy and the
67
+ role of traffic in the network state.
68
+ Moreover, global
69
+ and local optimization criteria lie in the evolution of
70
+ man-made systems where policy makers and planners
71
+ can adopt some of these criteria in their plans [14].
72
+ Transportation networks are often characterized by
73
+ the presence of complex structures [19–21] such as loops
74
+ paired with branches [22], which can appear both in
75
+ natural [23] and man-made systems [14], like railway
76
+ and subway networks.
77
+ These structures represent the
78
+ key topological elements behind efficient public trans-
79
+ portation systems [20]. In this study we investigate the
80
+ conditions for the emergence of these non-trivial struc-
81
+ tures [18, 24] in the context of human transportation in
82
+ cities. We aim to reconstruct these topologies by means
83
+ of an optimal configuration [25] of the network state.
84
+ Under the assumption of a fixed total cost and a limited
85
+ set of high-capacity connections (e.g., a constraint in
86
+ the expenditure available on infrastructure), the optimal
87
+ configuration is the assignation of connections’ velocities,
88
+ or edges’ weights, such that the joint amount of time
89
+ required to travel between two nodes is minimized for
90
+ all pairs of nodes. Moreover, as these networks represent
91
+ the arteries in urban exploration/navigation via public
92
+ arXiv:2301.08661v1 [physics.soc-ph] 20 Jan 2023
93
+
94
+ 2
95
+ transportation, we study the role of traffic [18, 26] in
96
+ weighting the importance of specific set of connected
97
+ edges (paths).
98
+ We model the urban morphological
99
+ structures which generate heterogeneous distributions of
100
+ human mobility in space, biasing these optimal networks
101
+ to converge towards specific topological features.
102
+ We
103
+ aim to explore the minimum requirements and the
104
+ conditions for these optimization processes to reproduce
105
+ the empirical structures aforementioned.
106
+ At variance with the recent works on network effi-
107
+ ciency, we adopt some fundamentally different model-
108
+ ing choices. We evaluate the efficiency in terms of time
109
+ necessary to explore the network, where edges’ weights
110
+ act as travel speeds. We also weight path efficiency by
111
+ the traffic probability between nodes.
112
+ The underlying
113
+ network lattice (as represented in its simplest form by
114
+ the triangular lattice in the next sections) acts as a sub-
115
+ strate that allows the network to evolve [27] and pos-
116
+ sibly exhibit the network topological features typical to
117
+ real world systems.
118
+ On this framework, we show how
119
+ introducing simple probabilities biasing the optimal effi-
120
+ ciency between points in space force a transition between
121
+ a tree-like topology and a network resembling a simple
122
+ lattice.
123
+ We show also that the modeling of traffic-like
124
+ flows forces the emergence of preferential shared paths in
125
+ space. The optimal configuration of these shared paths
126
+ leads to complex topologies, which ultimately shows fea-
127
+ tures seen in real systems. Features such as a bi-modality
128
+ in the edges’ velocity distribution, characteristic of multi-
129
+ layered transportation, and the central core with loops
130
+ paired with branches typical of subway systems [20, 22]
131
+ are recovered.
132
+ We finally show an application of the
133
+ model within the Greater London Area, finding similari-
134
+ ties of the optimal model with its London Underground
135
+ network.
136
+ II.
137
+ FRAMEWORK FOR URBAN SPATIAL
138
+ MORPHOLOGY
139
+ We introduce here a general framework for spatial net-
140
+ works with the aim of recovering a minimal model for ur-
141
+ ban morphologies that encode both transportation prop-
142
+ erties and urban features such as population and density
143
+ of points of interest (POIs). To this aim, we begin from
144
+ the definition of a network substrate which acts as an
145
+ effective discretization of the spatial dimension. Its sim-
146
+ plest form can be found in an hexagonal 2-dimensional
147
+ tiling [28] and its planar dual, the triangular lattice.
148
+ More formally, in this network substrate each tile in space
149
+ is represented by a node, connected to its set of neighbor-
150
+ ing nodes (see Fig. 1). The existence of a physical edge
151
+ between nodes/tiles i and j is encoded in the adjacency
152
+ matrix A where Aij = 1 if the regions are neighbors in the
153
+ lattice. Distances and metrics are therefore computed on
154
+ top of this network substrate and are biased by nodes’
155
+ features.
156
+ Simulated
157
+ Annealing:
158
+
159
+ minimization
160
+ E({we})
161
+ {we}init : we = 1.0 ∀e
162
+
163
+
164
+ {we}optimal
165
+ we
166
+ dij
167
+ cΠ = ∑
168
+ e∈Π
169
+ de
170
+ we
171
+ de
172
+ we
173
+ Euclidean distance
174
+ Network
175
+ distance
176
+ Urban
177
+ morphology
178
+ Urban street
179
+ network
180
+ Minimal
181
+ model
182
+ A
183
+ B
184
+ C
185
+ FIG. 1.
186
+ Spatial network model for urban mor-
187
+ phology: A) Mapping population distribution and urban
188
+ transportation network to a minimal spatial network where
189
+ nodes encode urban features. Example with hexagonal tiling
190
+ mapped to the triangular lattice. B) Network-based distance
191
+ cΠij versus euclidean distance dij; edges weights/velocity we
192
+ are depicted as widths. C) Edges weights of the lattice sub-
193
+ strate are optimized via simulated annealing to unravel spatial
194
+ features of the optimal transportation network.
195
+ Nodes of this network can encode spatial features
196
+ at the urban scale, such as population or amenities’
197
+ density in a given node. We therefore have a minimal
198
+ representation
199
+ of
200
+ a
201
+ urban
202
+ morphological
203
+ structure
204
+ (see Fig.
205
+ 1), and a network substrate that acts as a
206
+ transportation system and can be optimized to generate
207
+ optimal transportation networks [27].
208
+ The path-based
209
+ temporal distance on top of the transportation network
210
+ acts as the fundamental metric we aim to minimize.
211
+ The
212
+ rationale
213
+ behind
214
+ a
215
+ network-based
216
+ distance
217
+ is
218
+ grounded on the assumption that in the context of
219
+ public transportation, urban systems are not navigated
220
+ by considering geographical distance but rather by eval-
221
+ uating the travel-time between departure and arrival.
222
+ More specifically, multi-layer transportation networks
223
+ [11, 21] are characterized by layers having a hierarchical
224
+ organization with different characteristic speeds [29].
225
+
226
+ 3
227
+ Spatial probability
228
+ of targets
229
+ from sample source s
230
+ psj(L, β)
231
+ β = 0.1
232
+ β = 5.0
233
+ β = 0.1
234
+ β = 5.0
235
+ source
236
+ Optimized Network
237
+ States
238
+
239
+ G({we})
240
+ A
241
+ B
242
+ pij ∝ e−β
243
+ rij
244
+ ⟨r⟩
245
+ spatial
246
+ network
247
+ (HEX)
248
+ pij ∝ e−β
249
+ Lij
250
+ ⟨L⟩
251
+ non-spatial
252
+ network
253
+ (ER)
254
+ 0.0
255
+ 1.0
256
+ psj
257
+ FIG. 2.
258
+ Optimization of synthetic networks: The role of β is studied for two network models: the triangular lattice
259
+ (HEX) and the non-spatial (ER) network. A) Heatmap of target nodes probabilities pij from source node (yellow) under two
260
+ different β values: as the penalty parameter grows, farther nodes are more penalized and flows tend to stay close to the source.
261
+ B) Samples of the associated optimized network states: when flows are not affected by distances (β = 0.1) source nodes target
262
+ all the other nodes in the network with approximately equal probability, the optimal network converges to a tree-like structure.
263
+ With larger β (β = 5.0), trip probabilities are more localized and the presence of loops appear in the optimal structure.
264
+ Thus, an effective temporal distance becomes fundamen-
265
+ tal in determining accessibility and efficiency in urban
266
+ space exploration.
267
+ In this model, we denote e as an edge in the network,
268
+ and we as the associated edge’s weight which can be seen
269
+ as a velocity of the edge in the transportation network.
270
+ de is the euclidean distance of edge e between the nodes
271
+ it is connecting; here edge weights are visually mapped
272
+ as widths of the links. Information about edges distance
273
+ in this framework can be relevant when generalizing to
274
+ the case of random spatial networks where edges have
275
+ different lengths.
276
+ In the case of a general non-spatial
277
+ network, where there is no notion of spatial distances,
278
+ the model can be adapted by fixing de = 1. Finally, we
279
+ define ΩΓij as the set of paths connecting the two nodes.
280
+ We then maximize the efficiency of this underlying sub-
281
+ strate. The transportation efficiency between two nodes
282
+ i−j is computed as a cost in terms of time [30], and we do
283
+ not account for congestion, which can be introduced in a
284
+ possible extension of this framework. We find the path
285
+ (a set of connected edges starting from source node i and
286
+ ending in destination node j) with the smallest cumula-
287
+ tive time, where the time delay introduced by choosing
288
+ an edge e is measured as a ratio between its euclidean dis-
289
+ tance and its weight, representing a proxy of speed. Refer
290
+ to Fig. 1 for a graphic depiction. Here G({we}) is used
291
+ to indicate the network configuration with the associated
292
+ set of edges weights {we}. We therefore aim to find the
293
+ assignment of weights {we} as a trade-off between net-
294
+ β = 0.01
295
+ β = 1.5
296
+ β = 10
297
+ A
298
+ B
299
+ FIG. 3.
300
+ Loop Dimension vs β: Minimum cycle basis
301
+ is used as a network’s observable to study the appearance of
302
+ loops. For each point the median and its absolute deviation
303
+ are shown. A) The average size (number of edges) of the loops
304
+ that constitute the cycle basis. B) Cycle basis dimension as
305
+ number of loops.
306
+ The optimal network ranges from a tree
307
+ structure to a lattice-like with small loops, as the probability
308
+ of long range movement decreases (large β). A transition in
309
+ the cycle basis property is observed at β ∼ 1.0 for the trian-
310
+ gular lattice under study, where the optimal network results
311
+ in an intermediate state with large loops.
312
+
313
+ Average Loop Size vs β
314
+ 8 -
315
+ Size [# edges]
316
+ 6
317
+ 2
318
+ 0
319
+ 10-3
320
+ 10-2
321
+ 10-1
322
+ 100
323
+ 101
324
+ 102
325
+ Cycle Basis Dimension vs β
326
+ Dimension [# cycles]
327
+ 10
328
+ TI
329
+ 8
330
+ 6
331
+ 4
332
+ 2
333
+ 0
334
+ 10-3
335
+ 10-2
336
+ 10-1
337
+ 100
338
+ 101
339
+ 102
340
+ β4
341
+ work efficiency in transportation, by minimizing the set
342
+ of costs {cij} of travelling between pair of nodes i − j
343
+ where each element cij is defined as:
344
+ cij({we}) =
345
+ min
346
+ Π∈ΩΓij
347
+
348
+ � �
349
+ e∈Πij
350
+ de
351
+ we
352
+
353
+ � ,
354
+ (1)
355
+ and in absence of further information, the optimiza-
356
+ tion procedure is the equivalent of minimizing the travel
357
+ costs �
358
+ ij cij. Here we add a novel ingredient, in which
359
+ we couple the optimization of the network temporal dis-
360
+ tances with a traffic flow or probability between pairs
361
+ of nodes. Operationally, when dealing with real world
362
+ Origin-Destination (OD) matrices, this probability can
363
+ be then mapped to a traffic Tij between two points. Tij
364
+ represents the probability of a person from node i to
365
+ travel to node j, and a traffic can be recovered when in-
366
+ formation about populations in source and target nodes
367
+ is added. Tij effectively acts as a rank in the importance
368
+ of a specific path in the network. As paths connecting
369
+ different pairs of nodes may share common edges of the
370
+ network substrate, complex topologies emerge from the
371
+ shared paths jointly optimizing the network efficiency.
372
+ The flow-weighted transportation efficiency therefore be-
373
+ comes:
374
+ E(G({we})) =
375
+ 1
376
+ N(N − 1)
377
+ N
378
+
379
+ i
380
+ N
381
+
382
+ j̸=i
383
+ Tij · cij({we})
384
+ (2)
385
+ We also require that the total network infrastructure
386
+ cost, defined as the cumulative sum of edges weights
387
+ per unit length, multiplied by edge distance CG
388
+ =
389
+
390
+ e∈G dewe is conserved.
391
+ This is a generalization of a
392
+ standard optimization process, in the sense that when
393
+ Tij = 1, ∀(i, j), the efficiency is optimized for all possible
394
+ trip pairs (i, j) with equal importance, where the Mini-
395
+ mum Spanning Tree often represents the optimal solution
396
+ [14].
397
+ Before tackling the problem of traffic-like (OD) flows,
398
+ we study a simpler definition of Tij, which allows us to un-
399
+ derstand the role of distance in the optimization process,
400
+ in absence of other nodes’ features:
401
+ $T_{ij} \propto e^{-\beta d_{ij}}. \qquad (3)$
403
+ The coefficient β appearing in Eq. 3 is introduced as
404
+ a penalizing parameter and determines how relevant is
405
+ the pair-wise distance dij when computing probabilities.
406
+ We can understand it as the inverse of a characteristic
407
+ traveling distance for an agent on the network, $\beta \sim 1/d_0$.
410
+ While several alternatives on the integration of distance
411
+ in spatial-dependent probabilities (such as power-laws
412
+ $T_{ij} \sim d_{ij}^{-\gamma}$) can be employed, we focus on the exponential
414
+ dependence as it represents the foundational result from
415
+ the maximum entropy derivation of gravity flows [31].
416
+ The introduction of gravity-like flows will be discussed
417
+ in Section IV.
418
+ In the following, we introduce the application of the
419
+ model on simple substrates to explore the role of β in
420
+ absence of spatial urban features.
421
+ III.
422
+ OPTIMIZATION OF SIMPLE NETWORK
423
+ SUBSTRATES
424
+ In order to assess the role of the characteristic distance
425
+ parameter β in the emergence of specific topologies, we
426
+ compute networks statistics on a set of generative models
427
+ for both spatial and non spatial networks. As hexagonal
428
+ tiling of space is preferable when an analysis includes as-
429
+ pects of connectivity [28], the first model we study is a
430
+ triangular lattice. The reason behind this choice is that
431
+ it represents the planar dual [14, 32] of the hexagonal lat-
432
+ tice. Therefore, as space is discretized in hexagonal tiles,
433
+ the spatial network connecting its centers is the triangu-
434
+ lar lattice, which is isotropic and presents less equivalent
435
+ degenerate paths of a rectangular lattice.
436
+ As a direct
437
+ reference to hexagonal tiling, we refer to this network
438
+ as HEX (see Fig. 2). We also extend the analysis
439
+ to the case of a random network model where nodes are
440
+ not embedded in a metric space. Specifically, we study
441
+ an Erd˝os-R´enyi (ER) network topology, where the
442
+ distance between nodes Lij can be defined in
443
+ terms of topological shortest path distance [33].
444
+ As a first benchmark we simplify flows as a spatial
445
+ probability Tij = pij that decays exponentially with dis-
446
+ tance and does not consider nodes features, the resulting
447
+ equation for pij is:
448
+ $p_{ij} = \frac{e^{-\beta d_{ij}/\langle d \rangle}}{\sum_{k \neq i}^{N-1} e^{-\beta d_{ik}/\langle d \rangle}} \qquad (4)$
453
+ where $\langle d \rangle = \frac{1}{N(N-1)} \sum_{i \neq j} d_{ij}$ is the average distance
458
+ of points in the network and acts as a normalization fac-
459
+ tor (euclidean distance ⟨r⟩ in case of a spatial network or
460
+ topological ⟨L⟩ for the ER network).
461
+ Therefore pij encodes how much of the nearby space is
462
+ explored by a single source node. An illustration of the
463
+ spatial dependence of target probabilities and samples of
464
+ the resulting optimal topologies are presented in Fig. 2.
465
+ For a range of β values the optimization process is
466
+ performed on an ensemble of these models. To assess the
467
+ emergence of complex structures we observe the number
468
+ of loops that emerge in the optimal state.
469
+ This mea-
470
+ sure is relevant in the context of spatial networks, where
471
+ loops break the symmetry introduced by optimal struc-
472
+ tures such as trees. We compute the minimum cycle ba-
473
+ sis set as a metric to observe the emergence of loops [36]:
474
+ i.e. the minimum set of loops (where a single loop is en-
475
+ coded in a set of edges that defines a closed path in the
476
+ graph) such that any other closed path in the network can
477
+
478
+ 5
479
+ Optimal networks and edges weights distribution P(we)
480
+ Exponential
481
+ Distribution
482
+ Morphology
483
+ A
484
+ B
485
+ β = 3.0
486
+ 3-Points
487
+ Steiner Tree
488
+ Morphology
489
+ Steiner node
490
+ 10.0
491
+ 0.0
492
+ Wj
493
+ β = 4.0
494
+ β = 0.1
495
+ G-KDE
496
+ FIG. 4.
497
+ Minimal models of urban morphology and attractiveness distributions under study (3-Points and
498
+ Exponential decay): Morphology of POIs, where attractiveness Wj is mapped with color intensity (yellow being higher).
499
+ Optimized edges weights distributions P({we}) are characterized by the bi-modal nature that reveals the multi-layered structure
500
+ of the optimal transportation networks when close-range flows are paired with long-range traffic typical of commuting towards
501
+ city centers (Insets P(Tij) with peaks on large-flows due to POIs). Gaussian KDE is shown in orange as a visual aid. A)
502
+ 3-points polycentric distribution of POIs, resembling the euclidean Steiner Tree problem [34, 35] for three points. The network
503
+ is optimized with β = 0.1 and β = 4.0, and shows the appearance of branches connecting the POIs paired to large loops in
504
+ the periphery. B) Optimal state and distribution of speeds with exponential decay of wj from the center and an exemplifying
505
+ result with β = 3.0. The optimal topology is characterized by a central loop paired with branches.
506
+ be reconstructed via combination of this cycle basis [36].
507
+ Specifically, we investigate the cycle basis dimension (the
508
+ number of loops that constitute this set) and the average
509
+ loop size, against a range of β values. This metric allows us
510
+ to quantify the emergence of spatial topological features
511
+ that differentiate the optimal state from a tree structure.
512
+ Results for these synthetic systems are presented in Fig.
513
+ 3. Additional boxplots are shown in SM Figures 1-2. A
514
+ tree-like topology is recovered when the flows probabil-
515
+ ities are distributed uniformly across all nodes in space
516
+ (when β → 0 and distance is therefore not a penalizing
517
+ variable in Eq. 4), while loops emerge when farther tar-
518
+ gets become less likely to be explored and the network
519
+ is globally optimized for close-range trips. Notably, in
520
+ Fig. 3 around β ≈ 1.0, we observe a sharp transition
521
+ in the average loop size in the HEX lattice under analy-
522
+ sis: connections appear between neighboring nodes which
523
+ are far from the tree-root as it becomes more efficient to
524
+ have a direct link. In this β regime the tree topology
525
+ does not guarantee the most efficient configuration for
526
+ peripheral nodes, which have their high probability tar-
527
+ gets in their direct neighborhood (see Eq. 4). Thus in the
528
+ optimization process edges appear between leaves nodes
529
+ which are in separated branches: this ultimately breaks
530
+ the tree structure and leads to the emergence of large-
531
+ scale loops. Eventually the optimal network converges
532
+ to a simpler structure with small loops as the network is
533
+ optimized for nodes to target only direct neighbors in the
534
+ lattice. Finally, in SM Section 2 we show an application
535
+ on the case of a single target node in the perimeter of
536
+ the lattice, where the model reproduces leaf venation
537
+ patterns [14, 37].
538
+ IV.
539
+ SPATIAL ATTRACTIVENESS AND
540
+ TRAFFIC-LIKE FLOWS
541
+ In the context of urban systems, optimal transporta-
542
+ tion networks need to be devised to accommodate traffic
543
+ flows [26] towards specific areas of interest, e.g. due to
544
+ high commercial and business land use density. Hence we
545
+ extend the efficiency optimization framework in the case
546
+ where we have more realistic traffic on top of the urban
547
+ networks, as the presence of nodes with high attractive-
548
+ ness (POIs) biases the flows towards them. In urban sce-
549
+ narios we adopt spatial-interaction models to mimic more
550
+ traffic-like flows. In these models, flows are obtained via a
551
+ gravity-like equation: $T_{ij} \propto p_i p_j \exp(-\beta d_{ij})$ [10] which
552
+ can be derived from first principles via entropy maxi-
553
+ mization, thus representing the most likely set of flows
554
+ to be observed. In the context of urban exploration, the
555
+ gravity equation can be mapped to a model for spatial in-
556
+ teraction [31, 38] where nodes with a given attractiveness
557
+ Wj compete as possible targets for traffic:
558
+ $T_{ij} \propto \frac{1}{Z} P_i W_j \exp(-\beta d_{ij}) \qquad (5)$
561
+ Normalization Z accounts for all possible trip al-
562
+ ternatives, $Z = \sum_k W_k \exp(-\beta d_{ik})$.
564
+ Pi is the population
565
+ density in node i and Wj encodes a suitable definition
566
+ of benefit/attractiveness of node j as a possible target
567
+ [38].
568
+ Tij is therefore the fraction of population in
569
+
570
+ β: 0.1
571
+ 102
572
+ ("1)
573
+ P
574
+ 101
575
+ 0.0
576
+ 0.1
577
+ Tijbeta is:4.0
578
+ 100
579
+ Gaussian KDE
580
+ P(We)
581
+ 10-1
582
+ 2
583
+ 3
584
+ 4
585
+ Weβ: 4.0
586
+ 101
587
+ P(Tij)
588
+ 100
589
+ 10-1
590
+ 10-2
591
+ 0.0
592
+ 0.2
593
+ 0.4
594
+ Tij.beta is:0.01
595
+ 100
596
+ Gaussian KDE
597
+ P(We)
598
+ 10-1
599
+ 10-2
600
+ 2
601
+ 4
602
+ 6
603
+ 8
604
+ We6
605
+ node i commuting/travelling on average to node j. To
606
+ better understand the role of nodes’ attractiveness, we
607
+ start with the simplest assumption of equal population
608
+ distribution on all nodes: Pi = 1.0 ∀i; we will introduce
609
+ more realistic population distribution in the next section
610
+ with the London case study.
611
+ We apply these models on the triangular lattice to
612
+ unravel the optimal topologies that emerge when traf-
613
+ fic probabilities are biased towards some nodes having
614
+ high attractiveness (simulating POIs) and we study two
615
+ spatial configurations for nodes’ Wj. In the first configu-
616
+ ration high Wj is assigned to three nodes (POIs) placed
617
+ at the vertices of an equilateral triangle. We study the 3-
618
+ points distribution as it mimics a prototypical polycentric
619
+ distribution of city-centers, and it can be linked to the
620
+ solution of the euclidean Steiner Tree problem [34, 35].
621
+ The Steiner Tree is a class of problems where given a set
622
+ of N points in a plane the goal is to find the set of lines
623
+ connecting the points with minimum cumulative length.
624
+ In our case, the solution would lie in the central node of
625
+ the lattice being the Fermat point [35] and the Steiner
626
+ node, which connects the three vertices of the high Wj
627
+ triangle, as illustrated in Fig. 4 panel A. The second case
628
+ is a distribution of Wj that decays exponentially from the
629
+ center, mimicking a more realistic morphology for a ur-
630
+ ban monocentric structure.
631
+ The two morphologies are
632
+ depicted in Fig. 4.
633
+ We find that due to nodes in the network biasing the
634
+ traffic flows, as it can be seen in the insets of Fig. 4 A,
635
+ the traffic flows get divided into two types: a close range
636
+ paired to a long range set of flows, due to POI polariza-
637
+ tion. We show in Fig. 4 optimal solutions for values of
638
+ β = 0.1, 4.0. Interestingly, optimal solutions are char-
639
+ acterized by three central lines branching from the cen-
640
+ ter (which therefore acts as Steiner node) and connecting
641
+ the three nodes with high attractiveness, therefore resem-
642
+ bling the solution of the Steiner tree problem. Moreover,
643
+ in the case of more localized flows (β = 4.0) these lines
644
+ are also paired with large scale loops connecting farther
645
+ nodes. We also find that the heterogeneity of traffic flows
646
+ forces the appearance of a second mode in the distribu-
647
+ tion of speeds we (see Fig. 4). The two peaks in the
648
+ optimal P(we) can be interpreted as two different lev-
649
+ els of speed, which suggests that the entire process can
650
+ be decomposed in two distinct mechanisms which can
651
+ be mapped as a bi-layer network: one layer at high ca-
652
+ pacity with long-range/commuting trajectories and the
653
+ other one at low velocity with short-range paths. These
654
+ two layers can be ideally separated, hinting towards a
655
+ possible extension of the model to multilayer networks.
656
+ V.
657
+ GREATER LONDON AREA: GENERATIVE
658
+ MODEL FOR THE SUBWAY SYSTEM
659
+ We extend in this section the application of the model
660
+ by integrating data from a real urban structure. Specifi-
661
+ cally, we model the morphology of Greater London Area
662
+ (GLA) on top of our framework and apply the efficiency
663
+ optimization process with the aim of understanding if
664
+ the temporal efficiency optimization of the spatial sub-
665
+ strate paired with realistic flows is sufficient to yield a
666
+ transportation network with similar topological features
667
+ (such as a central core paired with peripheral branches
668
+ [22]) as the London subway system. To extend the model
669
+ to real urban scenarios, we first obtain the distribution
670
+ of amenities [39] from OpenStreetMap [40] and we use
671
+ this density of points in space as a proxy to estimate
672
+ the attractiveness Wj of a tile. Census data for Greater
673
+ London Area wards from 2014 is used to recover popu-
674
+ lation density Pi. These densities are then mapped to
675
+ Uber’s H3 tiling to recover the spatial discretization in
676
+ hexagonal tiles, such that we can have direct mapping to
677
+ the nodes on a triangular lattice, as in the examples dis-
678
+ cussed in previous sections. We thus have the ingredients
679
+ to finally simulate the spatial interaction flows Tij in Eq.
680
+ 5. In Fig. 5 the integration of urban data describing the
681
+ London’s morphology in the model is explained and we
682
+ provide a depiction of the OD matrix that arises from
683
+ the spatial interaction model.
684
+ With the aim of repro-
685
+ ducing real features, we impose an upper limit on edge
686
+ weight, so that the distribution of weights is bounded
687
+ during the optimization process: we ∈ (0, w∗). This bet-
688
+ ter simulates the upper bound in speed of real multilayer
689
+ systems. Further explanation of data recovery and inte-
690
+ gration in the model is provided in SM Section 3. We
691
+ find (see SM Figure 6) that {we} distribution displays a
692
+ bi-modal shape, and this allows the analysis of the gen-
693
+ erated network in a sub-graph defined by the set of high
694
+ speed edges. In Fig. 5, panel C, we show a sample re-
695
+ sult for β = 0.35 of this sub-graph. The characterization
696
+ of the network into a central core paired with peripheral
697
+ branches as the optimal state can be visually observed.
698
+ The model’s subgraph of high speed edges is compared
699
+ to the real tube network in the Greater London Area
700
+ [21] to assess the similarities between the optimal struc-
701
+ ture and the real subway system. We quantify this sim-
702
+ ilarity by means of spatial scaling laws [22], these are
703
+ convenient to highlight the recovery of the central core
704
+ structure characterized by loops, paired with quasi mono-
705
+ dimensional lines branching from the core. We investi-
706
+ gate the distribution of nodes stations using the profile
707
+ function N(r) that quantifies the total number of stations
708
+ at a distance r from the network barycenter, computed
709
+ as the average location of all station nodes [22]. Results
710
+ of this scaling analysis for the real and simulated net-
711
+ works are presented in Fig. 6. The two scaling regimes
712
+ indicate the separation of core and branches: the scaling
713
+ of r2 in the core center and a second trend due to mono-
714
+ dimensional branches for r > rC, where rC is the radius
715
+ of the core structure. The second trend can be computed
716
+ analytically via an integral curve for N(r > rC) which
717
+ can be approximated by a power law rγ (γ = 1.25±0.02,
718
+ see SM Section 4), as in Ref. [22]. The curve of N(r)
719
+ is consistent with the real network and confirms scaling
720
+
721
+ 7
722
+ Efficiency optimized
723
+ Network state on GLA
724
+ Morphology
725
+ Pi
726
+ Wj
727
+ POI Distribution
728
+ (OSM)
729
+ Census Data
730
+ Tij ∝ PiWα
731
+ j exp (−βdij)
732
+ OD fluxes
733
+ Network optimization
734
+ under realistic fluxes
735
+ Greater
736
+ London Area
737
+ +
738
+ Metro network
739
+ A
740
+ B
741
+ C
742
+ FIG. 5.
743
+ Optimal network model for Greater London Area subway system: Application of the efficiency optimization
744
+ with realistic flows on the urban structure of the Greater London Area. A) Urban morphology data is recovered from Census
745
+ and OSM and population and POI densities are mapped to the H3 tiling. B) Data is mapped to the triangular lattice, with
746
+ nodes having features which allow the calculation of traffic-like flows, a sample OD matrix is shown where Tij are computed
747
+ with β = 0.35. C) Optimal network state for the London model, where only edges and nodes corresponding to the second
748
+ mode are shown (see SM Section 3). Central core structure with loops paired with peripheral branches can be visually seen.
749
+ laws prediction from [22].
750
+ core
751
+ branches
752
+ FIG. 6.
753
+ Scaling properties of GLA Tube stations:
754
+ Profile of the number of stations (nodes in the optimal dis-
755
+ cretized network (see SM Section 3) reproducing GLA under-
756
+ ground) versus the distance from the barycenter. The scaling
757
+ of N(r) profile of the model is compared with the real net-
758
+ work system. Scaling properties predicted in [22] are verified,
759
+ finding the two different scaling regimes separated at
760
+ r
761
+ rC ∼ 1
762
+ for core paired with branches systems, where rC is the core
763
+ radius (characterized by r2 scaling), and NC is the number of
764
+ stations in the core. The scaling exponent γ = 1.25 ± 0.02 is
765
+ obtained as a linear fit of the integral curve [22] for r > rC
766
+ (see SM Section 4 for more details).
767
+ VI.
768
+ DISCUSSION
769
+ Starting from simple conditions on temporal efficiency
770
+ on a spatial network substrate, we show that network op-
771
+ timization paired with traffic-like flows weighting the im-
772
+ portance of specific connections in space can reproduce
773
+ complex networks features from man-made transporta-
774
+ tion networks.
775
+ Specifically, we devise a framework for
776
+ spatial networks where nodes can encode features of ur-
777
+ ban systems and can ultimately lead to the study of opti-
778
+ mal topologies in real scenarios. A key novelty lies in the
779
+ optimization process happening on a spatial substrate,
780
+ such that edges of the resulting optimal network are op-
781
+ timized to improve the efficiency of the shared space by
782
+ all nodes in the network. We show how the probabili-
783
+ ties of moving from one point to another in space force a
784
+ transition between a tree-like and a lattice-like topology
785
+ in the optimal network. Fixing certain target points in
786
+ space with a higher attractiveness for flows can repro-
787
+ duce theoretical results such as the Steiner tree solution
788
+ or leaf venation patterns. We also show that extend-
789
+ ing these probabilities using urban spatial information
790
+ and traffic-like flows modeling forces the emergence of
791
+ shared preferential paths that are organized as complex
792
+ topologies, resulting from traffic weighted optimization
793
+ of network time efficiency, which ultimately exhibits the
794
+ characteristics seen in real systems. We recover features
795
+ such as a bi-modality in the speed distribution of the
796
+ edges of the network, characteristic of multilayer trans-
797
+ portation. Or the appearance of a central core with loops
798
+ coupled to branches typical of underground systems, as
799
+ in the case of the London underground system. We find
800
+ that branches paired to large loops structures appear as
801
+ optimal structures when the network is optimized for an
802
+
803
+ Tiling (HEX3-Res6) and POI Density
804
+ OSM amenities distribution
805
+ Mapping to the hex lattice model
806
+ W; extraction - here log(density)
807
+ London Greater Area
808
+ 52.0
809
+ 51.8
810
+ Lat
811
+ 51.
812
+ 51.2
813
+ 51.0
814
+ 0.2
815
+ 0.0
816
+ 0.2
817
+ Lon51.70
818
+ 51.65
819
+ 51.60
820
+ 51.55
821
+ 51.5
822
+ 51.45
823
+ 51.40
824
+ 51.35
825
+ 51.30
826
+ 0.4
827
+ -0.2
828
+ 0.D
829
+ 0.21.0
830
+ 0.9
831
+ M
832
+ 0.8
833
+ 0.7
834
+ Traffic Density
835
+ 0.6
836
+ 0.5
837
+ 0.4
838
+ 0.3
839
+ 0.21.0
840
+ 0.9
841
+ 0.8
842
+ 0.7
843
+ Traffic Density
844
+ 0.6
845
+ 0.5
846
+ 0.4
847
+ 0.3
848
+ 0.2Rescaled number of stations at distance r from barycenter
849
+ Power law r2
850
+ 101
851
+ Model
852
+ LGA Tube
853
+ Powerlaw ry
854
+ 100
855
+ 10-1
856
+ .OL
857
+ 10-1
858
+ 1008
859
+ interplay of traffic flows mixed between small range trav-
860
+ els and longer range ones typical of commuting.
861
+ This
862
+ novel framework for the optimization of spatial networks
863
+ in urban contexts may show further improvements and
864
+ extensions to better accommodate the concepts of multi-
865
+ layer and shared space. It could be extended also to the
866
+ case of inter-cities transportation, where specific nodes in
867
+ the network substrate represent cities. To conclude, in
868
+ this work the problem is addressed in a theoretical way
869
+ with the aim of reproducing and understanding some fea-
870
+ tures observed in real spatial networks, but future works
871
+ can exploit this framework as a basis to understand how
872
+ to generate optimal transportation networks in a urban
873
+ planning scenario.
874
+ Competing Financial Interests
875
+ The authors declare no competing financial interests
876
+ Data Availability
877
+ The data used in this work are publicly available from
878
+ the original references
879
+ Code Availability
880
+ The code to perform the analysis will be available upon
881
+ request.
882
+ [1] M. Batty, Science 319, 769 (2008).
883
+ [2] M. Barthelemy, Nature Reviews Physics 1, 406 (2019).
884
+ [3] L. Bettencourt and G. West, Nature 467, 912 (2010).
885
+ [4] W.
886
+ Pan,
887
+ G.
888
+ Ghoshal,
889
+ C.
890
+ Krumme,
891
+ M.
892
+ Cebrian,
893
+ and A. Pentland, Nature Communications 4 (2013),
894
+ 10.1038/ncomms2961.
895
+ [5] E. Arcaute, E. Hatna, P. Ferguson, H. Youn, A. Johans-
896
+ son, and M. Batty, Journal of The Royal Society Inter-
897
+ face 12, 20140745 (2015).
898
+ [6] L. M. A. Bettencourt, Science 340, 1438 (2013).
899
+ [7] A. Bassolas, H. Barbosa-Filho, B. Dickinson, X. Doti-
900
+ walla, P. Eastham, R. Gallotti, G. Ghoshal, B. Gip-
901
+ son, S. A. Hazarie, H. Kautz, O. Kucuktunc, A. Lieber,
902
+ A. Sadilek, and J. J. Ramasco, Nature Communications
903
+ 10 (2019), 10.1038/s41467-019-12809-y.
904
+ [8] L. M. A. Bettencourt, Science Advances 6 (2020),
905
+ 10.1126/sciadv.aat8812.
906
+ [9] M. Schl¨apfer, L. Dong, K. O’Keeffe, P. Santi, M. Szell,
907
+ H. Salat, S. Anklesaria, M. Vazifeh, C. Ratti, and G. B.
908
+ West, Nature 593, 522 (2021).
909
+ [10] H. Barbosa, M. Barthelemy, G. Ghoshal, C. R. James,
910
+ M. Lenormand, T. Louail, R. Menezes, J. J. Ramasco,
911
+ F. Simini,
912
+ and M. Tomasini, Physics Reports 734, 1
913
+ (2018).
914
+ [11] L. Alessandretti, L. G. N. Orozco, M. Saberi, M. Szell,
915
+ and F. Battiston, Environment and Planning B: Urban
916
+ Analytics and City Science , 239980832211081 (2022).
917
+ [12] M. Lee, H. Barbosa, H. Youn, P. Holme, and G. Ghoshal,
918
+ Nature Communications 8 (2017), 10.1038/s41467-017-
919
+ 02374-7.
920
+ [13] R. Gallotti, P. Sacco, and M. D. Domenico, Complexity
921
+ 2021, 1 (2021).
922
+ [14] M. Barth´elemy, Physics Reports 499, 1 (2011).
923
+ [15] R. G. Morris and M. Barthelemy, Physical Review Let-
924
+ ters 109 (2012), 10.1103/physrevlett.109.128703.
925
+ [16] M. T. Gastner and M. E. J. Newman, Physical Review
926
+ E 74 (2006), 10.1103/physreve.74.016117.
927
+ [17] M. Barth´elemy and A. Flammini, Journal of Statisti-
928
+ cal Mechanics: Theory and Experiment 2006, L07002
929
+ (2006).
930
+ [18] R. Louf, P. Jensen, and M. Barthelemy, Proceedings of
931
+ the National Academy of Sciences 110, 8824 (2013).
932
+ [19] J. R. Banavar, A. Maritan, and A. Rinaldo, Nature 399,
933
+ 130 (1999).
934
+ [20] A. Pei, F. Xiao, S. Yu, and L. Li, Scientific Reports 12
935
+ (2022), 10.1038/s41598-022-12053-3.
936
+ [21] R. Gallotti and M. Barthelemy, Scientific Data 2 (2015),
937
+ 10.1038/sdata.2014.56.
938
+ [22] C. Roth, S. M. Kang, M. Batty,
939
+ and M. Barthelemy,
940
+ Journal of The Royal Society Interface 9, 2540 (2012).
941
+ [23] A. Tero, S. Takagi, T. Saigusa, K. Ito, D. P. Bebber,
942
+ M. D. Fricker, K. Yumiki, R. Kobayashi, and T. Naka-
943
+ gaki, Science 327, 439 (2010).
944
+ [24] R. Louf and M. Barthelemy, Scientific Reports 4 (2014),
945
+ 10.1038/srep05561.
946
+ [25] A. A. Ibrahim, A. Lonardi, and C. D. Bacco, Algorithms
947
+ 14, 189 (2021).
948
+ [26] X. Zhang,
949
+ A. Adamatzky,
950
+ F. T. Chan,
951
+ Y. Deng,
952
+ H. Yang, X.-S. Yang, M.-A. I. Tsompanas, G. C. Sir-
953
+ akoulis, and S. Mahadevan, Scientific Reports 5 (2015),
954
+ 10.1038/srep10794.
955
+ [27] M. Szell, S. Mimar, T. Perlman, G. Ghoshal,
956
+ and
957
+ R. Sinatra, Scientific Reports 12 (2022), 10.1038/s41598-
958
+ 022-10783-y.
959
+ [28] C. P. Birch, S. P. Oom, and J. A. Beecham, Ecological
960
+ Modelling 206, 347 (2007).
961
+ [29] R.
962
+ Gallotti,
963
+ A.
964
+ Bazzani,
965
+ S.
966
+ Rambaldi,
967
+ and
968
+ M.
969
+ Barthelemy,
970
+ Nature
971
+ Communications
972
+ 7
973
+ (2016),
974
+ 10.1038/ncomms12600.
975
+ [30] Y. Ren, M. Ercsey-Ravasz, P. Wang, M. C. Gonz´alez,
976
+ and Z. Toroczkai, Nature Communications 5 (2014),
977
+ 10.1038/ncomms6347.
978
+ [31] A. Wilson, Transportation Research 9, 167 (1975).
979
+ [32] M. P. Viana, E. Strano, P. Bordin, and M. Barthelemy,
980
+ Scientific Reports 3 (2013), 10.1038/srep03495.
981
+ [33] M. Newman, Networks (Oxford University Press, 2010).
982
+ [34] S. E. Dreyfus and R. A. Wagner, Networks 1, 195 (1971).
983
+ [35] M. Brazil, R. L. Graham, D. A. Thomas, and M. Zachari-
984
+ asen, Archive for History of Exact Sciences 68, 327
985
+ (2013).
986
+ [36] T. Kavitha, K. Mehlhorn, D. Michail, and K. E. Paluch,
987
+ Algorithmica 52, 333 (2007).
988
+ [37] E. Katifori, G. J. Sz¨oll˝osi,
989
+ and M. O. Magnasco,
990
+ Physical Review Letters 104 (2010), 10.1103/phys-
991
+ revlett.104.048704.
992
+
993
+ 9
994
+ [38] D. Piovani, C. Molinero, and A. Wilson, PLOS ONE 12,
995
+ e0185787 (2017).
996
+ [39] C. A. Hidalgo, E. Casta˜ner,
997
+ and A. Sevtsuk, Habitat
998
+ International 106, 102205 (2020).
999
+ [40] OpenStreetMap
1000
+ contributors,
1001
+ “Planet
1002
+ dump
1003
+ re-
1004
+ trieved
1005
+ from
1006
+ https://planet.osm.org
1007
+ ,”
1008
+ https:
1009
+ //www.openstreetmap.org (2017).
1010
+
1011
+ 10
1012
+ SUPPLEMENTARY MATERIAL FOR “EMERGENCE OF COMPLEX NETWORKS TOPOLOGIES
1013
+ FROM FLOW-WEIGHTED OPTIMIZATION OF NETWORK EFFICIENCY”
1014
+ S1.
1015
+ APPLICATION ON SIMPLE NETWORK SUBSTRATES
1016
+ In this Section we report more detailed results for the optimization of simple network substrates that was discussed
1017
+ in the Main text, specifically for the triangular (HEX) lattice (planar dual of the hexagonal tiling of space) and
1018
+ Erd˝os-R´enyi (ER) network. The aim is to explore the resulting topologies that emerge both in spatial and non spatial
1019
+ networks when simple probabilities are taken into consideration (see Main text).
1020
+ S1.1.
1021
+ HEX Lattice and ER Network
1022
+ Results of model application on the triangular (HEX) lattice and results of model application on an Erd˝os-R´enyi
1023
+ (ER) non-spatial network. For the non-spatial generative model, an ensemble of 20 networks with N = 30 nodes and
1024
+ edge probability ρ = 0.2 is generated. For the spatial case, the optimization process is repeated 20 times for each
1025
+ value of β on the triangular lattice with N = 37 nodes.
1026
+ SM Fig. 1 and SM Fig. 2 show boxplots for the distributions of metrics computed on the cycle basis in panels A
1027
+ on both figures. In panels B, samples of the optimized network states are shown for different values of β.
1028
+
1029
+ 11
1030
+ Hex Lattice
1031
+ N = 37
1032
+ A
1033
+ B
1034
+ Supplementary Figure S1.
1035
+ Cycle basis properties and samples - HEX lattice. Boxplot statistics for the cycle basis
1036
+ dimension and optimal network samples for triangular lattice across different β values.
1037
+
1038
+ Average Loop Size vs β
1039
+ 10
1040
+ 8 -
1041
+ edges]
1042
+ 6
1043
+ #
1044
+ Size
1045
+ 4 :
1046
+ 2
1047
+ 0
1048
+ 0.0001 0.001
1049
+ 0.01
1050
+ 0.1
1051
+ 0.5
1052
+ 1.0
1053
+ 1.5
1054
+ 2.0
1055
+ 3.0
1056
+ 4.0
1057
+ 5.0
1058
+ 10.0
1059
+ 15.0100.0
1060
+ Cycle Basis Dimension vs β
1061
+ Dimension [# cycles]
1062
+ 10.0
1063
+ 7.5
1064
+ 5.0
1065
+ 2.5
1066
+ 0.0
1067
+ 0.0001 0.001
1068
+ 0.01
1069
+ 0.1
1070
+ 0.5
1071
+ 1.0
1072
+ 1.5
1073
+ 2.0
1074
+ 3.0
1075
+ 4.0
1076
+ 5.0
1077
+ 10.0
1078
+ 15.0
1079
+ 100.0
1080
+ βB: 10.0
1081
+ Yβ: 0.001
1082
+ 7β: 1.0
1083
+ YYβ: 4.0β: 0.1β: 2.0
1084
+ 蒸β: 5.0
1085
+ 茶β: 0.5β: 3.012
1086
+ ER Network
1087
+ N = 30, ρ = 0.2
1088
+ A
1089
+ B
1090
+ Supplementary Figure S2.
1091
+ Cycle basis properties and samples - ER network. Boxplot statistics for the cycle basis
1092
+ dimension and optimal network samples for an Erdos-Renyi (ER) network across different β values.
1093
+
1094
+ Average Loop Size vs β
1095
+ 8
1096
+ 6
1097
+ 5
1098
+ #1
1099
+ 4
1100
+ Size
1101
+ S
1102
+ 2
1103
+ 1
1104
+ 0
1105
+ 0.0001 0.001
1106
+ 0.01
1107
+ 0.1
1108
+ 0.5
1109
+ 1.0
1110
+ 1.5
1111
+ 2.0
1112
+ 3.0
1113
+ 4.0
1114
+ 5.0
1115
+ 10.0
1116
+ 15.0
1117
+ 100.0
1118
+ Cycle Basis Dimension vs β
1119
+ Dimension [# cycles]
1120
+ 15
1121
+ 10
1122
+ 5
1123
+ 0
1124
+ 0.0001 0.001
1125
+ 0.01
1126
+ 0.1
1127
+ 0.5
1128
+ 1.0
1129
+ 1.5
1130
+ 2.0
1131
+ 3.0
1132
+ 4.0
1133
+ 5.0
1134
+ 10.0
1135
+ 15.0
1136
+ 100.0
1137
+ ββ: 100.0B: 0.0001β: 1.5β: 5.0β: 0.01β: 3.0β: 10.0β: 0.5β: 4.013
1138
+ S2.
1139
+ LEAVES PATTERNS
1140
+ Here we show an application of the model to reproduce leaf venation patterns [37]. A single attracting node (single
1141
+ target or sink) is considered at one of the perimeter nodes of the lattice, and the substrate is optimized using all the
1142
+ other nodes as sources.
1143
+ Supplementary Figure S3.
1144
+ Optimal networks resembling leaves’ veins patterns. Optimal state when a single target
1145
+ (orange node) in a single spatial extremity is considered. Efficiency is optimized for all nodes in space to reach the target. The
1146
+ resulting optimal state resembles tree-like patterns found in leaves (A), while the distribution of edges weights is shown in (B).
1147
+
1148
+ B
1149
+ beta is:1.0
1150
+ 100
1151
+ 10-1
1152
+ P(We)
1153
+ 102.
1154
+ 100
1155
+ 101
1156
+ We14
1157
+ S3.
1158
+ APPLICATION ON LONDON TUBE NETWORK
1159
+ In this section we discuss more in detail the application of the model on the Greater London Area urban morphology.
1160
+ We show that the London subway network spatial properties can be recovered by means of the optimal network state.
1161
+ In particular, to simulate a real transportation system, an upper bound on edges travel velocity is imposed.
1162
+ S3.1.
1163
+ Data Integration: Census and OpenStreetMap Data
1164
+ To extend the model to real urban scenarios, we gather data regarding the urban morphological structure from
1165
+ OpenStreetMap (OSM) [40] and Census.
1166
+ 1.
1167
+ OSM Data
1168
+ To model the attractiveness of nodes in the lattice, we use the density of Points Of Interest in the urban space. We
1169
+ use amenities [39] points in OSM as a proxy for Points Of Interest (POIs), and a node j attractiveness (Wj) therefore
1170
+ encodes the density of amenities in space.
1171
+ The bounding box for Greater London Area (GLA) is obtained via OSM (https://wiki.openstreetmap.org/
1172
+ wiki/Bounding_Box) and POI densities are recovered inside this box. Specifically we use the following amenities
1173
+ sub-categories:
1174
+ ’cafe’,’college’,’library’,’school’,’university’,’kindergarten’,’restaurant’,’pub’, ’fast
1175
+ food’,’bar’,’bank’,’dentist’,’pharmacy’,’hospital’,’clinic’,’doctor’,’arts
1176
+ centre’,’cinema’,’community centre’,’police’,’post office’,’marketplace’
1177
+ In Fig. S4 the amenities points recovered from OSM are plotted in the bounding box.
1178
+ Lat
1179
+ Lon
1180
+ 51.72
1181
+ 51.25
1182
+ -0.60
1183
+ 0.35
1184
+ Map background is provided by OSM under Open Database License
1185
+ Supplementary Figure S4.
1186
+ OSM amenities query for GLA bounding box.
1187
+ Amenities retrieved from OSM
1188
+ (https://openstreetmap.org/copyright) for the bounding box defined by the following longitude [−0.6, 0.35] and latitude
1189
+ interval [51.25, 51.75].
1190
+
1191
+ tMisisenden
1192
+ Chippin
1193
+ AONB
1194
+ Amersham
1195
+ Ingatestone
1196
+ ghwycombe
1197
+ Billericay
1198
+ consfielo
1199
+ wWickto
1200
+ Basildon
1201
+ arlow
1202
+ Maidenhead
1203
+ Stanford-le
1204
+ endon
1205
+ M4
1206
+ Windsol
1207
+ Bracknel
1208
+ M2
1209
+ ton
1210
+ M20
1211
+ ghtwater
1212
+ Snodland
1213
+ Biggir
1214
+ LHET
1215
+ WestMalling
1216
+ Ma
1217
+ nam
1218
+ eafletLDataby@OpenStreetMapunderODbl15
1219
+ 2.
1220
+ Census Data
1221
+ London Wards - Census Data 2014
1222
+ A
1223
+ B
1224
+ C
1225
+ Supplementary Figure S5.
1226
+ Census data retrieval. A) London’s wards data from Census 2014 (https://data.london.
1227
+ gov.uk/dataset/ward-profiles-and-atlas).
1228
+ B) Points are generated in space following wards polygons with a density
1229
+ proportional to Census data. C) Points are mapped to HEX3 tiles with associated densities of points. Finally, a restriction to
1230
+ a disc is used to enforce symmetry in the distribution of nodes and to ease the computational load.
1231
+ Points are mapped to density in space by using HEX3 tiling (https://eng.uber.com/h3/) with spatial resolution
1232
+ RES = 8. This spatial discretization process via tiling is of particular relevance to map this information to the
1233
+ HEX triangular lattice model which was described in the Main text. Both Fig. S5 and Fig. S4 show how model
1234
+ information Pi and Wj are recovered from data and then mapped to tiles as shown in panel C in Fig. S5. Tiles
1235
+ covering the urban area are then mapped to the spatial network model as nodes in the triangular lattice (see Fig. 5
1236
+ in Main text).
1237
+ For computational simplicity and to preserve the isotropy of the lattice substrate from its central point, we restrict
1238
+ our analysis to a disc centered in the London region with the highest attractiveness, which lies approximately in the City
1239
+ Of London district. Results are robust against integration of the remaining GLA region. As the optimal edges weights
1240
+ are influenced by the traffic on the substrate, the discarded regions in the Greater London Area do not add relevant
1241
+ contributions when compared to more central regions with higher population density.
1242
+ S3.2.
1243
+ Results with distributions for traffic and bounded weights distribution
1244
+ To simulate a real system we impose an upper bound on the edge weights at a fixed value w∗ = 7 · winit, where winit is the initial
1245
+ edge weight assigned to the network state (winit = 1.0) before the optimization process, such that \sum_{e \in G} d_e w_{e,\mathrm{init}} = C.
1247
+ In this simulation we work with β = 0.35.
1248
+
1249
+ 'LS
1250
+ 51.6
1251
+ 51.5
1252
+ 51.4
1253
+ 51.3
1254
+ .4
1255
+ 0.2
1256
+ 0.D
1257
+ 0.216
1258
+ A
1259
+ B
1260
+ Pi
1261
+ Wj
1262
+ Urban morphology mapped to
1263
+ The hexagonal lattice
1264
+ Optimal network and
1265
+ edges weights distribution
1266
+ Supplementary Figure S6.
1267
+ Mapping the data to Hex lattice + GLA optimized network. A) Urban morphology
1268
+ data mapped to the HEX tiling and then mapped to the triangular (HEX) lattice. At this step, OD matrix is generated and
1269
+ the network is then optimized. B) Resulting optimal network state with its distribution of edges weights showing a bi-modal
1270
+ shape. Only fast edges having weight larger than a threshold (we > 5) are kept to isolate the sub-graph constituted by a high
1271
+ velocity set of edges, such as a subway system.
1272
+ A
1273
+ London Metro network
1274
+ (Restricted to disc)
1275
+ Model output (pruned)
1276
+ B
1277
+ Supplementary Figure S7.
1278
+ Pruned network vs real tube. A) London subway network, restricted to the disc under
1279
+ study. B) GLA model output, limited to the nodes and edges which have a weight we > 5.0, where this value was chosen
1280
+ from the previously plotted {we} distribution as a threshold to separate the second “mode” with fast edges. This “discretized”
1281
+ network is the model on which statistical measures are performed.
1282
+
1283
+ Model network0.5 .
1284
+ 0.4
1285
+ P(we)
1286
+ 0.3 -
1287
+ 0.2
1288
+ 0.1
1289
+ 0.0
1290
+ 0
1291
+ 1
1292
+ 2
1293
+ 4
1294
+ 5
1295
+ 6
1296
+ 7
1297
+ WeCG Real network
1298
+ Model pruned (2nd mode)17
1299
+ S4.
1300
+ SCALING OF NETWORK STATIONS
1301
+ The spatial organization of the transportation network can be inspected by taking into consideration the number
1302
+ of nodes’ stations N(r) up to a distance r from the barycenter of stations. In the Main text we analyze the scaling
1303
+ regimes for the simulated and real London Tube network [21] following the analysis employed in [22]. Specifically,
1304
+ they obtain functional forms for the scaling properties of N(r) for different distances from the barycenter. We report
1305
+ here the scalings obtained in [22]:
1306
+ N(r) \sim \begin{cases} \rho_C \pi r^2 & \text{for } r < r_C \\ \rho_C \pi r_C^2 + N_B \int_{r_C}^{r} \frac{dr}{\Delta(r)} & \text{for } r_C < r < r_{max} \\ N & \text{for } r > r_{max} \end{cases} \quad (S1)
1325
+ Specifically, they show that in the large distance regime (r > rC and r < rmax ) the number of stations can be
1326
+ approximated by adding the integral curve N_B \int_{r_C}^{r} \frac{dr}{\Delta(r)} to a constant term. In [22] it is also reported that the large
1331
+ distance behavior can also, in general, be approximated by a scaling law. Therefore we plot here the computation of
1332
+ the integral curve against rescaled values of r, and show that in that regime it can be approximated by a power law,
1333
+ and its exponent can be obtained via a linear fit. The N(r) curve in the Main text was computed on the real London
1334
+ Tube restricted to the disc area as presented in SM Fig. 7. The associated exponent γ = 1.25 ± 0.02 was used in
1335
+ the Main text to highlight the secondary scaling for r > rC. As discussed in Ref. [22], due to ∆(r) in SM Eq. S1
1336
+ being often noisy, this scaling property is not often well reproduced in empirical networks, and it is often restricted
1337
+ to a small region of r values. In Fig. 6 of the Main text, we see that the secondary scaling with the exponent γ
1338
+ approximates N(r) in a limited interval for r > rC. The value of the exponent in the branches region is expected to
1339
+ be γ < 2.0 [22] and our result is consistent with this.
1340
+ Supplementary Figure S8.
1341
+ Scaling of the integral curve. Log-log plot of the integral curve and its linear fit, with
1342
+ the associated value of the γ exponent approximated, to highlight a secondary scaling region as mentioned in [22].
1343
+
1344
+ Scaling of the integral curve - fit with γ = 1.253
1345
+ Power law r^γ
1346
+ Integral Curve
1347
+ 102
1348
+ 6 × 101
1349
+ 4 × 101
1350
+ 3 × 101
1351
+ 100
1352
+ 2 × 100
1353
+ 3 × 100
1354
+ r
1355
+ Tc18
1356
+ [1] M. Batty, Science 319, 769 (2008).
1357
+ [2] M. Barthelemy, Nature Reviews Physics 1, 406 (2019).
1358
+ [3] L. Bettencourt and G. West, Nature 467, 912 (2010).
1359
+ [4] W. Pan, G. Ghoshal, C. Krumme, M. Cebrian, and A. Pentland, Nature Communications 4 (2013), 10.1038/ncomms2961.
1360
+ [5] E. Arcaute, E. Hatna, P. Ferguson, H. Youn, A. Johansson,
1361
+ and M. Batty, Journal of The Royal Society Interface 12,
1362
+ 20140745 (2015).
1363
+ [6] L. M. A. Bettencourt, Science 340, 1438 (2013).
1364
+ [7] A. Bassolas, H. Barbosa-Filho, B. Dickinson, X. Dotiwalla, P. Eastham, R. Gallotti, G. Ghoshal, B. Gipson, S. A. Hazarie,
1365
+ H. Kautz, O. Kucuktunc, A. Lieber, A. Sadilek, and J. J. Ramasco, Nature Communications 10 (2019), 10.1038/s41467-
1366
+ 019-12809-y.
1367
+ [8] L. M. A. Bettencourt, Science Advances 6 (2020), 10.1126/sciadv.aat8812.
1368
+ [9] M. Schl¨apfer, L. Dong, K. O’Keeffe, P. Santi, M. Szell, H. Salat, S. Anklesaria, M. Vazifeh, C. Ratti,
1369
+ and G. B. West,
1370
+ Nature 593, 522 (2021).
1371
+ [10] H. Barbosa, M. Barthelemy, G. Ghoshal, C. R. James, M. Lenormand, T. Louail, R. Menezes, J. J. Ramasco, F. Simini,
1372
+ and M. Tomasini, Physics Reports 734, 1 (2018).
1373
+ [11] L. Alessandretti, L. G. N. Orozco, M. Saberi, M. Szell, and F. Battiston, Environment and Planning B: Urban Analytics
1374
+ and City Science , 239980832211081 (2022).
1375
+ [12] M. Lee, H. Barbosa, H. Youn, P. Holme, and G. Ghoshal, Nature Communications 8 (2017), 10.1038/s41467-017-02374-7.
1376
+ [13] R. Gallotti, P. Sacco, and M. D. Domenico, Complexity 2021, 1 (2021).
1377
+ [14] M. Barth´elemy, Physics Reports 499, 1 (2011).
1378
+ [15] R. G. Morris and M. Barthelemy, Physical Review Letters 109 (2012), 10.1103/physrevlett.109.128703.
1379
+ [16] M. T. Gastner and M. E. J. Newman, Physical Review E 74 (2006), 10.1103/physreve.74.016117.
1380
+ [17] M. Barth´elemy and A. Flammini, Journal of Statistical Mechanics: Theory and Experiment 2006, L07002 (2006).
1381
+ [18] R. Louf, P. Jensen, and M. Barthelemy, Proceedings of the National Academy of Sciences 110, 8824 (2013).
1382
+ [19] J. R. Banavar, A. Maritan, and A. Rinaldo, Nature 399, 130 (1999).
1383
+ [20] A. Pei, F. Xiao, S. Yu, and L. Li, Scientific Reports 12 (2022), 10.1038/s41598-022-12053-3.
1384
+ [21] R. Gallotti and M. Barthelemy, Scientific Data 2 (2015), 10.1038/sdata.2014.56.
1385
+ [22] C. Roth, S. M. Kang, M. Batty, and M. Barthelemy, Journal of The Royal Society Interface 9, 2540 (2012).
1386
+ [23] A. Tero, S. Takagi, T. Saigusa, K. Ito, D. P. Bebber, M. D. Fricker, K. Yumiki, R. Kobayashi, and T. Nakagaki, Science
1387
+ 327, 439 (2010).
1388
+ [24] R. Louf and M. Barthelemy, Scientific Reports 4 (2014), 10.1038/srep05561.
1389
+ [25] A. A. Ibrahim, A. Lonardi, and C. D. Bacco, Algorithms 14, 189 (2021).
1390
+ [26] X. Zhang, A. Adamatzky, F. T. Chan, Y. Deng, H. Yang, X.-S. Yang, M.-A. I. Tsompanas, G. C. Sirakoulis,
1391
+ and
1392
+ S. Mahadevan, Scientific Reports 5 (2015), 10.1038/srep10794.
1393
+ [27] M. Szell, S. Mimar, T. Perlman, G. Ghoshal, and R. Sinatra, Scientific Reports 12 (2022), 10.1038/s41598-022-10783-y.
1394
+ [28] C. P. Birch, S. P. Oom, and J. A. Beecham, Ecological Modelling 206, 347 (2007).
1395
+ [29] R. Gallotti, A. Bazzani, S. Rambaldi, and M. Barthelemy, Nature Communications 7 (2016), 10.1038/ncomms12600.
1396
+ [30] Y. Ren, M. Ercsey-Ravasz,
1397
+ P. Wang,
1398
+ M. C. Gonz´alez,
1399
+ and Z. Toroczkai, Nature Communications 5 (2014),
1400
+ 10.1038/ncomms6347.
1401
+ [31] A. Wilson, Transportation Research 9, 167 (1975).
1402
+ [32] M. P. Viana, E. Strano, P. Bordin, and M. Barthelemy, Scientific Reports 3 (2013), 10.1038/srep03495.
1403
+ [33] M. Newman, Networks (Oxford University Press, 2010).
1404
+ [34] S. E. Dreyfus and R. A. Wagner, Networks 1, 195 (1971).
1405
+ [35] M. Brazil, R. L. Graham, D. A. Thomas, and M. Zachariasen, Archive for History of Exact Sciences 68, 327 (2013).
1406
+ [36] T. Kavitha, K. Mehlhorn, D. Michail, and K. E. Paluch, Algorithmica 52, 333 (2007).
1407
+ [37] E. Katifori, G. J. Sz¨oll˝osi, and M. O. Magnasco, Physical Review Letters 104 (2010), 10.1103/physrevlett.104.048704.
1408
+ [38] D. Piovani, C. Molinero, and A. Wilson, PLOS ONE 12, e0185787 (2017).
1409
+ [39] C. A. Hidalgo, E. Casta˜ner, and A. Sevtsuk, Habitat International 106, 102205 (2020).
1410
+ [40] OpenStreetMap contributors, “Planet dump retrieved from https://planet.osm.org ,” https://www.openstreetmap.org
1411
+ (2017).
1412
+
CdFAT4oBgHgl3EQftB4M/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
CtE1T4oBgHgl3EQf9wbT/content/tmp_files/2301.03561v1.pdf.txt ADDED
@@ -0,0 +1,2536 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ IEEE INTERNET OF THINGS JOURNAL
2
+ 1
3
+ Ancilia: Scalable Intelligent Video Surveillance for
4
+ the Artificial Intelligence of Things
5
+ Armin Danesh Pazho∗, Student Member, IEEE, Christopher Neff∗, Student Member, IEEE, Ghazal Alinezhad
6
+ Noghre, Student Member, IEEE, Babak Rahimi Ardabili, Student Member, IEEE, Shanle Yao, Mohammadreza
7
+ Baharani, Member, IEEE, Hamed Tabkhi, Member, IEEE
8
+ Abstract—With the advancement of vision-based artificial in-
9
+ telligence, the proliferation of the Internet of Things connected
10
+ cameras, and the increasing societal need for rapid and eq-
11
+ uitable security, the demand for accurate real-time intelligent
12
+ surveillance has never been higher. This article presents Ancilia,
13
+ an end-to-end scalable, intelligent video surveillance system for
14
+ the Artificial Intelligence of Things. Ancilia brings state-of-the-
15
+ art artificial intelligence to real-world surveillance applications
16
+ while respecting ethical concerns and performing high-level
17
+ cognitive tasks in real-time. Ancilia aims to revolutionize the
18
+ surveillance landscape, to bring more effective, intelligent, and
19
+ equitable security to the field, resulting in safer and more secure
20
+ communities without requiring people to compromise their right
21
+ to privacy.
22
+ Index Terms—Surveillance, artificial intelligence, IoT, com-
23
+ puter vision, application, real-world, real-time, edge, anomaly.
24
+ I. INTRODUCTION
25
+ There is a growing need for effective and efficient surveil-
26
+ lance technologies that can be deployed to protect our cities,
27
+ people, and infrastructure. For example, in Itaewon, South
28
+ Korea, a holiday celebration left over 150 dead due to severe
29
+ overcrowding, with many blaming the tragedy on careless
30
+ government oversight [1]. In Moore County, North Carolina,
31
+ directed attacks against two power substations left over 45,000
32
+ residents without power for days as technicians rushed to
33
+ restore power and authorities struggled to find the source of
34
+ the attacks [2]. With enough forewarning through smart video
35
+ surveillance, they could have been prevented.
36
+ With the recent emergence of the Artificial Intelligence
37
+ of Things (AIoT), some surveillance solution providers have
38
+ started adding basic forms of artificial intelligence to their
39
+ systems. However, their methods are still naive and unable
40
+ to enhance security in a truly meaningful way [3]. This is
41
+ because, while a lot of research is conducted on tasks that
42
+ would benefit surveillance systems, most works focus on
43
+ algorithmic improvements in a lab environment instead of
44
+ paying attention to factors that are prevalent in real-world
45
+ scenarios [4], [5]. Most research focuses on a single algorithm
46
+ and how to tweak it to get the best possible results on readily
47
+ available datasets that often do not reflect a real surveillance
48
+ environment. Few works explore how different algorithms af-
49
+ fect the performance of other downstream algorithms in multi-
50
+ The authors are with the Electrical and Computer Engineering Department,
51
+ The University of North Carolina at Charlotte, Charlotte, NC, 28223 USA.
52
+ {adaneshp, cneff1, galinezh, brahimia, mbaharan, htabkhiv}@uncc.edu
53
+ ∗ Corresponding authors have equal contribution.
54
+ algorithm systems. Few still explore the effects of noise (both
55
+ data derived and the system produced) in end-to-end accuracy.
56
+ Beyond this, real-world intelligent surveillance necessitates
57
+ real-time performance. The cognitive abilities of advanced
58
+ artificial intelligence are only helpful if they can be provided
59
+ to security personnel quickly enough to take appropriate action
60
+ before it is too late.
61
+ EdgeN-1
62
+ Camera(s)
63
+ C0
64
+ C1
65
+ CN-1
66
+ Camera(s)
67
+ C0
68
+ C1
69
+ CN-1
70
+ Local
71
+ Node(s)
72
+ L0
73
+ L1
74
+ LN-1
75
+ Local
76
+ Node(s)
77
+ L0
78
+ L1
79
+ LN-1
80
+ Global
81
+ Node
82
+ Global
83
+ Node
84
+ EdgeN-1
85
+ Camera(s)
86
+ C0
87
+ C1
88
+ CN-1
89
+ Local
90
+ Node(s)
91
+ L0
92
+ L1
93
+ LN-1
94
+ Global
95
+ Node
96
+ EdgeN-1
97
+ Camera(s)
98
+ C0
99
+ C1
100
+ CN-1
101
+ Local
102
+ Node(s)
103
+ L0
104
+ L1
105
+ LN-1
106
+ Global
107
+ Node
108
+ EdgeN-1
109
+ Camera(s)
110
+ C0
111
+ C1
112
+ CN-1
113
+ Local
114
+ Node(s)
115
+ L0
116
+ L1
117
+ LN-1
118
+ Global
119
+ Node
120
+ Edge0
121
+ Camera(s)
122
+ C0
123
+ C1
124
+ CN-1
125
+ Camera(s)
126
+ C0
127
+ C1
128
+ CN-1
129
+ Local Node(s)
130
+ L0
131
+ L1
132
+ LN-1
133
+ Local Node(s)
134
+ L0
135
+ L1
136
+ LN-1
137
+ Global Node
138
+ Global Node
139
+ Edge0
140
+ Camera(s)
141
+ C0
142
+ C1
143
+ CN-1
144
+ Local Node(s)
145
+ L0
146
+ L1
147
+ LN-1
148
+ Global Node
149
+ Edge0
150
+ Camera(s)
151
+ C0
152
+ C1
153
+ CN-1
154
+ Local Node(s)
155
+ L0
156
+ L1
157
+ LN-1
158
+ Global Node
159
+ Edge0
160
+ Camera(s)
161
+ C0
162
+ C1
163
+ CN-1
164
+ Local Node(s)
165
+ L0
166
+ L1
167
+ LN-1
168
+ Global Node
169
+ Edge0
170
+ Camera(s)
171
+ C0
172
+ C1
173
+ CN-1
174
+ Camera(s)
175
+ C0
176
+ C1
177
+ CN-1
178
+ Local Node(s)
179
+ L0
180
+ L1
181
+ LN-1
182
+ Local Node(s)
183
+ L0
184
+ L1
185
+ LN-1
186
+ Global Node
187
+ Global Node
188
+ Edge0
189
+ Camera(s)
190
+ C0
191
+ C1
192
+ CN-1
193
+ Local Node(s)
194
+ L0
195
+ L1
196
+ LN-1
197
+ Global Node
198
+ Edge0
199
+ Camera(s)
200
+ C0
201
+ C1
202
+ CN-1
203
+ Local Node(s)
204
+ L0
205
+ L1
206
+ LN-1
207
+ Global Node
208
+ Edge0
209
+ Camera(s)
210
+ C0
211
+ C1
212
+ CN-1
213
+ Local Node(s)
214
+ L0
215
+ L1
216
+ LN-1
217
+ Global Node
218
+ Edge0
219
+ Camera(s)
220
+ C0
221
+ C1
222
+ CN-1
223
+ Camera(s)
224
+ C0
225
+ C1
226
+ CN-1
227
+ Local
228
+ Node(s)
229
+ L0
230
+ L1
231
+ LN-1
232
+ Local
233
+ Node(s)
234
+ L0
235
+ L1
236
+ LN-1
237
+ Global
238
+ Node
239
+ Global
240
+ Node
241
+ Edge0
242
+ Camera(s)
243
+ C0
244
+ C1
245
+ CN-1
246
+ Local
247
+ Node(s)
248
+ L0
249
+ L1
250
+ LN-1
251
+ Global
252
+ Node
253
+ Edge0
254
+ Camera(s)
255
+ C0
256
+ C1
257
+ CN-1
258
+ Local
259
+ Node(s)
260
+ L0
261
+ L1
262
+ LN-1
263
+ Global
264
+ Node
265
+ Edge0
266
+ Camera(s)
267
+ C0
268
+ C1
269
+ CN-1
270
+ Local
271
+ Node(s)
272
+ L0
273
+ L1
274
+ LN-1
275
+ Global
276
+ Node
277
+ Cloud
278
+ Service(s)
279
+ User
280
+ Device(s)
281
+ D0
282
+ D1
283
+ DN-1
284
+ User
285
+ Device(s)
286
+ D0
287
+ D1
288
+ DN-1
289
+ Fig. 1. Conceptual overview of Ancilia.
290
+ In this article, we present Ancilia, the first end-to-end
291
+ scalable, intelligent video surveillance system able to perform
292
+ high-level cognitive tasks in real-time while achieving state-
293
+ of-the-art results. Ancilia takes advantage of the prevalence
294
+ of cameras in the Internet of Things (IoT) and uses localized
295
+ servers and existing cameras, facilitating processing on the
296
+ edge without the need for additional infrastructure upgrades.
297
+ Shown in Fig. 1, Ancilia exists within three logical and
298
+ physical segments: the edge, the cloud, and user devices.
299
+ The edge uses a plethora of advanced artificial intelligence
300
+ algorithms processing data received from cameras to facilitate
301
+ intelligent security. Using a single workstation to perform edge
302
+ processing, Ancilia can monitor up to 4 cameras in real-time at
303
+ 30 FPS, or up to 8 cameras at 15 FPS, in scenarios with both
304
+ medium and heavy crowd density. Ancilia performs high-level
305
+ cognitive tasks (i.e. action recognition, anomaly detection)
306
+ with ∼ 1% deviation in accuracy from current SotA.
307
+ Ancilia is designed from the ground up to respect the pri-
308
+ vacy of the people and communities being surveilled. Ancilia
309
+ does not store any personally identifiable information in any
310
+ databases and does not make use of invasive artificial intelli-
311
+ gence techniques such as facial recognition or gait detection.
312
+ Ancilia strictly provides only pose and locational information for
313
+ high-level tasks (i.e. action recognition, anomaly detection),
314
+ as opposed to identity information, which is common. Ancilia
315
+ arXiv:2301.03561v1 [cs.CV] 9 Jan 2023
316
+
317
+ 0IEEE INTERNET OF THINGS JOURNAL
318
+ 2
319
+ looks at what a person is doing, not who they are. This allows
320
+ Ancilia to act as a buffer to help remove biases based on
321
+ race, ethnicity, gender, age, and socio-economic factors, which
322
+ can lead to a reduction in the unnecessary conflict between
323
+ authorities and marginalized communities that has become
324
+ increasingly problematic. After being processed on the edge, data is
325
+ sent to the cloud for communication and service management
326
+ with user devices. A mobile app allows user devices to receive
327
+ data from the cloud, including alerts when potential security
328
+ concerns arise.
329
+ In summary, this article has the following contributions:
330
+ • We present Ancilia, the first end-to-end scalable real-
331
+ world intelligent video surveillance system capable of
332
+ performing high-level cognitive tasks in real-time while
333
+ achieving SotA accuracy.
334
+ • We analyze the ethical concerns of intelligent video
335
+ surveillance, both from a privacy and fairness perspective,
336
+ and illustrate how Ancilia’s design is purpose-built to
337
+ address them.
338
+ • We perform an end-to-end empirical evaluation of Ancilia
339
+ using two high-level cognitive tasks directly related to
340
+ intelligent surveillance, action recognition, and anomaly
341
+ detection, investigating the trade-off in accuracy required
342
+ to achieve real-time performance.
343
+ • We perform an exhaustive system-level evaluation of
344
+ Ancilia’s real-time performance and scalability across
345
+ different classes of hardware and increasing scenario
346
+ intensities, displaying how Ancilia is able to meet real-
347
+ time intelligent security needs in different contexts.
348
+ II. RELATED WORK
349
+ There has been a plethora of research regarding the use
350
+ of artificial intelligence for video surveillance [4], [6]–[8].
351
+ [9] proposes the use of region proposal based optical flow to
352
+ suppress background noise and a bidirectional Bayesian state
353
+ transition strategy to model motion uncertainty to enhance
354
+ spatio-temporal feature representations for the detection of
355
+ salient objects in surveillance videos. [10] proposes the use
356
+ of a person detector, tracking algorithm, and mask classifier
357
+ for tracking pedestrians through surveillance video streams.
358
+ In [4], it is determined that in order to address the latency
359
+ concerns of real-time video surveillance, a shift towards edge
360
+ computing is needed. Nikouei et al. [11]–[13] explore the
361
+ feasibility of using low-power edge devices to perform object
362
+ detection and tracking in surveillance scenarios. They argue
363
+ that in the worst case 5 FPS is a high enough throughput for tracking
364
+ humans in surveillance applications, and as such computation
365
+ can be pushed to the edge. However, their results show that
366
+ even lightweight convolutional neural networks can prove
367
+ problematic for low-power devices, often reducing throughput
368
+ below the 5 FPS threshold. [14] proposes a system using low-
369
+ power embedded GPUs to perform detection, tracking, path
370
+ prediction, pose estimation, and multi-camera re-identification
371
+ in a surveillance environment, while placing a focus on real-
372
+ time execution and the privacy of tracked pedestrians. [15]
373
+ proposes a similar system, focusing solely on object detec-
374
+ tion, tracking, and multi-camera re-identification to increase
375
+ throughput. [16] proposes using a combination of lightweight
376
+ object detection models on the edge and more computation-
377
+ ally expensive models in the cloud, splitting computation
378
+ between the two to provide real-time video surveillance in
379
+ a construction site environment. [17] proposes the use of
380
+ background detection, vehicle detection, and kalman filter [18]
381
+ based tracking for parking lot surveillance and determining lot
382
+ occupancy. [19] proposes a system that uses object detection,
383
+ person tracking, scene segmentation, and joint trajectory and
384
+ activity prediction for pedestrians in a surveillance setting.
385
+ The future of intelligent surveillance is heading towards
386
+ systems able to perform high-level cognitive tasks. A recent
387
+ survey focusing on real-world video surveillance [4] asserts
388
+ that while the domain of video surveillance is comprised
389
+ of understanding stationary objects, vehicles, individuals, and
390
+ crowds, the ability to determine when anomalous events oc-
391
+ cur is paramount for intelligent surveillance systems. Other
392
+ research has supported this assertion [6]. [20] utilizes the
393
+ Infinite Hidden Markov Model and Bayesian Nonparametric
394
+ Factor Analysis to find patterns in video streams and detect
395
+ abnormal events. [21] proposes active learning and fuzzy
396
+ aggregation to learn what constitutes an anomaly continually
397
+ over time, adapting the scenarios not seen in standard datasets.
398
+ [22] proposes a system to detect suspicious behaviors in a
399
+ mall surveillance setting, using lightweight algorithms such
400
+ as segmentation, blob fusion, and kalman filter based tracking
401
+ [18]. AnomalyNet [23] is a recently proposed recurrent neural
402
+ network with adaptive iterative hard-thresholding and long
403
+ short-term memory that works directly off pixel information to
404
+ eliminate background noise, capture motion, and learn sparse
405
+ representation and dictionary to perform anomaly detection in
406
+ video surveillance.
407
+ III. ETHICAL CONCERNS
408
+ Video surveillance has always been associated with social
409
+ and ethical concerns, whether in traditional form or more
410
+ recent intelligent formats. Respecting citizens’ privacy and
411
+ autonomy while improving public safety and security are the
412
+ most well-known and enduring ethical issues in this context
413
+ [24]–[27]. Developing a successful smart video surveillance
414
+ solution that addresses the public safety problem and engages
415
+ the community up to a certain level is only possible by
416
+ considering these concerns.
417
+ There is rising attention among scholars to the issue of
418
+ incorporating privacy concerns at the design level, referred to
419
+ as “privacy by design” [28]. The source of discrimination and
420
+ privacy violation in many data-driven and AI-based systems,
421
+ such as smart video surveillance technology, is using Personal
422
+ Identifiable Information (PII) [29], [30]. Using PII, such as
423
+ actual footage of people’s daily activities at any stage of the
424
+ technology, can increase the risk of privacy violation. There is
425
+ a long-lasting debate on the ethical challenges of using facial
426
+ recognition technologies in different sectors and how using
427
+ this technology can result in privacy violation [31]–[34].
428
+ Avoiding facial recognition technologies does not guarantee
429
+ the system is entirely privacy preserving. Storing images of
430
+ pedestrians is another source of ethical violation. From the
431
+
432
+ IEEE INTERNET OF THINGS JOURNAL
433
+ 3
434
+ Neural
435
+ Network
436
+ Filter
437
+ Match and
438
+ Combine
439
+ Algorithm
440
+ SQL
441
+ Database
442
+ Node
443
+ Boundary
444
+ Cloud
445
+ Cloud
446
+ (C) Cloud
447
+ Node
448
+ SA
449
+ Flow
450
+ within a
451
+ Node
452
+ Flow
453
+ Between
454
+ Nodes
455
+ Communication
456
+ AR
457
+ R
458
+ IoU
459
+ Confidence
460
+ Confidence
461
+ Object
462
+ Detector
463
+ Pedestrian
464
+ Tracker
465
+ Pose
466
+ Estimator
467
+ Feature
468
+ Extractor
469
+ Downstream
470
+ Tasks
471
+ Crop
472
+ Selection
473
+ (A) Local Node N-1
474
+ F
475
+ PB
476
+ OB
477
+ T
478
+ P
479
+ PT
480
+ C
481
+ PT
482
+ E
483
+ D
484
+ IoU
485
+ Confidence
486
+ Confidence
487
+ Object
488
+ Detector
489
+ Pedestrian
490
+ Tracker
491
+ Pose
492
+ Estimator
493
+ Feature
494
+ Extractor
495
+ Downstream
496
+ Tasks
497
+ Crop
498
+ Selection
499
+ (A) Local Node N-1
500
+ F
501
+ PB
502
+ OB
503
+ T
504
+ P
505
+ PT
506
+ C
507
+ PT
508
+ E
509
+ D
510
+ IoU
511
+ Confidence
512
+ Confidence
513
+ Object
514
+ Detector
515
+ Pedestrian
516
+ Video
517
+ Stream
518
+ Tracker
519
+ Pose
520
+ Estimator
521
+ Feature
522
+ Extractor
523
+ Downstream
524
+ Tasks
525
+ Crop
526
+ Selection
527
+ (A) Local Node 0
528
+ F
529
+ PB
530
+ OB
531
+ T
532
+ P
533
+ PT
534
+ C
535
+ PT
536
+ PB
537
+ E
538
+ D
539
+ IoU
540
+ Confidence
541
+ Confidence
542
+ Object
543
+ Detector
544
+ Pedestrian
545
+ Video
546
+ Stream
547
+ Tracker
548
+ Pose
549
+ Estimator
550
+ Feature
551
+ Extractor
552
+ Downstream
553
+ Tasks
554
+ Crop
555
+ Selection
556
+ (A) Local Node 0
557
+ F
558
+ PB
559
+ OB
560
+ T
561
+ P
562
+ PT
563
+ C
564
+ PT
565
+ PB
566
+ E
567
+ D
568
+ IoU
569
+ Confidence
570
+ Confidence
571
+ Object
572
+ Detector
573
+ Pedestrian
574
+ Video
575
+ Stream
576
+ Tracker
577
+ Pose
578
+ Estimator
579
+ Feature
580
+ Extractor
581
+ Downstream
582
+ Tasks
583
+ Crop
584
+ Selection
585
+ (A) Local Node 0
586
+ F
587
+ PB
588
+ OB
589
+ T
590
+ P
591
+ PT
592
+ C
593
+ PT
594
+ PB
595
+ E
596
+ D
597
+ IoU
598
+ Confidence
599
+ Confidence
600
+ Object
601
+ Detector
602
+ Pedestrian
603
+ Video
604
+ Stream
605
+ Tracker
606
+ Pose
607
+ Estimator
608
+ Feature
609
+ Extractor
610
+ Downstream
611
+ Tasks
612
+ Crop
613
+ Selection
614
+ (A) Local Node 0
615
+ F
616
+ PB
617
+ OB
618
+ T
619
+ P
620
+ PT
621
+ C
622
+ PT
623
+ PB
624
+ E
625
+ D
626
+ Database
627
+ Global
628
+ Tracker
629
+ Statistical
630
+ Analysis
631
+ (B) Global Node
632
+ FL
633
+ FD
634
+ IDG
635
+ FL
636
+ D
637
+ I
638
+ SA
639
+ IoU
640
+ Confidence
641
+ Confidence
642
+ Object
643
+ Detector
644
+ Pedestrian
645
+ Tracker
646
+ Pose
647
+ Estimator
648
+ Feature
649
+ Extractor
650
+ High-level
651
+ Tasks
652
+ Crop
653
+ Selection
654
+ (A) Local Node 0
655
+ BBP
656
+ BBO
657
+ IDL
658
+ P
659
+ P,IDL
660
+ C
661
+ P,IDL
662
+ BBP
663
+ FL
664
+ D
665
+ IoU
666
+ Confidence
667
+ Confidence
668
+ Object
669
+ Detector
670
+ Pedestrian
671
+ Tracker
672
+ Pose
673
+ Estimator
674
+ Feature
675
+ Extractor
676
+ High-level
677
+ Tasks
678
+ Crop
679
+ Selection
680
+ (A) Local Node 0
681
+ BBP
682
+ BBO
683
+ IDL
684
+ P
685
+ P,IDL
686
+ C
687
+ P,IDL
688
+ BBP
689
+ FL
690
+ D
691
+ IoU
692
+ Confidence
693
+ Confidence
694
+ Object
695
+ Detector
696
+ Pedestrian
697
+ Tracker
698
+ Pose
699
+ Estimator
700
+ Feature
701
+ Extractor
702
+ Downstream
703
+ Tasks
704
+ Crop
705
+ Selection
706
+ (A) Local Node N-1
707
+ F
708
+ PB
709
+ OB
710
+ T
711
+ P
712
+ PT
713
+ C
714
+ PT
715
+ E
716
+ D
717
+ IoU
718
+ Confidence
719
+ Confidence
720
+ Object
721
+ Detector
722
+ Pedestrian
723
+ Video
724
+ Stream
725
+ Tracker
726
+ Pose
727
+ Estimator
728
+ Feature
729
+ Extractor
730
+ Downstream
731
+ Tasks
732
+ Crop
733
+ Selection
734
+ (A) Local Node 0
735
+ F
736
+ PB
737
+ OB
738
+ T
739
+ P
740
+ PT
741
+ C
742
+ PT
743
+ PB
744
+ E
745
+ D
746
+ IoU
747
+ Confidence
748
+ Confidence
749
+ Object
750
+ Detector
751
+ Pedestrian
752
+ Video
753
+ Stream
754
+ Tracker
755
+ Pose
756
+ Estimator
757
+ Feature
758
+ Extractor
759
+ Downstream
760
+ Tasks
761
+ Crop
762
+ Selection
763
+ (A) Local Node 0
764
+ F
765
+ PB
766
+ OB
767
+ T
768
+ P
769
+ PT
770
+ C
771
+ PT
772
+ PB
773
+ E
774
+ D
775
+ Database
776
+ Global
777
+ Tracker
778
+ Statistical
779
+ Analysis
780
+ (B) Global Node
781
+ FL
782
+ FD
783
+ IDG
784
+ FL
785
+ D
786
+ I
787
+ SA
788
+ IoU
789
+ Confidence
790
+ Confidence
791
+ Object
792
+ Detector
793
+ Pedestrian
794
+ Tracker
795
+ Pose
796
+ Estimator
797
+ Feature
798
+ Extractor
799
+ High-level
800
+ Tasks
801
+ Crop
802
+ Selection
803
+ (A) Local Node 0
804
+ BBP
805
+ BBO
806
+ IDL
807
+ P
808
+ P,IDL
809
+ C
810
+ P,IDL
811
+ BBP
812
+ FL
813
+ D
814
+ Edge
815
+ Boundary
816
+ Video
817
+ Stream
818
+ from
819
+ Camera
820
+ (D) User
821
+ Device(s)
822
+ AR
823
+ R
824
+ User
825
+ Device
826
+ Fig. 2. Ancilia algorithmic details. N local nodes are connected to a single global node on the edge. The final analyses are transferred to the cloud node to
827
+ feed the application on the user device. Multiple edges may be connected to the cloud, though this figure only shows one edge for clarity. BBP, BBO, IDL,
828
+ P, C, FL, D, FD, IDG, I, SA, R, and AR refer to bounding boxes for pedestrians, bounding boxes of objects, local identities, poses, person crops that
829
+ passed selection, features from the local node, data from the downstream tasks, features from the database, global identities, information from the database,
830
+ completed statistical analysis, requests from users, and requested attributes respectively.
831
+ discrimination perspective, using any form of PII can con-
832
+ tribute to the issue of marginalization in policing systems [35].
833
+ Therefore, an essential step in designing a non-discriminatory
834
+ system is to ensure the system is not dependent on PII.
835
+ This requires a specific approach toward the design of such
836
+ technology in the choice of algorithm, the type of data used,
837
+ and the storing of such data.
838
+ Ancilia addresses this by not storing any PII or sending any
839
+ PII across the network. Such data is destroyed after it is used.
840
+ Ancilia utilizes pose-based methods for all high-level cognitive
841
+ tasks, ensuring no PII is ever used in such algorithms. This
842
+ allows for such processing to occur without any potential
843
+ for gender, ethnicity, or class-based discrimination. As such,
844
+ Ancilia is able to address the ethical challenge of privacy in
845
+ smart video surveillance systems while also addressing the
846
+ ethical issue of discrimination.
847
+ IV. ANCILIA ALGORITHMIC FRAMEWORK
848
+ The algorithmic core of Ancilia is separated into two
849
+ conceptual systems: the local nodes containing the algorithmic
850
+ pipeline of each camera and the global node that handles
851
+ all processing that requires understanding of multiple camera
852
+ perspectives. These two systems make up the algorithmic core
853
+ of Ancilia and are the basis on which all higher understanding
854
+ is achieved. A visual representation of this algorithmic core
855
+ can be seen in Fig. 2.
856
+ A. Single Camera Vision Pipeline
857
+ As seen in Fig. 2, the local algorithmic pipeline starts when
858
+ an image is extracted from the camera. The image is first run
859
+ through an object detector to locate people, vehicles, animals,
860
+ and other important objects in the scene. This is important not
861
+ only because it acts as the basis for the rest of the algorithmic
862
+ pipeline but also because it can be used for basic situational
863
+ awareness. Sometimes, just the presence of a certain object
864
+ in a scene is noteworthy, like a person in an unauthorized
865
+ location, a bag left unattended, or the presence of a firearm.
866
+ Ancilia uses YOLOv5 [36] for this purpose (however, it can be
867
+ any detector). The locational coordinates of persons are sent to
868
+ a tracker, where tracklets are created, matching each person
869
+ with their previous detections in prior images. Ancilia uses
870
+ ByteTrack [37]. The tracking allows for understanding how
871
+ a person moves throughout a scene, which is vital for many
872
+ surveillance applications. It also allows Ancilia to understand
873
+ which poses belong to which persons over time, which is
874
+ vital for many high-level tasks that provide much-needed
875
+ situational awareness. Image crops of the people detected in
876
+ the image are also sent to a human pose estimator, where two-
877
+ dimensional pose skeletons are created. Ancilia uses HRNet
878
+ [38] for extracting 2D skeletons. Using pose data for higher-
879
+ level tasks has two major benefits over simply using raw pixel
880
+ data. First, pose data is of much lower dimensionality than
881
+ pixel data, making it much less computationally expensive and
882
+ allowing the Ancilia to function in real-time. Second, pose
883
+ data contains absolutely zero identifiable information, making
884
+ it impossible for high-level tasks to form unintended biases
885
+ based on ethnicity, gender, age, or other identity-based metrics.
886
+ B. Multi-Camera Person Re-identification
887
+ While the tracker tracks people within a single camera,
888
+ locational information cannot accurately re-identify a person
889
+ across multiple cameras. For this, the same person crops that
890
+ were sent to the human pose estimator are also sent to a person
891
+ re-identification feature extractor, where an abstract feature
892
+ representation is created for each person. Only one feature
893
+ representation is created for each person over a period of 30
894
+ frames, and only when the quality of the representation can
895
+ be assured, as poor quality representations are detrimental to
896
+ accurate multi-camera person re-identification. Ancilia uses a
897
+ feature representation filtering algorithm to verify two qualities
898
+
899
+ IEEE INTERNET OF THINGS JOURNAL
900
+ 4
901
+ Pre-
902
+ Processor
903
+ Object
904
+ Detector
905
+ Tracker
906
+ A
907
+ Pose
908
+ Estimator
909
+ Task 0
910
+ Task 1
911
+ Task N-1
912
+ Feature
913
+ Extractor
914
+ Crop
915
+ Selection
916
+ A
917
+ Transfer
918
+ FL
919
+ D0
920
+ D1
921
+ DN-1
922
+
923
+
924
+
925
+
926
+ β1
927
+ β1
928
+ β1
929
+ β1
930
+ β1
931
+ β1
932
+ β1
933
+ β1
934
+ β1
935
+ β1
936
+ β1
937
+ β1
938
+ β1
939
+ β1
940
+ β1
941
+ β1
942
+ β2
943
+ β2
944
+ β1
945
+ β1
946
+ β1
947
+ β1
948
+ Local Node 0
949
+ β3
950
+ β3
951
+ β2
952
+ β2
953
+ β3
954
+ β3
955
+ β1
956
+ β1
957
+ β3
958
+ β3
959
+ β3
960
+ β3
961
+ β3
962
+ β3
963
+ β3
964
+ β3
965
+ δ0
966
+ δ0
967
+ δ1
968
+ δ1
969
+ δN-1
970
+ δN-1
971
+ δ0
972
+ δ0
973
+ δ1
974
+ δ1
975
+ δN-1
976
+ δN-1
977
+ Pre-
978
+ Processor
979
+ Object
980
+ Detector
981
+ Tracker
982
+ A
983
+ Pose
984
+ Estimator
985
+ Task 0
986
+ Task 1
987
+ Task N-1
988
+ Feature
989
+ Extractor
990
+ Crop
991
+ Selection
992
+ A
993
+ Transfer
994
+ FL
995
+ D0
996
+ D1
997
+ DN-1
998
+
999
+
1000
+ β1
1001
+ β1
1002
+ β1
1003
+ β1
1004
+ β1
1005
+ β1
1006
+ β1
1007
+ β1
1008
+ β2
1009
+ β1
1010
+ β1
1011
+ Local Node 0
1012
+ β3
1013
+ β2
1014
+ β3
1015
+ β1
1016
+ β3
1017
+ β3
1018
+ β3
1019
+ β3
1020
+ δ0
1021
+ δ1
1022
+ δN-1
1023
+ δ0
1024
+ δ1
1025
+ δN-1
1026
+ Pre-
1027
+ Processor
1028
+ Object
1029
+ Detector
1030
+ Tracker
1031
+ A
1032
+ Pose
1033
+ Estimator
1034
+ Task 0
1035
+ Task 1
1036
+ Task N-1
1037
+ Feature
1038
+ Extractor
1039
+ Crop
1040
+ Selection
1041
+ A
1042
+ Transfer
1043
+ FL
1044
+ D0
1045
+ D1
1046
+ DN-1
1047
+
1048
+
1049
+
1050
+
1051
+ β1
1052
+ β1
1053
+ β1
1054
+ β1
1055
+ β1
1056
+ β1
1057
+ β1
1058
+ β1
1059
+ β1
1060
+ β1
1061
+ β1
1062
+ β1
1063
+ β1
1064
+ β1
1065
+ β1
1066
+ β1
1067
+ β2
1068
+ β2
1069
+ β1
1070
+ β1
1071
+ β1
1072
+ β1
1073
+ Local Node 0
1074
+ β3
1075
+ β3
1076
+ β2
1077
+ β2
1078
+ β3
1079
+ β3
1080
+ β1
1081
+ β1
1082
+ β3
1083
+ β3
1084
+ β3
1085
+ β3
1086
+ β3
1087
+ β3
1088
+ β3
1089
+ β3
1090
+ δ0
1091
+ δ0
1092
+ δ1
1093
+ δ1
1094
+ δN-1
1095
+ δN-1
1096
+ δ0
1097
+ δ0
1098
+ δ1
1099
+ δ1
1100
+ δN-1
1101
+ δN-1
1102
+ Pre-
1103
+ Processor
1104
+ Object
1105
+ Detector
1106
+ Tracker
1107
+ A
1108
+ Pose
1109
+ Estimator
1110
+ Task 0
1111
+ Task 1
1112
+ Task N-1
1113
+ Feature
1114
+ Extractor
1115
+ Crop
1116
+ Selection
1117
+ A
1118
+ Transfer
1119
+ FL
1120
+ D0
1121
+ D1
1122
+ DN-1
1123
+
1124
+
1125
+ β1
1126
+ β1
1127
+ β1
1128
+ β1
1129
+ β1
1130
+ β1
1131
+ β1
1132
+ β1
1133
+ β2
1134
+ β1
1135
+ β1
1136
+ Local Node 0
1137
+ β3
1138
+ β2
1139
+ β3
1140
+ β1
1141
+ β3
1142
+ β3
1143
+ β3
1144
+ β3
1145
+ δ0
1146
+ δ1
1147
+ δN-1
1148
+ δ0
1149
+ δ1
1150
+ δN-1
1151
+ Pre-
1152
+ Processor
1153
+ Object
1154
+ Detector
1155
+ Tracker
1156
+ A
1157
+ Pose
1158
+ Estimator
1159
+ Task 0
1160
+ Task 1
1161
+ Task N-1
1162
+ Feature
1163
+ Extractor
1164
+ Crop
1165
+ Selection
1166
+ A
1167
+ Transfer
1168
+ FL
1169
+ D0
1170
+ D1
1171
+ DN-1
1172
+
1173
+
1174
+
1175
+
1176
+ β1
1177
+ β1
1178
+ β1
1179
+ β1
1180
+ β1
1181
+ β1
1182
+ β1
1183
+ β1
1184
+ β1
1185
+ β1
1186
+ β1
1187
+ β1
1188
+ β1
1189
+ β1
1190
+ β1
1191
+ β1
1192
+ β2
1193
+ β2
1194
+ β1
1195
+ β1
1196
+ β1
1197
+ β1
1198
+ Local Node 0
1199
+ β3
1200
+ β3
1201
+ β2
1202
+ β2
1203
+ β3
1204
+ β3
1205
+ β1
1206
+ β1
1207
+ β3
1208
+ β3
1209
+ β3
1210
+ β3
1211
+ β3
1212
+ β3
1213
+ β3
1214
+ β3
1215
+ δ0
1216
+ δ0
1217
+ δ1
1218
+ δ1
1219
+ δN-1
1220
+ δN-1
1221
+ δ0
1222
+ δ0
1223
+ δ1
1224
+ δ1
1225
+ δN-1
1226
+ δN-1
1227
+ Pre-
1228
+ Processor
1229
+ Object
1230
+ Detector
1231
+ Tracker
1232
+ A
1233
+ Pose
1234
+ Estimator
1235
+ Task 0
1236
+ Task 1
1237
+ Task N-1
1238
+ Feature
1239
+ Extractor
1240
+ Crop
1241
+ Selection
1242
+ A
1243
+ Transfer
1244
+ FL
1245
+ D0
1246
+ D1
1247
+ DN-1
1248
+
1249
+
1250
+ β1
1251
+ β1
1252
+ β1
1253
+ β1
1254
+ β1
1255
+ β1
1256
+ β1
1257
+ β1
1258
+ β2
1259
+ β1
1260
+ β1
1261
+ Local Node 0
1262
+ β3
1263
+ β2
1264
+ β3
1265
+ β1
1266
+ β3
1267
+ β3
1268
+ β3
1269
+ β3
1270
+ δ0
1271
+ δ1
1272
+ δN-1
1273
+ δ0
1274
+ δ1
1275
+ δN-1
1276
+ Camera
1277
+ Neural
1278
+ Network
1279
+ Algorithm
1280
+ Data
1281
+ Batched
1282
+ Data
1283
+ Sequential
1284
+ Data
1285
+ Frame
1286
+ Batching
1287
+ Object
1288
+ Batching
1289
+ Comm.
1290
+ Process
1291
+ IoT
1292
+ Flow within
1293
+ a Node
1294
+ Transfer to
1295
+ Global Node
1296
+ Frame
1297
+ Unbatching
1298
+ De-identified
1299
+ Data
1300
+ Queue
1301
+ Comm.
1302
+ Process
1303
+ Pre-
1304
+ Processor
1305
+ Object
1306
+ Detector
1307
+ Tracker
1308
+ A
1309
+ Pose
1310
+ Estimator
1311
+ Task 0
1312
+ Task 1
1313
+ Task N-1
1314
+ Feature
1315
+ Extractor
1316
+ Crop
1317
+ Selection
1318
+ A
1319
+ Transfer
1320
+ FL
1321
+ D0
1322
+ D1
1323
+ DN-1
1324
+
1325
+
1326
+
1327
+
1328
+ β1
1329
+ β1
1330
+ β1
1331
+ β1
1332
+ β1
1333
+ β1
1334
+ β1
1335
+ β1
1336
+ β1
1337
+ β1
1338
+ β1
1339
+ β1
1340
+ β1
1341
+ β1
1342
+ β1
1343
+ β1
1344
+ β2
1345
+ β2
1346
+ β1
1347
+ β1
1348
+ β1
1349
+ β1
1350
+ Local Node 0
1351
+ β3
1352
+ β3
1353
+ β2
1354
+ β2
1355
+ β3
1356
+ β3
1357
+ β1
1358
+ β1
1359
+ β3
1360
+ β3
1361
+ β1
1362
+ β1
1363
+ β1
1364
+ β1
1365
+ β1
1366
+ β1
1367
+ δ0
1368
+ δ0
1369
+ δ1
1370
+ δ1
1371
+ δN-1
1372
+ δN-1
1373
+ δ0
1374
+ δ0
1375
+ δ1
1376
+ δ1
1377
+ δN-1
1378
+ δN-1
1379
+ Pre-
1380
+ Processor
1381
+ Object
1382
+ Detector
1383
+ Tracker
1384
+ A
1385
+ Pose
1386
+ Estimator
1387
+ Task 0
1388
+ Task 1
1389
+ Task N-1
1390
+ Feature
1391
+ Extractor
1392
+ Crop
1393
+ Selection
1394
+ A
1395
+ Transfer
1396
+ FL
1397
+ D0
1398
+ D1
1399
+ DN-1
1400
+
1401
+
1402
+ β1
1403
+ β1
1404
+ β1
1405
+ β1
1406
+ β1
1407
+ β1
1408
+ β1
1409
+ β1
1410
+ β2
1411
+ β1
1412
+ β1
1413
+ Local Node 0
1414
+ β3
1415
+ β2
1416
+ β3
1417
+ β1
1418
+ β3
1419
+ β1
1420
+ β1
1421
+ β1
1422
+ δ0
1423
+ δ1
1424
+ δN-1
1425
+ δ0
1426
+ δ1
1427
+ δN-1
1428
+ Fig. 3. A detailed view of system design in Ancilia’s local nodes. β and δ refer to different batch sizes. FL and D represent local features and data received
1429
+ from downstream tasks respectively.
1430
+ for person crops. First, a person crop must contain a high-
1431
+ quality view of the person. To this end, the filter algorithm uses
1432
+ the 2D pose skeleton and verifies that at least 9 keypoints were
1433
+ detected with at least 60% confidence. The filter algorithm
1434
+ looks at the overlap (i.e. Intersection of Union) of the bounding
1435
+ boxes generated by the object detector. An individual’s bound
1436
+ box must have an Intersection over Union (IoU) of no more
1437
+ than 0.1 with any other person. If those two conditions are met,
1438
+ the person crop is determined to be of high enough quality to
1439
+ produce an adequate feature representation. If more than one
1440
+ crop is deemed suitable for a single person during a 30 frame
1441
+ window, the one with the most confident pose is selected. The
1442
+ features created by the feature extractor are sent to the global
1443
+ node for multi-camera person re-identification. Ancilia uses
1444
+ OSNet [39] to extract feature representations.
1445
+ C. Higher-Level Tasks
1446
+ High-level tasks are executed on the local node, and have
1447
+ access to the object, tracking, and pose data generated in the
1448
+ previous steps. Since the decision of which high-level tasks are
1449
+ needed is highly application dependent, we do not consider
1450
+ these tasks to be part of the Ancilia algorithmic core, but
1451
+ instead an extension to be customized based on intended use.
1452
+ In this paper, we use action recognition and anomaly detection
1453
+ as two common examples of high-level tasks that are highly
1454
+ relevant to intelligent surveillance. For action recognition, we
1455
+ choose PoseConv3D [40] and CTR-CGN [41], two state-of-
1456
+ the-art networks that can utilize the 2D human pose skeletons
1457
+ provided by Ancilia. For anomaly detection, we use GEPC
1458
+ [42] and MPED-RNN [43], which are based on 2D human
1459
+ pose skeletons.
1460
+ V. SYSTEM DESIGN
1461
+ Beyond the algorithmic design, Ancilia can be analyzed
1462
+ from a system-level design and implementation perspective.
1463
+ The local node in particular has a complex system design, as
1464
+ seen in Fig. 3. The global node and cloud are much simpler,
1465
+ as shown in Fig. 2.
1466
+ A. Parallelism
1467
+ A key design objective of Ancilia is to achieve higher
1468
+ efficiency by balancing throughput and latency. Ancilia uses
1469
+ pipelining to take advantage of process parallelism. Each
1470
+ major task is implemented as a separate process, which
1471
+ executes concurrently with other processes. These processes
1472
+ communicate with each other using queues to utilize memory
1473
+ resources better and enable fast inter-process communication.
1474
+ While pipelining is a well-known technique for optimization,
1475
+ the overhead associated with its implementation means a
1476
+ balance needs to be found. Figure 3 shows a detailed view of
1477
+ the system design on the local node. Each pipeline stage is sep-
1478
+ arated by a queue with a size limit of λ1 elements, preventing
1479
+ any potential overflow from uneven execution speed between
1480
+ pipeline stages. By default, Ancilia uses a λ1 value of 4. As
1481
+ is common, Ancilia offloads highly parallel tasks that rely on
1482
+ neural networks (i.e. object detection, pose estimation, feature
1483
+ extraction, and many high-level tasks) to Graphics Processing
1484
+ Units (GPUs) for execution.
1485
+ B. Data Batching
1486
+ Batching is another technique Ancilia implements to better
1487
+ utilize hardware resources. Generally, batching is able to
1488
+ greatly increase the throughput of a system at the cost of end-
1489
+ to-end latency. However, many high-level tasks (e.g. action
1490
+
1491
+ IEEE INTERNET OF THINGS JOURNAL
1492
+ 5
1493
+ recognition, anomaly detection) require multiple video frames
1494
+ worth of input data (often called a window) before they can start
1495
+ processing, so the latency that would be incurred by batching
1496
+ input frames is already inherent in these high-level tasks, as
1497
+ long as the frame batch and high-level task window are of
1498
+ the same size. Further, as frame batching ultimately increases
1499
+ the throughput, the end-to-end latency is decreased when
1500
+ compared to processing each frame sequentially. While object
1501
+ detection works on entire frames, all other neural networks in
1502
+ Ancilia work off individual objects. These objects are batched
1503
+ together before being input to the network, greatly increasing
1504
+ hardware utilization. There can be multiple object batches
1505
+ within a single frame batch, based on how many of the relevant
1506
+ objects are detected in the video.
1507
+ C. Local Node
1508
+ Once the local node receives the video stream from the
1509
+ camera, the preprocessor is responsible for all basic image
1510
+ processing necessary before sending the frames through the
1511
+ algorithmic core. After preprocessing, frames are batched in
1512
+ sequential segments of size β1. Ancilia sets β1 = 30 to match
1513
+ the window size of high-level tasks. This is also convenient
1514
+ as most modern security and IoT cameras record video at
1515
+ either 30 or 60 FPS. The batched frames are sent to the object
1516
+ detector, which outputs a list of objects with class labels and
1517
+ bounding box coordinates. Bounding boxes for pedestrians are
1518
+ sent to the tracker, while bounding boxes for other objects
1519
+ are passed through the system for use in high-level tasks
1520
+ and statistical analysis. A crop of each pedestrian from the
1521
+ original frame is passed through to the pose estimator. At
1522
+ the tracker, bounding boxes for pedestrians are unbatched
1523
+ to fit the tracker’s sequential operation. The tracker groups
1524
+ the pedestrians and either matches them with previously seen
1525
+ pedestrians or assigns them a unique local ID. Afterwards,
1526
+ the pedestrians are once again batched by frame and sent to
1527
+ the pose estimator. At the pose estimator, the object batching
1528
+ is performed on the person crops, with a batch size of
1529
+ β2 = 32. These batches are fed to the pose estimator, which
1530
+ outputs human pose skeletons for each person crop. Then the
1531
+ pedestrian bounding boxes, person crops, local IDs, and human
1532
+ pose skeletons are once again batched by frame and combined
1533
+ with the object bounding boxes from the object detector.
1534
+ The pedestrian bounding boxes, person crops, local IDs, and
1535
+ pose skeletons are sent to crop selection, while the pedestrian
1536
+ bounding boxes, object bounding boxes, local IDs, and pose
1537
+ skeletons are sent to each high-level task as necessary. While
1538
+ the person crops are necessary for the feature extraction that
1539
+ enables multi-camera re-identification, no identifiable data is
1540
+ sent to any of the high-level tasks, keeping in line with the
1541
+ ethical concerns mentioned in Sec. III.
1542
+ Crop selection filters out low-quality person crops based on
1543
+ bounding box overlap and keypoint confidence, as described
1544
+ in Sec. IV. The remaining crops are then batched, with size β3
1545
+ being dynamic based on the number of persons in the scene,
1546
+ and sent to the feature extractor. Once features are extracted,
1547
+ they are sent for transfer to the server. Each high-level task
1548
+ receives data at the granularity of a frame batch, and sends
1549
+ data to the server at whatever granularity that task requires.
1550
+ Each high-level task has its own process and works in parallel
1551
+ with other tasks as well as with crop selection and feature
1552
+ extraction. Communication is completely decoupled from the
1553
+ pipeline, so once the data is sent the local node pipeline
1554
+ continues to function as normal. Importantly, no identifiable
1555
+ information is ever sent to the global node, keeping in line
1556
+ with the privacy and ethical concerns mentioned in Sec. III.
1557
+ D. Global Node
1558
+ All received data is stored in a relational database on the
1559
+ global node. The matching algorithm described in Sec. IV
1560
+ compares the received features with existing features in the
1561
+ database over the period λ5 and assigns a global ID based on
1562
+ the results. The default value for λ5 is set to 1 hour, but this
1563
+ should be changed to suit the needs of the application. An
1564
+ assortment of algorithms performs statistical analysis using
1565
+ the relational database, as detailed in Sec. IV. The analysis
1566
+ is transmitted to the cloud node using APIs provided by the
1567
+ cloud service provider. By default, Ancilia uses Amazon Web
1568
+ Services, but this can be altered based on user/application
1569
+ needs. The cloud (e.g. Amazon Web Services (AWS)) receives
1570
+ analyzed data from the global node.
1571
+ VI. EXPERIMENTAL RESULTS
1572
+ A. Algorithmic Core
1573
+ The algorithmic core of Ancilia consists of multiple algo-
1574
+ rithms, each of which works off of data generated by the
1575
+ previous algorithms. As these algorithms leverage imperfect
1576
+ neural networks, they generate noise that accumulates through
1577
+ the system. To understand the source of this noise, we must
1578
+ first look at the accuracy of each of these core algorithms
1579
+ in isolation. Table I shows the accuracies of the algorithmic
1580
+ core’s four main tasks: object detection, pedestrian tracking,
1581
+ human pose estimation, and person re-identification. The table
1582
+ also shows the accuracies of the top SotA models in each
1583
+ task. These SotA methods are not suitable for intelligent
1584
+ surveillance applications, as their excessive computation and
1585
+ vast parameters make real-time execution impossible, but the
1586
+ comparison allows us to see the maximum potential allowable
1587
+ by current research and the accuracy loss incurred to keep
1588
+ Ancilia performing in real-time.
1589
+ TABLE I
1590
+ ACCURACY OF ANCILIA’S ALGORITHMIC CORE NETWORKS IN
1591
+ ISOLATION. ACCURACIES OF OBJECT DETECTION AND POSE ESTIMATION
1592
+ ARE USING THE COCO DATASET [44], TRACKING USING MOT20 [45],
1593
+ AND PERSON REID USING DUKEMTMC [46]. SOTA REPRESENT THE
1594
+ HIGHEST ACCURACIES CURRENTLY ACHIEVABLE WITH
1595
+ STATE-OF-THE-ART METHODS WHEN COMPUTATION AND LATENCY ARE
1596
+ NOT A CONCERN.
1597
+ Task
1598
+ Method
1599
+ Metric
1600
+ Accuracy
1601
+ SotA
1602
+ Object Detection
1603
+ YOLOv5 [36]
1604
+ mAP
1605
+ 49.0
1606
+ 65.0 [47]
1607
+ Tracking
1608
+ ByteTrack [37]
1609
+ MOTA
1610
+ 77.8
1611
+ 77.9 [48]
1612
+ Pose Estimation
1613
+ HRNet [38]
1614
+ AP
1615
+ 75.1
1616
+ 81.1 [49]
1617
+ Person ReID
1618
+ OSNet [39]
1619
+ Top-1
1620
+ 88.6
1621
+ 95.6 [50]
1622
+ Object detection sees the biggest hit to accuracy, with a 16%
1623
+ drop from SotA. This is intuitive, as YOLOv5 [36] is not only
1624
+
1625
+ IEEE INTERNET OF THINGS JOURNAL
1626
+ 6
1627
+ the largest model in the algorithmic core, but also the only
1628
+ one that operates on the raw camera stream. So while larger
1629
+ models are available and would be able to produce higher
1630
+ accuracy, even a slight increase in model size or computation
1631
+ would result in a noticeable decrease in throughput. Human
1632
+ pose estimation sees a decrease in accuracy for a similar
1633
+ reason, though much smaller in scale at only 6%. While
1634
+ HRNet [38] is not run on the raw camera stream, it is run
1635
+ individually for each person detected by the object detector.
1636
+ As such, maintaining a small model size is preferable. Person
1637
+ re-identification sees a slightly larger drop in accuracy than
1638
+ human pose estimation at 7%. While this is partly due to
1639
+ using a lightweight model, OSNet [39], the SotA model for
1640
+ person reID is also lightweight. However, the SotA uses a
1641
+ centroid-based retrieval method not suitable for open-set reID,
1642
+ of which most surveillance scenarios are. Pedestrian tracking
1643
+ sees almost no drop in accuracy, approximately 0.1%. This
1644
+ stems from the comparative ease of tracking pedestrians in
1645
+ a single camera, where a simple, lightweight algorithm like
1646
+ ByteTrack [37] sees almost no performance difference from
1647
+ the top of the line SotA approaches.
1648
+ B. High-level Tasks
1649
+ To better understand how the noise generated by the al-
1650
+ gorithmic core affects overall performance, and thus how
1651
+ well Ancilia performs in the realm of real-world intelligent
1652
+ surveillance, we examine the performance of two high-level
1653
+ cognitive surveillance tasks when running on Ancilia. For
1654
+ Ancilia to be a benefit to intelligent surveillance tasks, we
1655
+ must ensure that excess false alarms or missed positive events
1656
+ do not occur. To assess this, we choose action recognition
1657
+ and anomaly detection, as these tasks can utilize the human
1658
+ pose information generated by the algorithmic core, resulting
1659
+ in faster and less biased inference. Since both these methods
1660
+ utilize temporal batches of human poses for each individual,
1661
+ these experiments will directly reflect the quality of the object
1662
+ detection, tracking, re-identification, and pose estimation data
1663
+ generated by Ancilia.
1664
+ 1) High-level Task - Action Recognition: We select two
1665
+ state-of-the-art action recognition models, PoseConv3d [40]
1666
+ and CTR-GCN [41], and train them using data generated with
1667
+ Ancilia. For each model, we train and test with full (30 FPS)
1668
+ and half (15 FPS) throughput on NTU60-XSub [51]. Both
1669
+ models use a window size of 30 and are trained for 24 epochs
1670
+ using Stochastic Gradient Descent (SGD) with a momentum of
1671
+ 0.9 and Cosine Annealing scheduling. PoseConv3d and CTR-
1672
+ GCN have weight decay of 3e−4 and 5e−4 and an initial
1673
+ learning rate of 0.4 and 0.2, respectively.
1674
+ The results of these experiments can be seen in Tab. II.
1675
+ We report the Top-1 and Top-5 accuracy and compare the
1676
+ results using data generated by Ancilia to the original data
1677
+ available through the PYSKL toolbox [52]. We can see that
1678
+ Ancilia is able to provide data of comparable quality to the
1679
+ original; action recognition as a high-level task in Ancilia sees
1680
+ around 1% drop in accuracy compared to the original data
1681
+ using PoseConv3D [40] at full throughput, and around 3% at
1682
+ half throughput. Using CTR-GCN [41], Ancilia sees a 2.5%
1683
+ TABLE II
1684
+ TOP-1 AND TOP-5 ACCURACIES ON NTU60-XSUB [51] IN FULL AND
1685
+ HALF THROUGHPUT MODES FOR POSECONV3D [40] AND CTR-GCN
1686
+ [41].
1687
+ Model
1688
+ Data
1689
+ FPS
1690
+ Top-1 (%)
1691
+ Top-5 (%)
1692
+ PoseConv3D [40]
1693
+ [52]
1694
+ 15
1695
+ 91.96
1696
+ 99.47
1697
+ 30
1698
+ 92.76
1699
+ 99.57
1700
+ Ours
1701
+ 15
1702
+ 88.79
1703
+ 98.82
1704
+ 30
1705
+ 91.99
1706
+ 99.28
1707
+ CTR-GCN [41]
1708
+ [52]
1709
+ 15
1710
+ 86.36
1711
+ 98.46
1712
+ 30
1713
+ 83.07
1714
+ 98.26
1715
+ Ours
1716
+ 15
1717
+ 81.58
1718
+ 97.52
1719
+ 30
1720
+ 80.44
1721
+ 97.2
1722
+ drop at full throughput and a 4.8% drop at half throughput,
1723
+ compared to the original data. From this we can infer that
1724
+ PoseConv3D is more robust to noise than CTR-GCN, however
1725
+ both performed reasonably well with data generated from
1726
+ Ancilia, demonstrating its efficacy for intelligent surveillance
1727
+ applications.
1728
+ Another interesting observation is that CTR-GCN [41]
1729
+ actually performed noticeably better at half throughput than
1730
+ at full throughput. This means that CTR-GCN is more suited
1731
+ to taking advantage of the higher temporal window allowed
1732
+ when using half throughput. This is something to consider
1733
+ when choosing an action recognition model when a real-time
1734
+ throughput of 30 FPS cannot be guaranteed.
1735
+ 2) High-level Task - Anomaly Detection: Using the Shang-
1736
+ haiTech dataset [53] we train two state-of-the-art anomaly
1737
+ detection models, GEPC [42] and MPED-RNN [43], using
1738
+ both data generated by Ancilia and the data provided by the
1739
+ original authors. The same training strategy from Sec. VI-B1
1740
+ is used, with both models trained in full (20 FPS) and half (10
1741
+ FPS) modes. GEPC is trained for 25 epochs with a window
1742
+ size of 30 and stride of 20 using Adam optimizer with a
1743
+ learning rate of 1e-4, weight decay of 1e-5, and batch size
1744
+ of 512. MPED-RNN is trained with an input window size of
1745
+ 30, a reconstruction window of 12, and a prediction window of
1746
+ 6. The model is trained for 5 epochs using the Adam optimizer
1747
+ with a learning rate of 1e−3 and a batch size of 265.
1748
+ TABLE III
1749
+ AUC ROC, AUC PR, AND EER ON SHANGHAITECH DATASET [53] IN
1750
+ FULL AND HALF THROUGHPUT MODES FOR GEPC [42] AND MPED-RNN
1751
+ [43].
1752
+ Model
1753
+ Data
1754
+ FPS
1755
+ AUC ROC
1756
+ AUC PR
1757
+ EER
1758
+ GEPC [42]
1759
+ [42]
1760
+ 10
1761
+ 0.6906
1762
+ 0.5951
1763
+ 0.35
1764
+ 20
1765
+ 0.7372
1766
+ 0.6427
1767
+ 0.31
1768
+ Ours
1769
+ 10
1770
+ 0.6888
1771
+ 0.5905
1772
+ 0.35
1773
+ 20
1774
+ 0.7223
1775
+ 0.6023
1776
+ 0.32
1777
+ MPED-RNN [43]
1778
+ [43]
1779
+ 10
1780
+ 0.6645
1781
+ 0.5733
1782
+ 0.37
1783
+ 20
1784
+ 0.7023
1785
+ 0.5869
1786
+ 0.36
1787
+ Ours
1788
+ 10
1789
+ 0.6685
1790
+ 0.5661
1791
+ 0.37
1792
+ 20
1793
+ 0.6679
1794
+ 0.5487
1795
+ 0.37
1796
+ The results of this experiment can be seen in Tab. III.
1797
+ In line with current practices, we report Area Under the
1798
+ Receiver Operating Characteristic Curve (AUC ROC), Area
1799
+
1800
+ IEEE INTERNET OF THINGS JOURNAL
1801
+ 7
1802
+ 1
1803
+ 2
1804
+ 3
1805
+ 4
1806
+ 5
1807
+ 6
1808
+ 7
1809
+ 8
1810
+ 0
1811
+ 10
1812
+ 20
1813
+ 30
1814
+ 40
1815
+ 50
1816
+ 60
1817
+ Normal
1818
+ Heavy
1819
+ Extreme
1820
+ Nodes
1821
+ Throughput (FPS)
1822
+ (a) Server A
1823
+ 1
1824
+ 2
1825
+ 3
1826
+ 4
1827
+ 5
1828
+ 6
1829
+ 7
1830
+ 8
1831
+ 15
1832
+ 20
1833
+ 25
1834
+ 30
1835
+ Normal
1836
+ Heavy
1837
+ Extreme
1838
+ Nodes
1839
+ Throughput (FPS)
1840
+ (b) Server B
1841
+ 1
1842
+ 2
1843
+ 3
1844
+ 4
1845
+ 5
1846
+ 6
1847
+ 7
1848
+ 8
1849
+ 0
1850
+ 10
1851
+ 20
1852
+ 30
1853
+ 40
1854
+ 50
1855
+ 60
1856
+ Normal
1857
+ Heavy
1858
+ Extreme
1859
+ Nodes
1860
+ Throughput (FPS)
1861
+ (c) Workstation
1862
+ Fig. 4. Throughput of Ancilia across different crowd densities. Hardware details can be seen in Tab. IV.
1863
+ TABLE IV
1864
+ SYSTEM CONFIGURATIONS. STATS ARE PER CPU/GPU OF THE LISTED TYPE.
1865
+ Processor
1866
+ GPU
1867
+ Name
1868
+ Model
1869
+ Cores
1870
+ Clock Speed
1871
+ Model
1872
+ CUDA Cores
1873
+ VRAM
1874
+ Server A
1875
+ 2× EPYC 7513
1876
+ 32
1877
+ 2.6 GHz
1878
+ 4× V100
1879
+ 5120
1880
+ 32 GB
1881
+ Server B
1882
+ 2× Xeon E5-2640 v4
1883
+ 10
1884
+ 2.4 GHz
1885
+ 2× Titan V
1886
+ 5120
1887
+ 12 GB
1888
+ Workstation
1889
+ Threadripper Pro 3975WX
1890
+ 32
1891
+ 3.50 GHz
1892
+ 3× A6000
1893
+ 10752
1894
+ 48 GB
1895
+ Under the Precision-recall Curve (AUC PR), and the Equal
1896
+ Error Rate (EER). With GEPC, we can see that Ancilia more
1897
+ than measures up to the task, with only a 1.5% drop in AUC
1898
+ ROC at full throughput and less than a 0.2% drop in AUC ROC
1899
+ at half throughput. AUC PR shows a more substantial drop
1900
+ of 4% at full throughput, but goes down to less than 0.5% at
1901
+ half throughput. Equal Error Rates are almost identical, seeing
1902
+ almost no change (less than 0.01) when using Ancilia. MPED-
1903
+ RNN, which displayed lower overall accuracy in all regards to
1904
+ begin with, sees a more significant drop in AUC ROC at full
1905
+ throughput, losing 3.5%. However, at half throughput the AUC
1906
+ ROC actually increases when using Ancilia, though only by
1907
+ 0.5%. The AUC PR results mirror that of GEPC, dropping
1908
+ 3.8% at full throughput and 0.7% at half throughput. The
1909
+ Equal Error Rates are once again nearly identical. Being able
1910
+ to perform a high-level task such as anomaly detection while
1911
+ maintaining accuracies so close to current SotA in research,
1912
+ demonstrates Ancilia’s ability to produce quality data, suitable
1913
+ for intelligent surveillance applications.
1914
+ C. Real-time System Performance
1915
+ Algorithmic accuracy is vital for ensuring the information
1916
+ provided by high-level cognitive tasks is beneficial for surveil-
1917
+ lance applications. However, Ancilia’s ability to perform in
1918
+ real-time is equally important. We conduct a series of ex-
1919
+ periments, evaluating the runtime performance of Ancilia on
1920
+ different hardware, with different scenario intensities, and for
1921
+ increasing number of local nodes per hardware device. We
1922
+ focus on the performance of the local node, as the global node
1923
+ is completely decoupled from the algorithmic pipeline and has
1924
+ no noticeable effect on throughput or latency.
1925
+ We choose three different hardware configurations for these
1926
+ experiments: a high-end server, a lower-end server, and a high-
1927
+ end workstation, as seen in Tab. IV. For our scenarios, we
1928
+ use the DukeMTMC-video dataset [46] and pick three scenes
1929
+ with different crowd densities: normal density, heavy density,
1930
+ and extreme density. The distribution of detection density
1931
+ in each scenario can be seen in Fig. 5. Note that what is
1932
+ considered "normal density" will change based on application
1933
+ environment, which is why we report on such a wide range.
1934
+ Each video lasts for
1935
+ 32k frames, with
1936
+ 7k frames warm-up
1937
+ and cool-down. We test using 1, 2, 4, 6, and 8 local nodes on
1938
+ a single system, showing how throughput and latency scale
1939
+ in such cases. Each experiment is conducted three times, the
1940
+ throughput and latency averaged across runs. The results of
1941
+ these experiments can be seen in Tab. V and Fig. 4. The
1942
+ distribution of throughput in these scenes can be seen in Fig. 5.
1943
+ Under normal crowd density, Server A and Workstation
1944
+ are both able to achieve over 50 FPS with a single local
1945
+ node, with an end-to-end latency of 5.39 and 4.90 seconds
1946
+ respectively. This is well above the 30 and 20 FPS required
1947
+ by action recognition and anomaly detection algorithms at
1948
+ full throughput, and the latency is low enough to be suitable
1949
+ for most surveillance applications where the main concern is
1950
+ notifying authorities in time for appropriate response. Server A
1951
+ is able to handle 6 local nodes in the normal scenario while
1952
+ maintaining above 30 FPS, while Workstation can do so with
1953
+ 4 nodes. Server A is able to maintain above 26 FPS while
1954
+ running all 8 local nodes, while Workstation drops to just
1955
+ below 18 FPS at 8 local nodes. Server B falls just short of
1956
+ 30 FPS even with a single node in normal crowd density.
1957
+ However, it is able to maintain above 20 FPS while handling
1958
+ two nodes simultaneously. Due to having only a single GPU
1959
+ and limited VRAM, Server B was unable to run 4 or more
1960
+ nodes concurrently.
1961
+ Heavy crowd density proves more challenging, with both
1962
+ Server A and Workstation only able to achieve above 30
1963
+ FPS with up to 4 nodes. The end-to-end latency is also
1964
+ longer than it was under normal crowd density, with Server
1965
+ A seeing almost double the latency and Workstation seeing
1966
+ around a 20% to 80% increase in most cases. Server A and
1967
+ Workstation are able to maintain above 20 FPS at 8 and 6
1968
+
1969
+ IEEE INTERNET OF THINGS JOURNAL
1970
+ 8
1971
+ TABLE V
1972
+ AVERAGE THROUGHPUT AND LATENCY WHEN RUNNING MULTIPLE LOCAL NODES ON A SINGLE SERVER.
1973
+ Server A
1974
+ Server B
1975
+ Workstation
1976
+ Crowd Density
1977
+ Nodes
1978
+ FPS
1979
+ Latency (s)
1980
+ FPS
1981
+ Latency (s)
1982
+ FPS
1983
+ Latency (s)
1984
+ Normal
1985
+ (70 detections
1986
+ per second)
1987
+ 1
1988
+ 52.94
1989
+ 5.39
1990
+ 29.76
1991
+ 9.73
1992
+ 54.91
1993
+ 4.90
1994
+ 2
1995
+ 48.99
1996
+ 5.97
1997
+ 22.07
1998
+ 12.7
1999
+ 45.06
2000
+ 5.97
2001
+ 4
2002
+ 38.91
2003
+ 7.29
2004
+ -
2005
+ -
2006
+ 31.67
2007
+ 8.53
2008
+ 6
2009
+ 31.35
2010
+ 12.55
2011
+ -
2012
+ -
2013
+ 23.10
2014
+ 13.61
2015
+ 8
2016
+ 26.51
2017
+ 17.94
2018
+ -
2019
+ -
2020
+ 17.98
2021
+ 24.95
2022
+ Heavy
2023
+ (216 detections
2024
+ per second)
2025
+ 1
2026
+ 40.16
2027
+ 15.66
2028
+ 26.48
2029
+ 11.93
2030
+ 48.52
2031
+ 5.95
2032
+ 2
2033
+ 41.22
2034
+ 10.87
2035
+ 19.55
2036
+ 14.51
2037
+ 41.45
2038
+ 7.10
2039
+ 4
2040
+ 34.54
2041
+ 14.48
2042
+ -
2043
+ -
2044
+ 30.01
2045
+ 11.20
2046
+ 6
2047
+ 27.05
2048
+ 22.42
2049
+ -
2050
+ -
2051
+ 20.99
2052
+ 30.65
2053
+ 8
2054
+ 20.02
2055
+ 34.68
2056
+ -
2057
+ -
2058
+ 15.77
2059
+ 46.28
2060
+ Extreme
2061
+ (744 detections
2062
+ per second)
2063
+ 1
2064
+ 17.80
2065
+ 36.04
2066
+ 14.50
2067
+ 43.81
2068
+ 25.68
2069
+ 24.78
2070
+ 2
2071
+ 20.90
2072
+ 30.73
2073
+ 13.40
2074
+ 47.57
2075
+ 23.52
2076
+ 26.97
2077
+ 4
2078
+ 17.27
2079
+ 38.15
2080
+ -
2081
+ -
2082
+ 17.56
2083
+ 39.43
2084
+ 6
2085
+ 9.56
2086
+ 76.71
2087
+ -
2088
+ -
2089
+ 8.94
2090
+ 94.49
2091
+ 8
2092
+ 6.49
2093
+ 130.08
2094
+ -
2095
+ -
2096
+ 6.31
2097
+ 134.82
2098
+ nodes respectively, while Workstation drops to just above 15
2099
+ FPS at 8 nodes. Interestingly, with heavy crowd density we
2100
+ start to see unusual behavior with Server A having worse
2101
+ performance with a single node than it does with 2 nodes.
2102
+ This is caused by the abundance of CPU and GPU resources
2103
+ available to the server and a single node being unable to
2104
+ fully utilize them. As such, the behavior of Server A in the
2105
+ heavy and extreme crowd density scenarios does not start to
2106
+ match the expected behavior and mimic the other systems until
2107
+ multiple nodes are being run simultaneously. This behavior
2108
+ is not too concerning, considering it does not make sense
2109
+ to purchase such a high-end server class machine for only
2110
+ running a single local node, when a more latency focused
2111
+ workstation would be both cheaper and more effective. Server
2112
+ B behaves similarly to how it did with normal crowd density,
2113
+ except that it falls slightly below 20 FPS when running two
2114
+ nodes. Assuming only half throughput was needed for high-
2115
+ level tasks, Server B would still be suitable for running up to
2116
+ two nodes.
2117
+ With the extreme crowd density scenario, Ancilia begins
2118
+ to struggle. None of the systems are able to achieve above
2119
+ 30 FPS even with a single camera, putting full throughput
2120
+ action recognition out of reach. Server A is able to achieve
2121
+ above 20 FPS with 2 nodes (but notably not with 1) and
2122
+ Workstation is able to do so with 1 or 2 nodes. Both Server
2123
+ A and Workstation can maintain above 15 FPS at 4 nodes,
2124
+ but both drop to around 9 and 6 FPS at 6 and 8 nodes,
2125
+ respectively. [54] argues that 5 FPS is suitable for tracking
2126
+ pedestrians, and while that is true, high-level tasks that rely
2127
+ on detailed human motion, such as action recognition and
2128
+ anomaly detection, often struggle for accuracy when running
2129
+ below 10 FPS. Another issue is with the increased latency.
2130
+ Running only 1 node, Server A and Workstation have latencies
2131
+ of 36 seconds and 25 seconds respectively, which is suitable
2132
+ for many surveillance applications, but might be too much
2133
+ for those that require sharper response times. The latency
2134
+ increases to over 2 minutes for both systems with 8 nodes.
2135
+ Combined with the low throughput, it becomes difficult to
2136
+ recommend running more than 4 nodes on a single system with
2137
+ Ancilia when operating under extreme crowd density, except
2138
+ 0
2139
+ 0.05
2140
+ 0.1
2141
+ 0.15
2142
+ 0.2
2143
+ 0.25
2144
+ 0.3
2145
+ 0.35
2146
+ 0.4
2147
+ 0.45
2148
+ 0
2149
+ 5
2150
+ 10
2151
+ 15
2152
+ 20
2153
+ Extreme
2154
+ Heavy
2155
+ Normal
2156
+ Average Number of Detections/Batch
2157
+ Probability
2158
+ 0
2159
+ 0.02
2160
+ 0.04
2161
+ 0.06
2162
+ 0.08
2163
+ 0.1
2164
+ 0.12
2165
+ 0.14
2166
+ 0.16
2167
+ 20
2168
+ 30
2169
+ 40
2170
+ 50
2171
+ 60
2172
+ 70
2173
+ 80
2174
+ 90
2175
+ Extreme
2176
+ Heavy
2177
+ Normal
2178
+ Throughput (FPS)
2179
+ Probability
2180
+ Fig. 5.
2181
+ The performance of Ancilia in terms of throughput and detection
2182
+ distribution.
2183
+ for applications where low throughput and high latency are
2184
+ not as much of a concern. Server B is unable to achieve 15
2185
+ FPS, but does stay above 10 FPS for both 1 and 2 nodes,
2186
+ making it suitable for half throughput in anomaly detection.
2187
+ However, the latencies of 44 and 48 seconds might be too
2188
+ much for some applications. This is the extreme scenario, and
2189
+ it understandably provides quite the challenge for real-time
2190
+ execution.
2191
+ Overall, Ancilia is able to meet the needs of high-level cog-
2192
+ nitive tasks while still achieving performance suitable for real-
2193
+
2194
+ IEEE INTERNET OF THINGS JOURNAL
2195
+ 9
2196
+ time intelligent surveillance applications. Exact performance is
2197
+ dependent on both the hardware used and the intensity of the
2198
+ scene, but these results show that even for the most extreme of
2199
+ scenarios, Ancilia can be used to provide intelligent assistance
2200
+ to surveillance applications.
2201
+ VII. CONCLUSION
2202
+ In this article we presented Ancilia, an end-to-end scal-
2203
+ able intelligent video surveillance system for the Artificial
2204
+ Intelligence of Things. Through empirical evaluation, Ancilia
2205
+ has demonstrated its ability to bring state-of-the-art artificial
2206
+ intelligence to real-world surveillance applications. Ancilia
2207
+ performs high-level cognitive tasks (i.e. action recognition and
2208
+ anomaly detection) in real-time, all while respecting ethical
2209
+ and privacy concerns common to surveillance applications.
2210
+ ACKNOWLEDGMENTS
2211
+ This research is supported by the National Science Foun-
2212
+ dation (NSF) under Award No. 1831795 and NSF Graduate
2213
+ Research Fellowship Award No. 1848727.
2214
+ REFERENCES
2215
+ [1] C.
2216
+ Alexandre,
2217
+ “The
2218
+ public
2219
+ safety
2220
+ implications
2221
+ of
2222
+ the
2223
+ itaewon
2224
+ tragedy,” Dec 2022. [Online]. Available: https://thediplomat.com/2022/
2225
+ 12/the-public-safety-implications-of-the-itaewon-tragedy/
2226
+ [2] N. Salahieh, J. Miller, and H. Yan, “As north carolinians regain power,
2227
+ investigators probe terrorism and threats against power substations
2228
+ across
2229
+ the
2230
+ us.
2231
+ one
2232
+ expert
2233
+ explains
2234
+ what
2235
+ needs
2236
+ to
2237
+ be
2238
+ done,”
2239
+ Dec 2022. [Online]. Available: https://www.cnn.com/2022/12/08/us/
2240
+ power-outage-moore-county-investigation-thursday/index.html
2241
+ [3] S.
2242
+ Feldstein
2243
+ and
2244
+ C.
2245
+ E.
2246
+ for
2247
+ International
2248
+ Peace,
2249
+ The
2250
+ Global
2251
+ Expansion of AI Surveillance.
2252
+ Carnegie Endowment for International
2253
+ Peace, 2019. [Online]. Available: https://books.google.com/books?id=
2254
+ W9JQzQEACAAJ
2255
+ [4] M. R. Patrikar, Devashree R. Parate, “Anomaly detection using edge
2256
+ computing in video surveillance system: review,” International Journal
2257
+ of Multimedia Information Retrieval, pp. 85–110, 2022. [Online].
2258
+ Available: https://doi.org/10.1007/s13735-022-00227-8
2259
+ [5] A. Danesh Pazho, G. Alinezhad Noghre, A. A. Purkayastha, J. Vempati,
2260
+ O. Martin, and H. Tabkhi, “A comprehensive survey of graph-based
2261
+ deep learning approaches for anomaly detection in complex distributed
2262
+ systems,” arXiv preprint arXiv:2206.04149, 2022.
2263
+ [6] X. Li and Z.-m. Cai, “Anomaly detection techniques in surveillance
2264
+ videos,” in 2016 9th International Congress on Image and Signal Pro-
2265
+ cessing, BioMedical Engineering and Informatics (CISP-BMEI), 2016,
2266
+ pp. 54–59.
2267
+ [7] T. Li, H. Chang, M. Wang, B. Ni, R. Hong, and S. Yan, “Crowded
2268
+ scene analysis: A survey,” IEEE Trans. Cir. and Sys. for Video
2269
+ Technol., vol. 25, no. 3, p. 367–386, mar 2015. [Online]. Available:
2270
+ https://doi.org/10.1109/TCSVT.2014.2358029
2271
+ [8] B. S. Shobha and R. Deepu, “A review on video based vehicle detection,
2272
+ recognition and tracking,” in 2018 3rd International Conference on
2273
+ Computational Systems and Information Technology for Sustainable
2274
+ Solutions (CSITSS), 2018, pp. 183–186.
2275
+ [9] J. Zhang, C. Xu, Z. Gao, J. J. P. C. Rodrigues, and V. H. C. de Al-
2276
+ buquerque, “Industrial pervasive edge computing-based intelligence iot
2277
+ for surveillance saliency detection,” IEEE Transactions on Industrial
2278
+ Informatics, vol. 17, no. 7, pp. 5012–5020, 2021.
2279
+ [10] G. T. Draughon, P. Sun, and J. P. Lynch, “Implementation of a computer
2280
+ vision framework for tracking and visualizing face mask usage in urban
2281
+ environments,” in 2020 IEEE International Smart Cities Conference
2282
+ (ISC2), 2020, pp. 1–8.
2283
+ [11] R. Xu, S. Y. Nikouei, Y. Chen, A. Polunchenko, S. Song, C. Deng,
2284
+ and T. R. Faughnan, “Real-time human objects tracking for smart
2285
+ surveillance at the edge,” in 2018 IEEE International Conference on
2286
+ Communications (ICC), 2018, pp. 1–6.
2287
+ [12] S. Y. Nikouei, Y. Chen, S. Song, R. Xu, B.-Y. Choi, and T. Faughnan,
2288
+ “Smart surveillance as an edge network service: From harr-cascade, svm
2289
+ to a lightweight cnn,” in 2018 IEEE 4th International Conference on
2290
+ Collaboration and Internet Computing (CIC), 2018, pp. 256–265.
2291
+ [13] S. Y. Nikouei, Y. Chen, S. Song, B.-Y. Choi, and T. R. Faughnan,
2292
+ “Toward intelligent surveillance as an edge network service (isense)
2293
+ using lightweight detection and tracking algorithms,” IEEE Transactions
2294
+ on Services Computing, vol. 14, no. 6, pp. 1624–1637, 2021.
2295
+ [14] C. Neff, M. Mendieta, S. Mohan, M. Baharani, S. Rogers, and H. Tabkhi,
2296
+ “Revamp2t: Real-time edge video analytics for multicamera privacy-
2297
+ aware pedestrian tracking,” IEEE Internet of Things Journal, vol. 7,
2298
+ no. 4, pp. 2591–2602, 2020.
2299
+ [15] B. Gaikwad and A. Karmakar, “Smart surveillance system for real-time
2300
+ multi-person multi-camera tracking at the edge,” in Journal of Real-Time
2301
+ Image Processing, vol. 18, 2021.
2302
+ [16] Y. Zhao, Y. Yin, and G. Gui, “Lightweight deep learning based intel-
2303
+ ligent edge surveillance techniques,” IEEE Transactions on Cognitive
2304
+ Communications and Networking, vol. 6, no. 4, pp. 1146–1154, 2020.
2305
+ [17] R. Ke, Y. Zhuang, Z. Pu, and Y. Wang, “A smart, efficient, and
2306
+ reliable parking surveillance system with edge artificial intelligence on
2307
+ iot devices,” IEEE Transactions on Intelligent Transportation Systems,
2308
+ vol. 22, no. 8, pp. 4962–4974, 2021.
2309
+ [18] R. E. Kalman, “A new approach to linear filtering and prediction
2310
+ problems,” Transactions of the ASME–Journal of Basic Engineering,
2311
+ vol. 82, no. Series D, pp. 35–45, 1960.
2312
+ [19] J. Liang, L. Jiang, J. C. Niebles, A. G. Hauptmann, and L. Fei-Fei,
2313
+ “Peeking into the future: Predicting future person activities and locations
2314
+ in videos,” in Proceedings of the IEEE/CVF Conference on Computer
2315
+ Vision and Pattern Recognition (CVPR), June 2019.
2316
+ [20] V. Nguyen, D. Phung, D.-S. Pham, and S. Venkatesh, “Bayesian
2317
+ nonparametric
2318
+ approaches
2319
+ to
2320
+ abnormality
2321
+ detection
2322
+ in
2323
+ video
2324
+ surveillance,” Annals of Data Science, vol. 2, pp. 21–41, 2015.
2325
+ [Online]. Available: https://doi.org/10.1007/s40745-015-0030-3
2326
+ [21] R. Nawaratne, D. Alahakoon, D. De Silva, and X. Yu, “Spatiotemporal
2327
+ anomaly detection using deep learning for real-time video surveillance,”
2328
+ IEEE Transactions on Industrial Informatics, vol. 16, no. 1, pp. 393–
2329
+ 402, 2020.
2330
+ [22] R. Arroyo, J. J. Yebes, L. M. Bergasa, I. G. Daza, and J. Almaz´an,
2331
+ “Expert video-surveillance system for real-time detection of suspicious
2332
+ behaviors in shopping malls,” Expert Systems with Applications,
2333
+ vol. 42, no. 21, pp. 7991–8005, 2015. [Online]. Available: https:
2334
+ //www.sciencedirect.com/science/article/pii/S0957417415004182
2335
+ [23] J. T. Zhou, J. Du, H. Zhu, X. Peng, Y. Liu, and R. S. M. Goh,
2336
+ “Anomalynet: An anomaly detection network for video surveillance,”
2337
+ IEEE Transactions on Information Forensics and Security, vol. 14,
2338
+ no. 10, pp. 2537–2550, 2019.
2339
+ [24] J. Pierce, R. Y. Wong, and N. Merrill, “Sensor illumination: Exploring
2340
+ design qualities and ethical implications of smart cameras and
2341
+ image/video analytics,” in Proceedings of the 2020 CHI Conference on
2342
+ Human Factors in Computing Systems, ser. CHI ’20.
2343
+ New York, NY,
2344
+ USA: Association for Computing Machinery, 2020, p. 1–19. [Online].
2345
+ Available: https://doi.org/10.1145/3313831.3376347
2346
+ [25] H. Nissenbaum, “Privacy as contextual integrity,” Wash. L. Rev., vol. 79,
2347
+ p. 119, 2004.
2348
+ [26] Y. E. Appenzeller, P. S. Appelbaum, and M. Trachsel, “Ethical and
2349
+ practical issues in video surveillance of psychiatric units,” Psychiatric
2350
+ Services, vol. 71, no. 5, pp. 480–486, 2020.
2351
+ [27] F. Tariq, N. Kanwal, M. S. Ansari, A. Afzaal, M. N. Asghar, and M. J.
2352
+ Anjum, “Towards a privacy preserving surveillance approach for smart
2353
+ cities,” in 3rd Smart Cities Symposium (SCS 2020), vol. 2020, 2020, pp.
2354
+ 450–455.
2355
+ [28] W. Hartzog, Privacy's Blueprint: The Battle to Control the Design of
2356
+ New Technologies.
2357
+ Harvard University Press, 2018.
2358
+ [29] J. Daubert, A. Wiesmaier, and P. Kikiras, “A view on privacy & trust
2359
+ in iot,” in 2015 IEEE International Conference on Communication
2360
+ Workshop (ICCW).
2361
+ IEEE, 2015, pp. 2665–2670.
2362
+ [30] T. Speicher, M. Ali, G. Venkatadri, F. N. Ribeiro, G. Arvanitakis,
2363
+ F. Benevenuto, K. P. Gummadi, P. Loiseau, and A. Mislove, “Potential
2364
+ for discrimination in online targeted advertising,” in Conference on
2365
+ Fairness, Accountability and Transparency.
2366
+ PMLR, 2018, pp. 5–19.
2367
+ [31] I. D. Raji, T. Gebru, M. Mitchell, J. Buolamwini, J. Lee, and E. Denton,
2368
+ “Saving face: Investigating the ethical concerns of facial recognition
2369
+ auditing,” in Proceedings of the AAAI/ACM Conference on AI, Ethics,
2370
+ and Society, 2020, pp. 145–151.
2371
+ [32] N. Martinez-Martin, “What are important ethical implications of using
2372
+ facial recognition technology in health care?” AMA journal of ethics,
2373
+ vol. 21, no. 2, p. E180, 2019.
2374
+ [33] L. Introna and H. Nissenbaum, “Facial recognition technology a survey
2375
+ of policy and implementation issues,” 2010.
2376
+
2377
+ IEEE INTERNET OF THINGS JOURNAL
2378
+ 10
2379
+ [34] E. Selinger and B. Leong, “The ethics of facial recognition technology,”
2380
+ Forthcoming in The Oxford Handbook of Digital Ethics ed. Carissa
2381
+ V´eliz, 2021.
2382
+ [35] D. Leslie, “Understanding bias in facial recognition technologies,” Tech.
2383
+ Rep., 2020. [Online]. Available: https://zenodo.org/record/4050457
2384
+ [36] G. Jocher, A. Chaurasia, A. Stoken, J. Borovec, NanoCode012,
2385
+ Y. Kwon, K. Michael, TaoXie, J. Fang, imyhxy, Lorna, Z. Yifu,
2386
+ C. Wong, A. V, D. Montes, Z. Wang, C. Fati, J. Nadar, Laughing,
2387
+ UnglvKitDe, V. Sonck, tkianai, yxNONG, P. Skalski, A. Hogan,
2388
+ D. Nair, M. Strobel, and M. Jain, “ultralytics/yolov5: v7.0 - YOLOv5
2389
+ SOTA Realtime Instance Segmentation,” Nov. 2022. [Online]. Available:
2390
+ https://doi.org/10.5281/zenodo.7347926
2391
+ [37] Y. Zhang, P. Sun, Y. Jiang, D. Yu, F. Weng, Z. Yuan, P. Luo, W. Liu,
2392
+ and X. Wang, “Bytetrack: Multi-object tracking by associating every
2393
+ detection box,” 2022.
2394
+ [38] K. Sun, B. Xiao, D. Liu, and J. Wang, “Deep high-resolution represen-
2395
+ tation learning for human pose estimation,” in CVPR, 2019.
2396
+ [39] K. Zhou, Y. Yang, A. Cavallaro, and T. Xiang, “Omni-scale feature
2397
+ learning for person re-identification,” in ICCV, 2019.
2398
+ [40] H. Duan, Y. Zhao, K. Chen, D. Lin, and B. Dai, “Revisiting skeleton-
2399
+ based action recognition,” in Proceedings of the IEEE/CVF Conference
2400
+ on Computer Vision and Pattern Recognition, 2022, pp. 2969–2978.
2401
+ [41] Y. Chen, Z. Zhang, C. Yuan, B. Li, Y. Deng, and W. Hu, “Channel-
2402
+ wise topology refinement graph convolution for skeleton-based action
2403
+ recognition,” in Proceedings of the IEEE/CVF International Conference
2404
+ on Computer Vision, 2021, pp. 13 359–13 368.
2405
+ [42] A. Markovitz, G. Sharir, I. Friedman, L. Zelnik-Manor, and S. Avidan,
2406
+ “Graph embedded pose clustering for anomaly detection,” in Proceed-
2407
+ ings of the IEEE/CVF Conference on Computer Vision and Pattern
2408
+ Recognition, 2020, pp. 10 539–10 547.
2409
+ [43] R. Morais, V. Le, T. Tran, B. Saha, M. Mansour, and S. Venkatesh,
2410
+ “Learning regularity in skeleton trajectories for anomaly detection in
2411
+ videos,” in Proceedings of the IEEE/CVF conference on computer vision
2412
+ and pattern recognition, 2019, pp. 11 996–12 004.
2413
+ [44] T.-Y. Lin, M. Maire, S. Belongie, L. Bourdev, R. Girshick, J. Hays,
2414
+ P. Perona, D. Ramanan, C. L. Zitnick, and P. Doll´ar, “Microsoft coco:
2415
+ Common objects in context,” 2014.
2416
+ [45] P. Dendorfer, H. Rezatofighi, A. Milan, J. Shi, D. Cremers, I. Reid,
2417
+ S. Roth, K. Schindler, and L. Leal-Taix´e, “Mot20: A benchmark for
2418
+ multi object tracking in crowded scenes,” 2020. [Online]. Available:
2419
+ https://arxiv.org/abs/2003.09003
2420
+ [46] E. Ristani, F. Solera, R. Zou, R. Cucchiara, and C. Tomasi, “Performance
2421
+ measures and a data set for multi-target, multi-camera tracking,” in
2422
+ European Conference on Computer Vision workshop on Benchmarking
2423
+ Multi-Target Tracking, 2016.
2424
+ [47] W. Wang, J. Dai, Z. Chen, Z. Huang, Z. Li, X. Zhu, X. Hu, T. Lu, L. Lu,
2425
+ H. Li et al., “Internimage: Exploring large-scale vision foundation mod-
2426
+ els with deformable convolutions,” arXiv preprint arXiv:2211.05778,
2427
+ 2022.
2428
+ [48] L. Zheng, M. Tang, Y. Chen, G. Zhu, J. Wang, and H. Lu, “Improving
2429
+ multiple object tracking with single object tracking,” in Proceedings of
2430
+ the IEEE/CVF Conference on Computer Vision and Pattern Recognition
2431
+ (CVPR), June 2021, pp. 2453–2462.
2432
+ [49] Y. Xu, J. Zhang, Q. Zhang, and D. Tao, “ViTPose: Simple vision
2433
+ transformer baselines for human pose estimation,” in Advances in
2434
+ Neural Information Processing Systems, A. H. Oh, A. Agarwal,
2435
+ D. Belgrave, and K. Cho, Eds., 2022. [Online]. Available: https:
2436
+ //openreview.net/forum?id=6H2pBoPtm0s
2437
+ [50] M. Wieczorek, B. Rychalska, and J. Dabrowski, “On the unreasonable
2438
+ effectiveness of centroids in image retrieval,” in Neural Information
2439
+ Processing: 28th International Conference, ICONIP 2021, Sanur, Bali,
2440
+ Indonesia, December 8–12, 2021, Proceedings, Part IV.
2441
+ Berlin,
2442
+ Heidelberg: Springer-Verlag, 2021, p. 212–223. [Online]. Available:
2443
+ https://doi.org/10.1007/978-3-030-92273-3 18
2444
+ [51] J. Liu, A. Shahroudy, M. Perez, G. Wang, L.-Y. Duan, and A. C.
2445
+ Kot, “Ntu rgb+ d 120: A large-scale benchmark for 3d human activity
2446
+ understanding,” IEEE transactions on pattern analysis and machine
2447
+ intelligence, vol. 42, no. 10, pp. 2684–2701, 2019.
2448
+ [52] H. Duan, J. Wang, K. Chen, and D. Lin, “Pyskl: Towards good
2449
+ practices for skeleton action recognition,” 2022. [Online]. Available:
2450
+ https://arxiv.org/abs/2205.09443
2451
+ [53] W. Liu, D. L. W. Luo, and S. Gao, “Future frame prediction for anomaly
2452
+ detection – a new baseline,” in 2018 IEEE Conference on Computer
2453
+ Vision and Pattern Recognition (CVPR), 2018.
2454
+ [54] L. Wang, D. Q. Huynh, and P. Koniusz, “A comparative review of
2455
+ recent kinect-based action recognition algorithms,” IEEE Transactions
2456
+ on Image Processing, vol. 29, pp. 15–28, 2020.
2457
+ BIOGRAPHY
2458
+ Armin Danesh Pazho (S’22) is currently a Ph.D.
2459
+ student at the University of North Carolina at Char-
2460
+ lotte, NC, United States. With a focus on Artificial
2461
+ Intelligence, Computer Vision, and Deep Learning,
2462
+ his research delves into the realm of developing AI
2463
+ for practical, real-world applications and addressing
2464
+ the challenges and requirements inherent in these
2465
+ fields. Specifically, his research covers action recog-
2466
+ nition, anomaly detection, person re-identification,
2467
+ human pose estimation, and path prediction.
2468
+ Christopher Neff (S’18) is a National Science
2469
+ Foundation Graduate Research Fellow and Doctoral
2470
+ Candidate at the University of North Carolina at
2471
+ Charlotte. His dissertation focus is on tackling the
2472
+ challenges of bringing human-centric computer vi-
2473
+ sion to real-world applications. His previous work
2474
+ focuses on person re-identification, human pose es-
2475
+ timation, action recognition, real-time system devel-
2476
+ opment, lightweight algorithms, noisy data, domain
2477
+ shift, and real-world applications.
2478
+ Ghazal Alinezhad Noghre (S’22) is currently pur-
2479
+ suing her Ph.D. in Electrical and Computer Engi-
2480
+ neering at the University of North Carolina at Char-
2481
+ lotte, NC, United States. Her research concentrates
2482
+ on Artificial Intelligence, Machine Learning, and
2483
+ Computer Vision. She is particularly interested in the
2484
+ applications of anomaly detection, action recogni-
2485
+ tion, and path prediction in real-world environments,
2486
+ and the challenges associated with these fields.
2487
+ Babak Rahimi Ardabili is a Ph.D. student in the
2488
+ Public Policy Analysis program at the University
2489
+ of North Carolina at Charlotte, United States. His
2490
+ main research area is emerging technologies policy
2491
+ making. He mainly focuses on the intersection of
2492
+ Artificial Intelligence and policy from a privacy
2493
+ perspective and the challenges of bringing the tech-
2494
+ nology to the community.
2495
+ Shanle Yao is an Electrical Engineering Graduate
2496
+ student from the University of North Carolina at
2497
+ Charlotte. His dissertation focus is on optimization
2498
+ and application of Computer Vision pipeline perfor-
2499
+ mance and throughput. His areas of interest include
2500
+ object detection, multiple objects tracking, human
2501
+ pose estimation, semantic segmentation and real-
2502
+ world applications.
2503
+ Mohammedreza Baharani is an ML researcher and
2504
+ edge system deployment engineer at ForesightCares.
2505
+ He received his Ph.D. in computer engineering in
2506
+ 2021 from the University of North Carolina at
2507
+ Charlotte, USA, and was a postdoctoral researcher
2508
+ at the TeCSAR Lab. His research focuses on the
2509
+ intersection of computer architecture engineering
2510
+ and machine learning, with the goal of enabling AI
2511
+ algorithms on edge devices to have a positive impact
2512
+ in fields such as healthcare.
2513
+ Hamed Tabkhi (S’07–M’14) is an Associate Pro-
2514
+ fessor in the Department of Electrical and Com-
2515
+ puter Engineering, University of North Carolina at
2516
+ Charlotte, USA. He was a post-doctoral research
2517
+ associate at Northeastern University. Hamed Tabkhi
2518
+ received his Ph.D. degree in 2014 from Northeast-
2519
+ ern University under the direction of Prof. Gunar
2520
+ Schirner. Overall, his research focuses on transfor-
2521
+ mative computer systems and architecture for cyber-
2522
+ physical, real-time streaming and emerging machine
2523
+ learning applications.
2524
+
2525
+ Award1831795
2526
+ d PI), Shannon Reid, Dougl.
2527
+ er,Robert Phocas, Arun Ravi
2528
+ leta,ChristopherNeff,James
2529
+ &Integrative
2530
+ Communit
2531
+ oroach
2532
+ ervice of Public
2533
+ t"policing
2534
+ edge
2535
+ tiple
2536
+ co
CtE1T4oBgHgl3EQf9wbT/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
CtE2T4oBgHgl3EQf9Ant/content/2301.04225v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6436f2fecf3d8273b18a1d3847dd379f9b9a0ba20427fa671a6c59856119393d
3
+ size 4350499
CtFJT4oBgHgl3EQftS25/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b2e8dfde267b747b8a5abc01e566c3ce304f9db718f557a1e9e321d58a1ff32
3
+ size 6225965
CtFJT4oBgHgl3EQftS25/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a158258d01be20dc24f1ed5f31c30081fb6907374ff2aee56afb9aa4ea8bdcba
3
+ size 218769
E9E0T4oBgHgl3EQfhAEU/content/2301.02424v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88509eea6a370bdf4d50c0fafea16ad8a8e67d52118f092f6cac1906459049b1
3
+ size 7728586
E9E4T4oBgHgl3EQf6w7l/content/2301.05335v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff406717c00ce2e14a85a996c12d49c683ee059fede715d4f7dce5c499aa7da8
3
+ size 2553125
E9E4T4oBgHgl3EQf6w7l/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbe2b4fe30c4094f1b9b4dbf64937b5632e8e3f4128ad3c84e391b1d4939ed47
3
+ size 6684717
E9E4T4oBgHgl3EQf6w7l/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ccb274bf33f5299b71bd3fb7a7c330a0ba37996e9a868b428219da2427b99401
3
+ size 225974
FNE3T4oBgHgl3EQfVgrM/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7c7e18b0eed56a3eb8f6db2ac2e569e602b3c70ed686b3c0765999c99f424c0
3
+ size 3080237
FNE3T4oBgHgl3EQfVgrM/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81ae4b7b31c093366c9a391dfe6870c45a7f39dee037db5483d1729a223a91be
3
+ size 115028
J9FRT4oBgHgl3EQf0ThP/content/2301.13652v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4c69869cd4fa69f8472f42d0e4490fbda4ccf70dfaf17d028e0e5b45c7ffadf
3
+ size 323217
J9FRT4oBgHgl3EQf0ThP/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:acc4607ca202d25f6edf9710ae90b9cfc07d726a06112da9a34c5f2dda36b881
3
+ size 7077933
J9FRT4oBgHgl3EQf0ThP/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a52f49c5baa5c830efcac96569242d8a877d2aba1277158d19460533b61e0d7
3
+ size 257981
JNFIT4oBgHgl3EQfZCuh/content/tmp_files/2301.11251v1.pdf.txt ADDED
@@ -0,0 +1,907 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This paper has been accepted for publication at 2023 IEEE International Conference on Robotics and
2
+ Automation (ICRA 2023)
3
+ Light-Weight Pointcloud Representation with Sparse Gaussian Process
4
+ Mahmoud Ali and Lantao Liu
5
+ Abstract— This paper presents a framework to represent
6
+ high-fidelity pointcloud sensor observations for efficient com-
7
+ munication and storage. The proposed approach exploits Sparse
8
+ Gaussian Process to encode pointcloud into a compact form.
9
+ Our approach represents both the free space and the occupied
10
+ space using only one model (one 2D Sparse Gaussian Process)
11
+ instead of the existing two-model framework (two 3D Gaussian
12
+ Mixture Models). We achieve this by proposing a variance-
13
+ based sampling technique that effectively discriminates between
14
+ the free and occupied space. The new representation requires
15
+ less memory footprint and can be transmitted across limited-
16
+ bandwidth communication channels. The framework is exten-
17
+ sively evaluated in simulation and it is also demonstrated using
18
+ a real mobile robot equipped with a 3D LiDAR. Our method
19
+ results in a 70∼100 times reduction in the communication rate
20
+ compared to sending the raw pointcloud.
21
+ I. INTRODUCTION
22
+ With the rapid advancement of LiDAR technology, we
23
+ now can build maps with remarkably high resolution. For
24
+ example, each full scan of an only 16-channel 3D LiDAR
25
+ can give us 57600 points in the pointcloud that represents
26
+ the surrounding obstacles. However, a price for using the
27
+ high resolution LiDAR is the computation, storage, and com-
28
+ munication costs when mapping the environments. While
29
+ one might be able to upgrade the computation and storage
30
+ by using a high performance computer system, the com-
31
+ munication usually becomes a bottleneck due to the low
32
+ communication bandwidth available. In practice, the low
33
+ bandwidth communication is considered as a major challenge
34
+ for many robotics applications such as occupancy mapping
35
+ of underwater and subterranean environments (caves, tunnels,
36
+ mines, etc), search-and-rescue missions in disaster scenarios
37
+ with a degraded communication infrastructure, and planetary
38
+ exploration missions [1]. The low bandwidth can prevent
39
+ a robot from real-time sharing its sensor observations, and
40
+ this can significantly degrade the system responsiveness if
41
+ the robot needs to follow or interact with external control
42
+ or supervision platforms. This work tackles the problem
43
+ of sharing high-fidelity 3D pointcloud through a limited
44
+ bandwidth communication channel.
45
+ The system we consider consists of a robot (the scout)
46
+ equipped with a LiDAR and a communication apparatus, and
47
+ deployed in a low-bandwidth environment. The scout sends
48
+ 1Mahmoud Ali and Lantao Liu are with the Luddy School of Informatics,
49
+ Computing, and Engineering, Indiana University, Bloomington, IN 47408
50
+ USA, {alimaa, lantao}@iu.edu
51
+ Occupancy Surface
52
+ VSGP
53
+ variance-based sampling
54
+ SGP
55
+ OctoMap
56
+ wifi
57
+ Scout
58
+ Base
59
+ Fig. 1: System Overview.
60
+ (a)
61
+ (b)
62
+ (c)
63
+ Fig. 2: (a) Gazebo simulated mine tunnel; (b) Original pointcloud
64
+ generated by a VLP16 LiDAR in red, and reconstructed pointcloud
65
+ from the VSGP model in white; (d) Occupancy Map generated by
66
+ OctoMap from the reconstructed pointcloud.
67
+ the observations that it acquires to a base for building the
68
+ occupancy map of the environment, see Fig. 1. Our approach
69
+ exploits the Variational Sparse Gaussian Process (VSGP) [2]
70
+ as a generative model to represent the pointcloud in a
71
+ compact form. This lightweight representation is transmitted
72
+ through low-bandwidth communication to the base where the
73
+ original pointcloud is reconstructed. Extensive evaluations
74
+ reveal that our approach results in a 70∼100 times reduction
75
+ in the memory as well as the communication rate required to
76
+ transmit pointcloud data. For example, Fig. 2a shows a scene
77
+ of a simulated mine tunnel, where its raw pointcloud (shown
78
+ in red, Fig. 2b) requires around 750 KB of memory. Our
79
+ approach is able to represent the same observation using only
80
+ 6 KB of memory and transmit through limited-bandwidth
81
+ communication. On the receiver side of the communication
82
+ channel, the compact representation is used to reconstruct
83
+ the original pointcloud (reconstructed pointcloud shown in
84
+ white, Fig. 2b). An occupancy map of the scene can be built
85
+ using the reconstructed pointcloud, see Fig. 2c.
86
+ II. RELATED WORK
87
+ Pointcloud compression algorithms have been investigated
88
+ in recent years to cope with the demands to store and
89
+ communicate the high-precision 3D points [3]. For example,
90
+ the space partitioning trees approaches that exploit the 3D
91
+ correlation between pointcloud points are widely used to
92
+ arXiv:2301.11251v1 [cs.RO] 26 Jan 2023
93
+
94
+ Velodynecompress the pointcloud data [4]–[9]. Recently, deep learning
95
+ based approaches were also proposed to leverage data and
96
+ learn or encode the pointcloud compression [10]–[12]. Dif-
97
+ ferent from these frameworks, the probabilistic approaches
98
+ exploit the compactness of the distributions to compress 3D
99
+ sensor observation. For instance, Gaussian Mixture Models
100
+ (GMM) [13]–[15] have been proposed as a generative model
101
+ to encode 3D occupancy map. The GMM approach encodes
102
+ the 3D data as a mixture of Gaussian densities to represent
103
+ the occupied and free spaces around the robot.
104
+ Gaussian Process (GP) has been proven to be an excellent
105
+ framework to model spatial phenomena or features in a
106
+ continuous domain [16]–[18]. Unfortunately, the standard
107
+ GP has a cubic time complexity and this results in very
108
+ limited scalability to large datasets. Methods for reducing the
109
+ computing burdens of GPs have been previously investigated.
110
+ For example, GP regressions can be done in a real-time
111
+ fashion where the problem can be estimated locally with
112
+ local data [19]. Sparse GPs (SGPs) [20]–[26] tackle the com-
113
+ putational complexity of the normal GP through leveraging
114
+ the Bayesian rule with a sequential construction of the most
115
+ relevant subset of the data.
116
+ We propose a new probabilistic pointcloud compression
117
+ approach which is based on the VSGP [2] and inspired by
118
+ the GMM approach. While the GMM shares the accumulated
119
+ sensory information as a set of accumulated Gaussian den-
120
+ sities which are sampled and used as an occupancy map of
121
+ the environment, in contrast, the proposed approach relies on
122
+ sharing of immediate sensor observation to be reconstructed
123
+ on the other side of the communication channel for further
124
+ processing based on the required task (e.g. 3D mapping,
125
+ object recognition, tracking, etc).
126
+ This proposed VSGP-based approach offers a few ad-
127
+ vantages over the recent GMM approach: while the GMM
128
+ approach uses two 3D GMMs to fit the occupied and free
129
+ points [13]–[15], our approach uses only one 2D VSGP to
130
+ fit all the occupancy surface, including both the occupied
131
+ and free points. The primary reason that our approach uses
132
+ one VSGP instead of two is that we are using the variance
133
+ calculated by the VSGP at each sampled point during the
134
+ reconstruction process to decide if it belongs to the occupied
135
+ or the free space. Therefore, the proposed approach results
136
+ in a more compact representation of the sensor observation,
137
+ which requires less memory than the GMM approach and,
138
+ as a consequence, leads to a lower communication rate.
139
+ III. BACKGROUND
140
+ GP is a non-parametric model described by a mean
141
+ function m(x), and a co-variance function (kernel) k(x,x′),
142
+ where x is the GP input [27]:
143
+ f(x) ∼ GP
144
+
145
+ m(x),k
146
+
147
+ x,x′��
148
+ .
149
+ (1)
150
+ Considering a data set D = {(xi,yi)}N
151
+ i=1 with N training
152
+ inputs x and their corresponding scalar outputs (observations)
153
+ y. After training the GP using the data set D, the output y∗ for
154
+ any new query x∗ can be estimated using the GP prediction:
155
+ p(y∗|y) = N(y∗|my(x∗),ky(x∗,x∗)+σ2),
156
+ (2)
157
+ where my(x) and ky(x,x′) are the posterior mean and co-
158
+ variance functions [2]. The GP prediction equation depends
159
+ on the values of the hyperparameters (Θ,σ2) where Θ is the
160
+ kernel parameters and σ2 is the noise variance.
161
+ The computation complexity of a full GP is O(N3).
162
+ In order to reduce the computation complexity, different
163
+ approximation methods were proposed in the literature by
164
+ considering only M input points to represent the entire
165
+ training data [27]. These input points are called the inducing
166
+ points Xm and their corresponding values of the underlying
167
+ function f(x) are called the inducing variables fm. Replacing
168
+ the entire data set with only the M-inducing inputs leads to
169
+ the SGP which has a computational complexity of O(NM2).
170
+ Titsias [2] proposed a variational learning framework to
171
+ jointly estimate the kernel hyperparameters and the inducing
172
+ points. Titsias’ framework approximates the true exact poste-
173
+ rior of a GP p( f|y,Θ) by a variational posterior distribution
174
+ q( f, fm),
175
+ q(f, fm) = p(f|fm)φ( fm),
176
+ (3)
177
+ where φ( fm) is the free variational Gaussian distribution. The
178
+ Kullback-Leibler (KL) divergence is used to describe the dis-
179
+ crepancy between the approximated and the true posteriors.
180
+ Minimizing the KL divergence between the approximated
181
+ and the true posteriors KL[q( f, fm)||p( f|y,Θ)] is equivalent
182
+ to maximizing the variational lower bound of the true log
183
+ marginal likelihood:
184
+ FV (Xm) = log
185
+
186
+ N
187
+
188
+ y | 0,σ2I +Qnn
189
+ ��
190
+
191
+ 1
192
+ 2σ2 Tr( �K),
193
+ Qnn = KnmK−1
194
+ mmKmn,
195
+ �K = Cov(f | fm) = Knn −KnmK−1
196
+ mmKmn,
197
+ (4)
198
+ where FV (Xm) is the variational objective function, Tr( �K) is a
199
+ regularization trace term, Knn is the original n×n co-variance
200
+ matrix, Kmm is m × m co-variance matrix on the inducing
201
+ inputs, Knm is n×m cross-covariance matrix between training
202
+ and inducing points, and Knm = KT
203
+ mn. More details on VSGP
204
+ can be found in Titsias’s work [2].
205
+ IV. METHODOLOGY
206
+ The proposed approach exploits the VSGP as a generative
207
+ model to encode 3D pointcloud. The VSGP is selected
208
+ among different approximation approaches of GP due to
209
+ the following reasons: i) The variational approximation dis-
210
+ tinguishes between the inducing points M (as a variational
211
+ parameter) and the kernel hyperparameters (Θ,σ). ii) The
212
+ regularization term Tr( �K) in the variational objective func-
213
+ tion (Eq. (4)) regularizes the hyperparameters to avoid over-
214
+ fitting of the data. iii) The variational approximation offers
215
+ a discrete optimization scheme for selecting the inducing
216
+ inputs Xm from the original data1.
217
+ A. VSGP as a generative model for the occupancy surface
218
+ Inspired by [13], we project the occupied points ob-
219
+ served by a ranging sensor, e.g., LiDAR, onto a circular
220
+ surface around the sensor origin with a predefined radius
221
+ 1For more information about the inducing point selection, check [2]
222
+
223
+ roc. This surface is called occupancy surface, see Fig. 3.
224
+ In our approach, the sensor observation is defined in the
225
+ spherical coordinate system, where any observed point xi
226
+ is described by the tuple (θi,αi,ri) which represents the
227
+ azimuth, elevation, and radius values, respectively. Also,
228
+ any pointcloud data can be converted from the cartesian
229
+ coordinates (xi,yi,zi) to the spherical coordinates (θi,αi,ri)
230
+ using the following equations:
231
+ ri =
232
+
233
+ x2
234
+ i +y2
235
+ i +z2
236
+ i ,
237
+ θi = tan−1(yi,xi),
238
+ αi = cos−1(zi/ri).
239
+ (5)
240
+ All observed points that lie outside the circular occupancy
241
+ surface (with a radius ri > roc) or on the surface (with a
242
+ radius ri = roc) are neglected and considered as free space.
243
+ The rest of the points that are inside the circular surface (with
244
+ a radius ri < roc) are projected on the occupancy surface and
245
+ called the occupied points. Therefore, the occupancy surface
246
+ radius roc acts as the maximum range of the sensor. Each
247
+ occupied point xi on the surface is defined by two attributes:
248
+ the azimuth and elevation angles xi = (θi,αi), and assigned
249
+ an occupancy value f(xi) that is a function of the point radius
250
+ ri. The probability of occupancy f(xi) at each point on the
251
+ occupancy surface is modeled by a VSGP:
252
+ f(x) ∼ VSGP
253
+
254
+ m(x),k
255
+
256
+ x,x′��
257
+ .
258
+ (6)
259
+ Considering noisy measurements, we add a white noise ε to
260
+ the occupancy function f(x), so the observed occupancy is
261
+ described as yi = f(xi)+ε where ε follows a Gaussian dis-
262
+ tribution N
263
+
264
+ 0,σ2
265
+ n
266
+
267
+ . The final model of the occupancy surface
268
+ is a 2D VSGP where the input is the azimuth and elevation
269
+ angles, x ∈ {(θ,α)}n
270
+ i=1, and the corresponding output is the
271
+ expected occupancy yi. The three main components of the
272
+ final VSGP are:
273
+ 1) Zero-Mean Function m(x): There are different for-
274
+ mulas to describe the relationship between the occupancy
275
+ of a point f(xi) on the occupancy surface and its radius
276
+ ri [13]. For example, one candidate is f(xi) = 1/ri where ri
277
+ is bounded by the minimum and the maximum range of the
278
+ sensor rmin < ri < rmax = roc, where rmin > 0. Our approach
279
+ relates the occupancy of a point f(xi) to its radius ri by the
280
+ following equation f(xi) = roc − ri. This mapping between
281
+ the occupancy and the radius of a point is compatible with
282
+ the previous assumption that the occupancy surface radius
283
+ roc represents the maximum range of the sensor. Moreover,
284
+ this mapping is encoded in our VSGP model as a zero-mean
285
+ function m(x) = 0 that sets the occupancy value of the non-
286
+ observed points to zero. This mapping behavior mimics the
287
+ mechanism of the LiDAR itself.
288
+ 2) Rational Quadratic (RQ) Kernel: The RQ kernel is
289
+ selected because a GP prior with an RQ kernel is expected
290
+ to have functions that vary across different length scales.
291
+ This quality of the RQ kernel copes with the nature of the
292
+ occupancy surface, specifically in unstructured environments
293
+ where a range of diverse length scales is required, i.e.,
294
+ kRQ
295
+
296
+ x,x′�
297
+ = σ2
298
+
299
+ 1+ (x−x′)2
300
+ 2αℓ2
301
+ �−α
302
+ ,
303
+ (7)
304
+ (a)
305
+ (b)
306
+ (c)
307
+ Fig. 3: (a) Gazebo scene of a robot in a tunnel (black); (b)
308
+ The occupancy surface generated from the original pointcloud,
309
+ where warmer colors reflect smaller f(xi) values (less occupancy);
310
+ (c) The inner surface represents the original occupancy surface
311
+ (same as in b), and the middle surface represents the reconstructed
312
+ occupancy surface using the VSGP model. The outer grey-coded
313
+ surface represents the variance associated with each point on the
314
+ reconstructed occupancy surface where brighter colors reflect high
315
+ uncertainty. Raw pointcloud is shown in red in (b) and (c).
316
+ where σ2
317
+ f is the signal variance, l is the length-scale, and α
318
+ sets the relative weighting of large and small scale variations.
319
+ The RQ co-variance function is more expressive in terms of
320
+ modeling the occupancy surface than the most commonly
321
+ used Squared Exponential (SE) co-variance function. This
322
+ can be reasoned by the fact that the RQ kernel (when α
323
+ and l > 0) is equivalent to a scale mixture of SE kernels
324
+ with mixed characteristic length-scales [27]. In practice, we
325
+ take into account the resolution of LiDAR along both the
326
+ azimuth and elevation axes to initiate different length-scales
327
+ along each axis to reflect the LiDAR resolution.
328
+ 3) Inducing Points Selection: The variational learning
329
+ framework proposed in [2] jointly optimizes the variational
330
+ parameters (inducing points) and the hyperparameters (Θ,σ)
331
+ through a variational Expectation-Maximization (EM) algo-
332
+ rithm. In general, the original discrete optimization frame-
333
+ work [2] suggests having an incremental set of the inducing
334
+ points, so that during the Expectation step (E-step) a point
335
+ from the input data is added to the inducing points set to
336
+ maximize the variational objective function FV and minimize
337
+ the KL divergence between the true and approximated pos-
338
+ teriors KL[q( f)||p( f|y,Θ)]. Then the hyperparameters are
339
+ updated during the Maximization step (M-step).
340
+ Since LiDAR’s field of view is limited within a certain
341
+ range, the projection of the observed points on the circular
342
+ surface leads to a limited input domain for the VSGP. In
343
+ our case, the azimuth and the elevation axes are limited
344
+ to (−π to π) and (−15◦ to 15◦), respectively. The limited
345
+ input domain is used to initiate a fixed number of inducing
346
+ points at evenly distributed locations on the occupied part of
347
+ the occupancy surface. In this way, a different combination
348
+ of the points is selected at each E-step to maximize the
349
+ variational objective function FV and minimize the KL
350
+ divergence. Then the hyperparameters are updated during the
351
+ M-step. The number of the inducing points M is chosen to
352
+ compromise the computational and memory complexity on
353
+ one side and the accuracy of the reconstructed pointcloud
354
+ on the other side. More inducing points result in higher
355
+ computations complexity O(NM2), larger memory to store
356
+ the encoded observation, and higher bandwidth to transfer it.
357
+ However, more inducing points increase the accuracy of the
358
+
359
+ reconstructed pointcloud. We chose M=500 inducing points
360
+ to keep the average deviation between the reconstructed
361
+ pointcloud and the original pointcloud under 15 cm, see
362
+ Section V-A.2 and Fig. 5. After the training phase on the
363
+ scout side is completed, the selected inducing points are
364
+ combined together with the hyperparameters values of the
365
+ VSGP and are transmitted from the scout to the base.
366
+ B. Variance-based sampling
367
+ On the base side, the inducing points and the values of
368
+ the hyperparameters, which are received from the scout,
369
+ are used to reconstruct the original occupancy surface. The
370
+ reconstruction is done through a GP configured with the
371
+ same kernel (RQ) and likelihood (Gaussian) as the VSGP
372
+ on the scout side. The base GP is trained on the inducing
373
+ points and has a computation complexity of O(M3) where
374
+ M is the number of the inducing points, so we refer it as
375
+ a sparse GP (SGP) and refer the reconstructed occupancy
376
+ surface as the SGP occupancy surface. A grid of query points
377
+ x∗ = {(θ,α)}K
378
+ i=1 with the same resolution of the LiDAR
379
+ along the azimuth and the elevation axes is generated to
380
+ reconstruct the original pointcloud from the SGP occupancy
381
+ surface – we refer the reconstructed pointcloud as the SGP
382
+ pointcloud. If up-sampling of the pointcloud is required for
383
+ any reason, a query grid with higher resolution can be used
384
+ for the reconstruction process. The SGP occupancy surface
385
+ is used to predict the occupancy f(xi) of each point xi of
386
+ the query grid x∗. The occupancy is converted back to the
387
+ spherical radius ri = roc − f(xi) to restore the 3D spherical
388
+ coordinates of each point.
389
+ One advantage of the GP and its variants over other
390
+ modeling techniques is the uncertainty (variance) associated
391
+ with the predicted value at any query point. Considering
392
+ the VSGP model of the occupancy surface on the scout
393
+ side, the variance associated with the occupied points is low
394
+ compared to the variance related to the free points. Selecting
395
+ the inducing points as a set from the original occupied
396
+ points maintains low-variance values for the occupied part of
397
+ the reconstructed SGP occupancy surface on the base side.
398
+ Therefore, the variance value associated with any point on
399
+ the reconstructed SGP occupancy surface is used to predict
400
+ if that point belongs to the occupied or the free part of the
401
+ occupancy surface, see Fig. 4. We use a variance threshold
402
+ Vth as a judging criterion. In fact, the variance related to
403
+ the occupancy surface is different from one observation to
404
+ another, and it is affected by both the number of observed
405
+ (occupied) points and their distribution over the occupancy
406
+ surface. Therefore, we chose the variance threshold Vth as
407
+ a variable that changes with the distribution of the variance
408
+ over the occupied and free parts of the occupancy surface.
409
+ Vth is defined as a linear combination of the variance mean
410
+ vm and standard deviation vstd over the surface, i.e., Vth =
411
+ Km ∗vm +Kstd ∗vstd where Km and Kstd are constants. These
412
+ two constants are tuned by first setting Vth = vm (Km = 1 ,
413
+ Kstd = 0), then we increase Kstd and decrease Km gradually
414
+ till we get the values that give the highest accuracy for the
415
+ reconstructed SGP pointcloud (considering a fixed number of
416
+ (a)
417
+ (b)
418
+ (c)
419
+ Fig. 4: Variance-based sampling. (a) Gazebo scene shows the
420
+ entrance of the tunnel; (b) shows the original (inner), reconstructed
421
+ (middle), and variance (outer) surfaces. It also shows the re-
422
+ constructed pointcloud (in white) through reconstructing from all
423
+ points (free and occupied) of the occupancy surface. (c) shows
424
+ reconstructed SGP pointcloud after removing all points that most
425
+ likely belong to the free part of the occupancy surface. Raw
426
+ pointcloud is shown in red in (b) and (c).
427
+ inducing points). Our sampling-based approach is capable
428
+ of discriminating between the free points that most likely
429
+ belong to the free part of the SGP occupancy surface and
430
+ the occupied points that belong to the the occupied part of
431
+ the SGP occupancy surface. After removing the free part
432
+ of the SGP occupancy surface, the Cartesian coordinates of
433
+ the occupied points are calculated using the inverse form of
434
+ Eq. (5) to restore the original point cloud, see Fig. 4c.
435
+ V. EXPERIMENTAL DESIGN AND RESULTS
436
+ The proposed approach is implemented in Python3 on
437
+ top of GPflow-v2 [28] and TensorFlow-v2.4 [29] under
438
+ ROS framework [30]. Both real-time simulation and real-
439
+ time demonstration were considered to evaluate the proposed
440
+ approach. In both the simulation and the hardware experi-
441
+ ments, a VLP-16 LiDAR was used with a maximum range
442
+ of 10m, a frequency of 4Hz, and a resolution of (0.1◦,2◦)
443
+ along the azimuth and the elevation axis, respectively. This
444
+ configuration results in a maximum pointcloud size of 57600
445
+ points. The query grid, which is used to sample the SGP
446
+ occupancy surface on the base side, has the same resolution
447
+ as the VLP-16 LiDAR. A 3D occupancy grid map with a
448
+ resolution of 5cm is generated from the reconstructed SGP
449
+ pointcloud through Octomap [31].
450
+ We investigate the performance of our framework and
451
+ compare it with the GMM approach [13]–[15]. While the
452
+ GMM approach tackles the occupancy mapping problem as
453
+ a whole, our approach focuses on compressing sensor obser-
454
+ vations through limited-bandwidth communication channels.
455
+ To be able to compare the two approaches, we implemented
456
+ the GMM approach in such a way that it is used to encode
457
+ one sensor observation at a time instead of generating an
458
+ entire occupancy map. We compared our approach with two
459
+ versions of the GMM approach: i) A CPU-based implemen-
460
+ tation of GMM that follows the same guidelines of [13].
461
+ ii) An upgraded GPU-based implementation of GMM. We
462
+ implemented the GPU-GMM to have a fair computation
463
+ comparison with our VSGP approach which runs on GPU.
464
+ A. Simulation Experiments
465
+ 1) Simulation Setup:: The simulation setup consists of
466
+ two machines that communicate to each other over WiFi:
467
+
468
+ The first machine, where the scout and the environment are
469
+ simulated, is an Intel® Core™ i7 NUC11 PC equipped with
470
+ 64 GB RAM and 6 GB Geforce RTX2060 GPU. The second
471
+ machine, which acts as the base, is an Intel® Core™ i7
472
+ Alienware Laptop equipped with 32 GB RAM and 8 GB
473
+ Geforce RTX2080 GPU. Both are connected using a 2.4 GHz
474
+ WiFi router. The network flow is monitored using the ifstat
475
+ tool to evaluate the communication performance. The mine
476
+ tunnel of the cpr inspection world, which is developed by
477
+ ClearPath robotics, is used as our simulation environment.
478
+ This environment is selected because it represents one of
479
+ the targeted low-bandwidth subterranean environments. The
480
+ mine tunnel part of the cpr inspection world fits in a rectan-
481
+ gular area with an approximated area of 30×65m2, the tunnel
482
+ length is around 135m. The ground elevation and the height
483
+ of the tunnel are different from one place to another. The
484
+ ClearPath Jackal robot is used as the scout. The proposed
485
+ approach was evaluated through 20 real-time simulation
486
+ trials. In each trial, the robot starts at the beginning of the
487
+ cave and follows a predefined path along the mine using
488
+ way-point based navigation.
489
+ 2) Simulation Results: We evaluate the performance of
490
+ our approach based on the reduction in the memory and the
491
+ communication rate required to transmit the sensor observa-
492
+ tions between the scout and the base. The VSGP representa-
493
+ tion requires only 1514 floating points (FP) to represent the
494
+ entire pointcloud (3 FP for each inducing point (3x500) + 6
495
+ FP for robot pose + 6 FP for the hyperparameters). This value
496
+ is less than the memory needed by the GMM approach which
497
+ requires ∼ 2000 FP (10 FP for each component (10x200)
498
+ distributed as 6 FP for covariance + 3 FP for mean + 1
499
+ FP for weight) [13]. We send the robot pose to the base
500
+ because our approach encodes the observation relative to the
501
+ robot body frame, while the GMM approach first transforms
502
+ the observation from the robot body frame to a global frame
503
+ using the robot current pose and then sends the encoded
504
+ Gaussians densties with respect to the global frame.
505
+ To quantify the accuracy of the reconstructed SGP point-
506
+ cloud, we use the Root Mean Square Deviation (RMSD)
507
+ between the radius predicted by our approach and the actual
508
+ radius of each point on the occupancy surface.
509
+ RMSD =
510
+
511
+ ∑N
512
+ i=1 (ri − ˆri)2
513
+ N
514
+ ,
515
+ (8)
516
+ where N is the size of the pointcloud, ri is the actual radius at
517
+ (θi,αi), and ˆri is the estimated radius value at the same point
518
+ (θi,αi). Fig. 5a shows the mean and the standard deviation
519
+ of the RMSD for each predicted point over 110 observations
520
+ (each observation has around 10K to 50K points). Also,
521
+ Fig. 5a implicitly reflects the memory required by VSGP and
522
+ GMM to store one observation, as described before that the
523
+ memory required to store one observation can be calculated
524
+ by multiplying the number of inducing points (bottom x-axis)
525
+ by 3 and multiplying the number of components (top x-axis)
526
+ by 10. We match pairs of the VSGP and GMM models (in
527
+ terms of the number of inducing points and components)
528
+ based on the memory requirement and the accuracy of the
529
+ (a)
530
+ (b)
531
+ (c)
532
+ (d)
533
+ Fig. 5: Performance comparisons. (a) shows the RMSD between
534
+ the reconstructed and the original pointcloud for VSGP(vs #induc-
535
+ ing points) and GMM(vs #components); (b) illustrates the training
536
+ time against the pointcloud size (considering 500-inducing points
537
+ VSGP, and equivalently, 200-components GMM); (c) represents
538
+ the training time versus the #VSGP-inducing points and #GMM-
539
+ components; (d) shows the prediction time versus the #VSGP-
540
+ inducing points and #GMM-components.
541
+ reconstructed pointcloud (reflected by the RMSD) for each
542
+ pair, see table I. For example, 500-inducing points VSGP
543
+ results in an average RMSD value for each point of 9 cm
544
+ with a standard deviation of 10 cm. This corresponds to an
545
+ average RMSD of 11 cm with a standard deviation of 25 cm
546
+ for a 200-components GMM.
547
+ TABLE I: VSGP vs GMM(ind: inducing, cps: components)
548
+ VSGP
549
+ GMM
550
+ #
551
+ Memory
552
+ RMSD
553
+ #
554
+ Memory
555
+ RMSD
556
+ ind
557
+ ∼FPs
558
+ ∼cm
559
+ cps
560
+ ∼FPs
561
+ ∼cm
562
+ 200
563
+ 600
564
+ 20±22
565
+ 50
566
+ 500
567
+ 27±50
568
+ 300
569
+ 900
570
+ 14±15
571
+ 100
572
+ 1000
573
+ 16±35
574
+ 400
575
+ 1200
576
+ 12±14
577
+ 150
578
+ 1500
579
+ 13±31
580
+ 500
581
+ 1500
582
+ 9±10
583
+ 200
584
+ 2000
585
+ 11±29
586
+ 600
587
+ 1800
588
+ 9±10
589
+ 250
590
+ 2500
591
+ 11±30
592
+ Now we analyze the results in Fig. 5. Fig. 5a shows the
593
+ RMSD values associated with VSGP have a smaller standard
594
+ deviation than the GMM’s. It also shows that increasing the
595
+ number of the VSGP-inducing points (bottom x-axis) or the
596
+ number of the GMM-components (top x-axis) will result in
597
+ smaller RMSD (higher accuracy).
598
+ An intensive evaluation of the training and the prediction
599
+ phases is presented in Figs. 5b-5d. The reduction in the
600
+ training time versus the reduction in the size of the raw
601
+ pointcloud is presented in Fig. 5b, where 0% removal percent
602
+ means a pointcloud size of 57.6K points. Fig. 5c shows
603
+ the increase in training time versus the number of inducing
604
+ points and the number of components. We compare the
605
+ training time of the VSGP, the GMM-CPU (considering the
606
+
607
+ #Components
608
+ 50
609
+ 100
610
+ 150
611
+ 200
612
+ 250
613
+ 0.75 -
614
+ GMM
615
+ 0.50
616
+ VSGP
617
+ RMSI
618
+ 0.25
619
+ 0.00
620
+ 0.25
621
+ 200
622
+ 300
623
+ 00f
624
+ 500
625
+ 600
626
+ #Inducing Points103
627
+ Su
628
+ VSGP
629
+ GNIM
630
+ GNIM-GPU
631
+ 102
632
+ 3350606775
633
+ Removal Components
634
+ 50
635
+ 100
636
+ 150
637
+ 200
638
+ 250
639
+ 103
640
+ GMM
641
+ GMM-GPU
642
+ VSGP
643
+ 102
644
+ 200
645
+ 400
646
+ 600
647
+ Inducing PointsComponents
648
+ 50
649
+ 100
650
+ 150
651
+ 200
652
+ 250
653
+ VSGP
654
+ [stu]
655
+ GMM
656
+ 40
657
+ Prediction
658
+ 20
659
+ 200
660
+ 400
661
+ 600
662
+ Inducing Points(a)
663
+ (b)
664
+ (c)
665
+ (d)
666
+ Fig. 6: (a) shows the simulated mine environment in Gazebo;
667
+ (b) shows the Octomap of the mine generated from the original
668
+ pointcloud; (c) shows the Octomap generated from the recon-
669
+ structed SGP pointcloud; (d) shows the communication rate and the
670
+ accumulated data sent from the scout to the base in case of sending
671
+ raw pointcloud PCL(1750KB/S, 840MB), GMM data(25.8KB/S,
672
+ 12.4MB), and VSGP data(18.2KB/S, 8.7MB). The y-axis is plotted
673
+ in log-scale.
674
+ default configuration of the GMM approach used in [13]),
675
+ and the GMM-GPU implementation. The results show that
676
+ our approach outperforms both the CPU and GPU imple-
677
+ mentation of the GMM approach in terms of training time.
678
+ Fig. 5d presents the variation of the prediction time of the
679
+ VSGP versus the number of the inducing points, where the
680
+ values shown in the figure represent the time required to
681
+ predict the occupancy value associated with all the points of
682
+ the grid query x (57600 points).
683
+ Fig. 5d indicates that for a matching pair of GMM and
684
+ VSGP (Table I), GMM has a lower sampling time than
685
+ the paired VSGP. However, the pointcloud reconstruction
686
+ process of the VSGP is more convenient than the GMM
687
+ approach because a fundamental difference between sam-
688
+ pling the VSGP and the GMM is that: when we sample
689
+ from a GMM, we get a sample (from a distribution) with
690
+ random values (θs,αs,rs), so we do not have control over
691
+ the location of the sample on the occupancy surface (θs,αs).
692
+ In contrast, for the VSGP approach, we predict the radius
693
+ value rs for a certain point on the occupancy surface defined
694
+ by (θs,αs). So, we have control over the point location on
695
+ the occupancy surface. While constructing the 3D octomap
696
+ of the tunnel environment using the scout-base scheme, the
697
+ average communication rate was 1750 KB/S, 25.8 KB/S,
698
+ and 18.2 KB/S for sending raw point clouds, GMM encoded
699
+ data, and VSGP encoded data respectively, see Fig. 6d. The
700
+ accumulated data sent through the network is reduced from
701
+ 840 MB for sending raw pointcloud to 12.4 MB in case
702
+ of GMM and 8.7 MB in case of VSGP. This indicates a
703
+ (a)
704
+ (b)
705
+ (c)
706
+ Fig. 7: Indoor demonstration. (a) shows octomap of the laboratory
707
+ building generated from the original pointcloud. (b) shows octomap
708
+ generated from the reconstructed SGP pointcloud. (c) shows the
709
+ reduction in the communication rate and the accumulated data sent
710
+ from the scout to the base, where log-scale is used for y-axis. PCL
711
+ represents the raw pointcloud.
712
+ compression ratio of ∼ 96 (840/8.7 ∼ 1750/18.2).
713
+ B. Hardware Experiment
714
+ A Jackal mobile robot, equipped with a VLP-16 LiDAR
715
+ and NUC11 PC, was used as the scout, while the Alien-
716
+ ware laptop was used as the base. The demonstration was
717
+ conducted in an indoor environment, where the VSGP-
718
+ encoded pointcloud data was sent from the scout to the
719
+ base to generate a 3D Octomap [31] of the building from
720
+ the SGP reconstructed pointcloud in real-time, see Fig. 7.
721
+ Fig. 7c shows the reduction in the communication rate for
722
+ the hardware experiment. The communication rate dropped
723
+ from around 560 KB for transmitting raw pointcloud to
724
+ around 8 KB for transmitting the encoded VSGP (this ratio
725
+ is equivalent to 70 times smaller rate). The communication
726
+ rate of the hardware experiment is low compared to the
727
+ simulation experiment because the LiDAR resolution was
728
+ halved during the hardware experiment. The total amount
729
+ of data transmitted at the end of each trial was around 100
730
+ MB for sending raw pointcloud and only around 1.4 MB for
731
+ sending the VSGP encoded observation.
732
+ VI. CONCLUSION
733
+ In this paper, we introduce a lightweight representation
734
+ for the 3D pointcloud using the VSGP. This representation
735
+ allows high-fidelity observations to be efficiently stored
736
+ and transmitted through limited-bandwidth communication
737
+ channels. Based on the results of the simulation and hardware
738
+ experiments, our approach results in around 70-100 times
739
+ smaller size representation of the sensor observation. This
740
+ compact representation can facilitate many of the robotics
741
+
742
+ KB
743
+ PCI
744
+ GMM
745
+ Rate
746
+ 102
747
+ VSGP
748
+ MB
749
+ PCL
750
+ 101
751
+ Data
752
+ GMM
753
+ VSGP
754
+ 0
755
+ O0T
756
+ 200
757
+ 300
758
+ 400
759
+ 500
760
+ Time [S]S/
761
+ POL
762
+ 101
763
+ VSGP
764
+ Rate
765
+ KB
766
+ PCL
767
+ Data
768
+ 102
769
+ VSGP
770
+ 0
771
+ 25
772
+ 50
773
+ 75
774
+ 100
775
+ 125
776
+ 150
777
+ 175
778
+ Time [S]applications which are limited by the communication band-
779
+ width such as subterranean and underwater exploration,
780
+ search and rescue missions, and planetary exploration. In
781
+ addition, our approach can also be beneficial in the context
782
+ of multi-robot collaboration where a number of robots are
783
+ required to share high-volume information (3D pointcloud)
784
+ through low-bandwidth channels.
785
+ ACKNOWLEDGEMENT
786
+ This work was supported by National Science Foundation
787
+ with grant numbers 2006886 and 2047169.
788
+ REFERENCES
789
+ [1] V. H. Cid et al. Keeping communications flowing during large-scale
790
+ disasters: leveraging amateur radio innovations for disaster medicine.
791
+ Disaster medicine and public health preparedness, 12(2):257–264,
792
+ 2018.
793
+ [2] Michalis Titsias. Variational learning of inducing variables in sparse
794
+ gaussian processes. In Artificial intelligence and statistics, pages 567–
795
+ 574. PMLR, 2009.
796
+ [3] Chao Cao, Marius Preda, and Titus Zaharia.
797
+ 3d point cloud com-
798
+ pression: A survey. In The 24th International Conference on 3D Web
799
+ Technology, pages 1–9, 2019.
800
+ [4] Yu Feng, Shaoshan Liu, and Yuhao Zhu. Real-time spatio-temporal
801
+ lidar point cloud compression.
802
+ In 2020 IEEE/RSJ international
803
+ conference on intelligent robots and systems (IROS), pages 10766–
804
+ 10773. IEEE, 2020.
805
+ [5] Tim Golla and Reinhard Klein. Real-time point cloud compression.
806
+ In 2015 IEEE/RSJ International Conference on Intelligent Robots and
807
+ Systems (IROS), pages 5087–5092. IEEE, 2015.
808
+ [6] S´ebastien Lasserre, David Flynn, and Shouxing Qu. Using neighbour-
809
+ ing nodes for the compression of octrees representing the geometry
810
+ of point clouds. In Proceedings of the 10th ACM Multimedia Systems
811
+ Conference, pages 145–153, 2019.
812
+ [7] Dorina Thanou, Philip A Chou, and Pascal Frossard.
813
+ Graph-based
814
+ compression of dynamic 3d point cloud sequences. IEEE Transactions
815
+ on Image Processing, 25(4):1765–1778, 2016.
816
+ [8] Yan Huang, Jingliang Peng, C-C Jay Kuo, and M Gopi. Octree-based
817
+ progressive geometry coding of point clouds. In PBG@ SIGGRAPH,
818
+ pages 103–110, 2006.
819
+ [9] Lila Huang, Shenlong Wang, Kelvin Wong, Jerry Liu, and Raquel
820
+ Urtasun. Octsqueeze: Octree-structured entropy model for lidar com-
821
+ pression. In Proceedings of the IEEE/CVF conference on computer
822
+ vision and pattern recognition, pages 1313–1323, 2020.
823
+ [10] Maurice Quach, Jiahao Pang, Dong Tian, Giuseppe Valenzise, and
824
+ Fr´ed´eric Dufaux. Survey on deep learning-based point cloud com-
825
+ pression. Frontiers in Signal Processing, 2022.
826
+ [11] Louis Wiesmann, Andres Milioto, Xieyuanli Chen, Cyrill Stachniss,
827
+ and Jens Behley. Deep compression for dense point cloud maps. IEEE
828
+ Robotics and Automation Letters, 6(2):2060–2067, 2021.
829
+ [12] Wei Yan, Shan Liu, Thomas H Li, Zhu Li, Ge Li, et al.
830
+ Deep
831
+ autoencoder-based lossy geometry compression for point clouds. arXiv
832
+ preprint arXiv:1905.03691, 2019.
833
+ [13] W. Tabib et al.
834
+ Real-time information-theoretic exploration with
835
+ gaussian mixture model maps.
836
+ In Robotics: Science and Systems,
837
+ 2019.
838
+ [14] C. O’Meadhra et al. Variable resolution occupancy mapping using
839
+ gaussian mixture models.
840
+ IEEE Robotics and Automation Letters,
841
+ 4(2):2015–2022, 2018.
842
+ [15] Task-Specific Manipulator Design. Communication-efficient planning
843
+ and mapping for multi-robot exploration in large environments. Jour-
844
+ nal Article, 15(2):e1971, 2019.
845
+ [16] Carl Edward Rasmussen and Christopher K. I. Williams. Gaussian
846
+ Processes for Machine Learning. The MIT Press, 2005.
847
+ [17] A. Singh, A. Krause, C. Guestrin, W. Kaiser, and M. Batalin. Efficient
848
+ planning of informative paths for multiple robots.
849
+ In the 20th
850
+ International Joint Conference on Artifical Intelligence, IJCAI’07,
851
+ pages 2204–2211, 2007.
852
+ [18] Ruofei Ouyang, Kian Hsiang Low, Jie Chen, and Patrick Jaillet.
853
+ Multi-robot active sensing of non-stationary gaussian process-based
854
+ environmental phenomena. In Proceedings of the 2014 International
855
+ Conference on Autonomous Agents and Multi-agent Systems, pages
856
+ 573–580, 2014.
857
+ [19] Duy Nguyen-tuong and Jan Peters. Local gaussian process regression
858
+ for real time online model learning and control. In In Advances in
859
+ Neural Information Processing Systems 22 (NIPS, 2008.
860
+ [20] Lehel Csat´o and Manfred Opper. Sparse on-line gaussian processes.
861
+ Neural computation, 14(3):641–668, 2002.
862
+ [21] A. J. Smola and P. L. Bartlett.
863
+ Sparse greedy gaussian process
864
+ regression.
865
+ In Advances in neural information processing systems,
866
+ pages 619–625, 2001.
867
+ [22] Ch. Williams and M. Seeger.
868
+ Using the nystr¨om method to speed
869
+ up kernel machines. In Proceedings of the 14th annual conference
870
+ on neural information processing systems, number CONF, pages 682–
871
+ 688, 2001.
872
+ [23] N. Lawrence, M. Seeger, and R. Herbrich. Fast sparse gaussian process
873
+ methods: The informative vector machine. In 16th annual conference
874
+ on neural information processing systems, number CONF, pages 609–
875
+ 616, 2003.
876
+ [24] E. Snelson and Z. Ghahramani.
877
+ Sparse gaussian processes using
878
+ pseudo-inputs. Advances in neural information processing systems,
879
+ 18:1257, 2006.
880
+ [25] Matthias Seeger.
881
+ Bayesian gaussian process models: Pac-bayesian
882
+ generalisation error bounds and sparse approximations.
883
+ Technical
884
+ report, University of Edinburgh, 2003.
885
+ [26] Rishit Sheth, Yuyang Wang, and Roni Khardon. Sparse variational
886
+ inference for generalized gp models. In International Conference on
887
+ Machine Learning, pages 1302–1311. PMLR, 2015.
888
+ [27] Christopher K Williams and Carl Edward Rasmussen.
889
+ Gaussian
890
+ processes for machine learning, volume 2.
891
+ MIT press Cambridge,
892
+ MA, 2006.
893
+ [28] A. Matthews et al. Gpflow: A gaussian process library using tensor-
894
+ flow. J. Mach. Learn. Res., 18(40):1–6, 2017.
895
+ [29] M. Abadi et al.
896
+ Tensorflow: A system for large-scale machine
897
+ learning. In 12th {USENIX} symposium on operating systems design
898
+ and implementation ({OSDI} 16), pages 265–283, 2016.
899
+ [30] Morgan Quigley, Ken Conley, Brian Gerkey, Josh Faust, Tully Foote,
900
+ Jeremy Leibs, Rob Wheeler, Andrew Y Ng, et al. Ros: an open-source
901
+ robot operating system. In ICRA workshop on open source software,
902
+ volume 3, page 5. Kobe, Japan, 2009.
903
+ [31] A. Hornung et al. Octomap: An efficient probabilistic 3d mapping
904
+ framework based on octrees.
905
+ Autonomous robots, 34(3):189–206,
906
+ 2013.
907
+
JNFIT4oBgHgl3EQfZCuh/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,413 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf,len=412
2
+ page_content='This paper has been accepted for publication at 2023 IEEE International Conference on Robotics and Automation (ICRA 2023) Light-Weight Pointcloud Representation with Sparse Gaussian Process Mahmoud Ali and Lantao Liu Abstract— This paper presents a framework to represent high-fidelity pointcloud sensor observations for efficient com- munication and storage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
3
+ page_content=' The proposed approach exploits Sparse Gaussian Process to encode pointcloud into a compact form.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
4
+ page_content=' Our approach represents both the free space and the occupied space using only one model (one 2D Sparse Gaussian Process) instead of the existing two-model framework (two 3D Gaussian Mixture Models).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
5
+ page_content=' We achieve this by proposing a variance- based sampling technique that effectively discriminates between the free and occupied space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
6
+ page_content=' The new representation requires less memory footprint and can be transmitted across limited- bandwidth communication channels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
7
+ page_content=' The framework is exten- sively evaluated in simulation and it is also demonstrated using a real mobile robot equipped with a 3D LiDAR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
8
+ page_content=' Our method results in a 70∼100 times reduction in the communication rate compared to sending the raw pointcloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
9
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
10
+ page_content=' INTRODUCTION With the rapid advancement of LiDAR technology, we now can build maps with remarkably high resolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
11
+ page_content=' For example, each full scan of an only 16-channel 3D LiDAR can give us 57600 points in the pointcloud that represents the surrounding obstacles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
12
+ page_content=' However, a price for using the high resolution LiDAR is the computation, storage, and com- munication costs when mapping the environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
13
+ page_content=' While one might be able to upgrade the computation and storage by using a high performance computer system, the com- munication usually becomes a bottleneck due to the low communication bandwidth available.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
14
+ page_content=' In practice, the low bandwidth communication is considered as a major challenge for many robotics applications such as occupancy mapping of underwater and subterranean environments (caves, tunnels, mines, etc), search-and-rescue missions in disaster scenarios with a degraded communication infrastructure, and planetary exploration missions [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
15
+ page_content=' The low bandwidth can prevent a robot from real-time sharing its sensor observations, and this can significantly degrade the system responsiveness if the robot needs to follow or interact with external control or supervision platforms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
16
+ page_content=' This work tackles the problem of sharing high-fidelity 3D pointcloud through a limited bandwidth communication channel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
17
+ page_content=' The system we consider consists of a robot (the scout) equipped with a LiDAR and a communication apparatus, and deployed in a low-bandwidth environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
18
+ page_content=' The scout sends 1Mahmoud Ali and Lantao Liu are with the Luddy School of Informatics, Computing, and Engineering, Indiana University, Bloomington, IN 47408 USA, {alimaa, lantao}@iu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
19
+ page_content='edu Occupancy Surface VSGP variance-based sampling SGP OctoMap wifi Scout Base Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
20
+ page_content=' 1: System Overview.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
21
+ page_content=' (a) (b) (c) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
22
+ page_content=' 2: (a) Gazebo simulated mine tunnel;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
23
+ page_content=' (b) Original pointcloud generated by a VLP16 LiDAR in red, and reconstructed pointcloud from the VSGP model in white;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
24
+ page_content=' (d) Occupancy Map generated by OctoMap from the reconstructed pointcloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
25
+ page_content=' the observations that it acquires to a base for building the occupancy map of the environment, see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
26
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
27
+ page_content=' Our approach exploits the Variational Sparse Gaussian Process (VSGP) [2] as a generative model to represent the pointcloud in a compact form.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
28
+ page_content=' This lightweight representation is transmitted through low-bandwidth communication to the base where the original pointcloud is reconstructed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
29
+ page_content=' Extensive evaluations reveal that our approach results in a 70∼100 times reduction in the memory as well as the communication rate required to transmit pointcloud data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
30
+ page_content=' For example, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
31
+ page_content=' 2a shows a scene of a simulated mine tunnel, where its raw pointcloud (shown in red, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
32
+ page_content=' 2b) requires around 750 KB of memory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
33
+ page_content=' Our approach is able to represent the same observation using only 6 KB of memory and transmit through limited-bandwidth communication.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
34
+ page_content=' On the receiver side of the communication channel, the compact representation is used to reconstruct the original pointcloud (reconstructed pointcloud shown in white, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
35
+ page_content=' 2b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
36
+ page_content=' An occupancy map of the scene can be built using the reconstructed pointcloud, see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
37
+ page_content=' 2c.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
38
+ page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
39
+ page_content=' RELATED WORK Pointcloud compression algorithms have been investigated in recent years to cope with the demands to store and communicate the high-precision 3D points [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
40
+ page_content=' For example, the space partitioning trees approaches that exploit the 3D correlation between pointcloud points are widely used to arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
41
+ page_content='11251v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
42
+ page_content='RO] 26 Jan 2023 Velodynecompress the pointcloud data [4]–[9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
43
+ page_content=' Recently, deep learning based approaches were also proposed to leverage data and learn or encode the pointcloud compression [10]–[12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
44
+ page_content=' Dif- ferent from these frameworks, the probabilistic approaches exploit the compactness of the distributions to compress 3D sensor observation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
45
+ page_content=' For instance, Gaussian Mixture Models (GMM) [13]–[15] have been proposed as a generative model to encode 3D occupancy map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
46
+ page_content=' The GMM approach encodes the 3D data as a mixture of Gaussian densities to represent the occupied and free spaces around the robot.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
47
+ page_content=' Gaussian Process (GP) has been proven to be an excellent framework to model spatial phenomena or features in a continuous domain [16]–[18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
48
+ page_content=' Unfortunately, the standard GP has a cubic time complexity and this results in very limited scalability to large datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
49
+ page_content=' Methods for reducing the computing burdens of GPs have been previously investigated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
50
+ page_content=' For example, GP regressions can be done in a real-time fashion where the problem can be estimated locally with local data [19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
51
+ page_content=' Sparse GPs (SGPs) [20]–[26] tackle the com- putational complexity of the normal GP through leveraging the Bayesian rule with a sequential construction of the most relevant subset of the data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
52
+ page_content=' We propose a new probabilistic pointcloud compression approach which is based on the VSGP [2] and inspired by the GMM approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
53
+ page_content=' While the GMM shares the accumulated sensory information as a set of accumulated Gaussian den- sities which are sampled and used as an occupancy map of the environment, in contrast, the proposed approach relies on sharing of immediate sensor observation to be reconstructed on the other side of the communication channel for further processing based on the required task (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
54
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
55
+ page_content=' 3D mapping, object recognition, tracking, etc).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
56
+ page_content=' This proposed VSGP-based approach offers a few ad- vantages over the recent GMM approach: while the GMM approach uses two 3D GMMs to fit the occupied and free points [13]–[15], our approach uses only one 2D VSGP to fit all the occupancy surface, including both the occupied and free points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
57
+ page_content=' The primary reason that our approach uses one VSGP instead of two is that we are using the variance calculated by the VSGP at each sampled point during the reconstruction process to decide if it belongs to the occupied or the free space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
58
+ page_content=' Therefore, the proposed approach results in a more compact representation of the sensor observation, which requires less memory than the GMM approach and, as a consequence, leads to a lower communication rate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
59
+ page_content=' III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
60
+ page_content=' BACKGROUND GP is a non-parametric model described by a mean function m(x), and a co-variance function (kernel) k(x,x���), where x is the GP input [27]: f(x) ∼ GP � m(x),k � x,x′�� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
61
+ page_content=' (1) Considering a data set D = {(xi,yi)}N i=1 with N training inputs x and their corresponding scalar outputs (observations) y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
62
+ page_content=' After training the GP using the data set D, the output y∗ for any new query x∗ can be estimated using the GP prediction: p(y∗|y) = N(y∗|my(x∗),ky(x∗,x∗)+σ2), (2) where my(x) and ky(x,x′) are the posterior mean and co- variance functions [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
63
+ page_content=' The GP prediction equation depends on the values of the hyperparameters (Θ,σ2) where Θ is the kernel parameters and σ2 is the noise variance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
64
+ page_content=' The computation complexity of a full GP is O(N3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
65
+ page_content=' In order to reduce the computation complexity, different approximation methods were proposed in the literature by considering only M input points to represent the entire training data [27].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
66
+ page_content=' These input points are called the inducing points Xm and their corresponding values of the underlying function f(x) are called the inducing variables fm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
67
+ page_content=' Replacing the entire data set with only the M-inducing inputs leads to the SGP which has a computational complexity of O(NM2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
68
+ page_content=' Titsias [2] proposed a variational learning framework to jointly estimate the kernel hyperparameters and the inducing points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
69
+ page_content=' Titsias’ framework approximates the true exact poste- rior of a GP p( f|y,Θ) by a variational posterior distribution q( f, fm), q(f, fm) = p(f|fm)φ( fm), (3) where φ( fm) is the free variational Gaussian distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
70
+ page_content=' The Kullback-Leibler (KL) divergence is used to describe the dis- crepancy between the approximated and the true posteriors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
71
+ page_content=' Minimizing the KL divergence between the approximated and the true posteriors KL[q( f,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
72
+ page_content=' fm)||p( f|y,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
73
+ page_content='Θ)] is equivalent to maximizing the variational lower bound of the true log marginal likelihood: FV (Xm) = log � N � y | 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
74
+ page_content='σ2I +Qnn �� − 1 2σ2 Tr( �K),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
75
+ page_content=' Qnn = KnmK−1 mmKmn,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
76
+ page_content=' �K = Cov(f | fm) = Knn −KnmK−1 mmKmn,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
77
+ page_content=' (4) where FV (Xm) is the variational objective function,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
78
+ page_content=' Tr( �K) is a regularization trace term,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
79
+ page_content=' Knn is the original n×n co-variance matrix,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
80
+ page_content=' Kmm is m × m co-variance matrix on the inducing inputs,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
81
+ page_content=' Knm is n×m cross-covariance matrix between training and inducing points,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
82
+ page_content=' and Knm = KT mn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
83
+ page_content=' More details on VSGP can be found in Titsias’s work [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
84
+ page_content=' IV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
85
+ page_content=' METHODOLOGY The proposed approach exploits the VSGP as a generative model to encode 3D pointcloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
86
+ page_content=' The VSGP is selected among different approximation approaches of GP due to the following reasons: i) The variational approximation dis- tinguishes between the inducing points M (as a variational parameter) and the kernel hyperparameters (Θ,σ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
87
+ page_content=' ii) The regularization term Tr( �K) in the variational objective func- tion (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
88
+ page_content=' (4)) regularizes the hyperparameters to avoid over- fitting of the data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
89
+ page_content=' iii) The variational approximation offers a discrete optimization scheme for selecting the inducing inputs Xm from the original data1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
90
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
91
+ page_content=' VSGP as a generative model for the occupancy surface Inspired by [13], we project the occupied points ob- served by a ranging sensor, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
92
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
93
+ page_content=', LiDAR, onto a circular surface around the sensor origin with a predefined radius 1For more information about the inducing point selection, check [2] roc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
94
+ page_content=' This surface is called occupancy surface, see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
95
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
96
+ page_content=' In our approach, the sensor observation is defined in the spherical coordinate system, where any observed point xi is described by the tuple (θi,αi,ri) which represents the azimuth, elevation, and radius values, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
97
+ page_content=' Also, any pointcloud data can be converted from the cartesian coordinates (xi,yi,zi) to the spherical coordinates (θi,αi,ri) using the following equations: ri = � x2 i +y2 i +z2 i , θi = tan−1(yi,xi), αi = cos−1(zi/ri).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
98
+ page_content=' (5) All observed points that lie outside the circular occupancy surface (with a radius ri > roc) or on the surface (with a radius ri = roc) are neglected and considered as free space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
99
+ page_content=' The rest of the points that are inside the circular surface (with a radius ri < roc) are projected on the occupancy surface and called the occupied points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
100
+ page_content=' Therefore, the occupancy surface radius roc acts as the maximum range of the sensor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
101
+ page_content=' Each occupied point xi on the surface is defined by two attributes: the azimuth and elevation angles xi = (θi,αi), and assigned an occupancy value f(xi) that is a function of the point radius ri.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
102
+ page_content=' The probability of occupancy f(xi) at each point on the occupancy surface is modeled by a VSGP: f(x) ∼ VSGP � m(x),k � x,x′�� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
103
+ page_content=' (6) Considering noisy measurements, we add a white noise ε to the occupancy function f(x), so the observed occupancy is described as yi = f(xi)+ε where ε follows a Gaussian dis- tribution N � 0,σ2 n � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
104
+ page_content=' The final model of the occupancy surface is a 2D VSGP where the input is the azimuth and elevation angles, x ∈ {(θ,α)}n i=1, and the corresponding output is the expected occupancy yi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
105
+ page_content=' The three main components of the final VSGP are: 1) Zero-Mean Function m(x): There are different for- mulas to describe the relationship between the occupancy of a point f(xi) on the occupancy surface and its radius ri [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
106
+ page_content=' For example, one candidate is f(xi) = 1/ri where ri is bounded by the minimum and the maximum range of the sensor rmin < ri < rmax = roc, where rmin > 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
107
+ page_content=' Our approach relates the occupancy of a point f(xi) to its radius ri by the following equation f(xi) = roc − ri.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
108
+ page_content=' This mapping between the occupancy and the radius of a point is compatible with the previous assumption that the occupancy surface radius roc represents the maximum range of the sensor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
109
+ page_content=' Moreover, this mapping is encoded in our VSGP model as a zero-mean function m(x) = 0 that sets the occupancy value of the non- observed points to zero.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
110
+ page_content=' This mapping behavior mimics the mechanism of the LiDAR itself.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
111
+ page_content=' 2) Rational Quadratic (RQ) Kernel: The RQ kernel is selected because a GP prior with an RQ kernel is expected to have functions that vary across different length scales.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
112
+ page_content=' This quality of the RQ kernel copes with the nature of the occupancy surface, specifically in unstructured environments where a range of diverse length scales is required, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
113
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
114
+ page_content=', kRQ � x,x′� = σ2 � 1+ (x−x′)2 2αℓ2 �−α , (7) (a) (b) (c) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
115
+ page_content=' 3: (a) Gazebo scene of a robot in a tunnel (black);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
116
+ page_content=' (b) The occupancy surface generated from the original pointcloud, where warmer colors reflect smaller f(xi) values (less occupancy);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
117
+ page_content=' (c) The inner surface represents the original occupancy surface (same as in b), and the middle surface represents the reconstructed occupancy surface using the VSGP model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
118
+ page_content=' The outer grey-coded surface represents the variance associated with each point on the reconstructed occupancy surface where brighter colors reflect high uncertainty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
119
+ page_content=' Raw pointcloud is shown in red in (b) and (c).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
120
+ page_content=' where σ2 f is the signal variance, l is the length-scale, and α sets the relative weighting of large and small scale variations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
121
+ page_content=' The RQ co-variance function is more expressive in terms of modeling the occupancy surface than the most commonly used Squared Exponential (SE) co-variance function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
122
+ page_content=' This can be reasoned by the fact that the RQ kernel (when α and l > 0) is equivalent to a scale mixture of SE kernels with mixed characteristic length-scales [27].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
123
+ page_content=' In practice, we take into account the resolution of LiDAR along both the azimuth and elevation axes to initiate different length-scales along each axis to reflect the LiDAR resolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
124
+ page_content=' 3) Inducing Points Selection: The variational learning framework proposed in [2] jointly optimizes the variational parameters (inducing points) and the hyperparameters (Θ,σ) through a variational Expectation-Maximization (EM) algo- rithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
125
+ page_content=' In general, the original discrete optimization frame- work [2] suggests having an incremental set of the inducing points, so that during the Expectation step (E-step) a point from the input data is added to the inducing points set to maximize the variational objective function FV and minimize the KL divergence between the true and approximated pos- teriors KL[q( f)||p( f|y,Θ)].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
126
+ page_content=' Then the hyperparameters are updated during the Maximization step (M-step).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
127
+ page_content=' Since LiDAR’s field of view is limited within a certain range, the projection of the observed points on the circular surface leads to a limited input domain for the VSGP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
128
+ page_content=' In our case, the azimuth and the elevation axes are limited to (−π to π) and (−15◦ to 15◦), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
129
+ page_content=' The limited input domain is used to initiate a fixed number of inducing points at evenly distributed locations on the occupied part of the occupancy surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
130
+ page_content=' In this way, a different combination of the points is selected at each E-step to maximize the variational objective function FV and minimize the KL divergence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
131
+ page_content=' Then the hyperparameters are updated during the M-step.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
132
+ page_content=' The number of the inducing points M is chosen to compromise the computational and memory complexity on one side and the accuracy of the reconstructed pointcloud on the other side.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
133
+ page_content=' More inducing points result in higher computations complexity O(NM2), larger memory to store the encoded observation, and higher bandwidth to transfer it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
134
+ page_content=' However, more inducing points increase the accuracy of the reconstructed pointcloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
135
+ page_content=' We chose M=500 inducing points to keep the average deviation between the reconstructed pointcloud and the original pointcloud under 15 cm, see Section V-A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
136
+ page_content='2 and Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
137
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
138
+ page_content=' After the training phase on the scout side is completed, the selected inducing points are combined together with the hyperparameters values of the VSGP and are transmitted from the scout to the base.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
139
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
140
+ page_content=' Variance-based sampling On the base side, the inducing points and the values of the hyperparameters, which are received from the scout, are used to reconstruct the original occupancy surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
141
+ page_content=' The reconstruction is done through a GP configured with the same kernel (RQ) and likelihood (Gaussian) as the VSGP on the scout side.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
142
+ page_content=' The base GP is trained on the inducing points and has a computation complexity of O(M3) where M is the number of the inducing points, so we refer it as a sparse GP (SGP) and refer the reconstructed occupancy surface as the SGP occupancy surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
143
+ page_content=' A grid of query points x∗ = {(θ,α)}K i=1 with the same resolution of the LiDAR along the azimuth and the elevation axes is generated to reconstruct the original pointcloud from the SGP occupancy surface – we refer the reconstructed pointcloud as the SGP pointcloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
144
+ page_content=' If up-sampling of the pointcloud is required for any reason, a query grid with higher resolution can be used for the reconstruction process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
145
+ page_content=' The SGP occupancy surface is used to predict the occupancy f(xi) of each point xi of the query grid x∗.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
146
+ page_content=' The occupancy is converted back to the spherical radius ri = roc − f(xi) to restore the 3D spherical coordinates of each point.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
147
+ page_content=' One advantage of the GP and its variants over other modeling techniques is the uncertainty (variance) associated with the predicted value at any query point.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
148
+ page_content=' Considering the VSGP model of the occupancy surface on the scout side, the variance associated with the occupied points is low compared to the variance related to the free points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
149
+ page_content=' Selecting the inducing points as a set from the original occupied points maintains low-variance values for the occupied part of the reconstructed SGP occupancy surface on the base side.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
150
+ page_content=' Therefore, the variance value associated with any point on the reconstructed SGP occupancy surface is used to predict if that point belongs to the occupied or the free part of the occupancy surface, see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
151
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
152
+ page_content=' We use a variance threshold Vth as a judging criterion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
153
+ page_content=' In fact, the variance related to the occupancy surface is different from one observation to another, and it is affected by both the number of observed (occupied) points and their distribution over the occupancy surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
154
+ page_content=' Therefore, we chose the variance threshold Vth as a variable that changes with the distribution of the variance over the occupied and free parts of the occupancy surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
155
+ page_content=' Vth is defined as a linear combination of the variance mean vm and standard deviation vstd over the surface, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
156
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
157
+ page_content=', Vth = Km ∗vm +Kstd ∗vstd where Km and Kstd are constants.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
158
+ page_content=' These two constants are tuned by first setting Vth = vm (Km = 1 , Kstd = 0), then we increase Kstd and decrease Km gradually till we get the values that give the highest accuracy for the reconstructed SGP pointcloud (considering a fixed number of (a) (b) (c) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
159
+ page_content=' 4: Variance-based sampling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
160
+ page_content=' (a) Gazebo scene shows the entrance of the tunnel;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
161
+ page_content=' (b) shows the original (inner), reconstructed (middle), and variance (outer) surfaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
162
+ page_content=' It also shows the re- constructed pointcloud (in white) through reconstructing from all points (free and occupied) of the occupancy surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
163
+ page_content=' (c) shows reconstructed SGP pointcloud after removing all points that most likely belong to the free part of the occupancy surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
164
+ page_content=' Raw pointcloud is shown in red in (b) and (c).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
165
+ page_content=' inducing points).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
166
+ page_content=' Our sampling-based approach is capable of discriminating between the free points that most likely belong to the free part of the SGP occupancy surface and the occupied points that belong to the the occupied part of the SGP occupancy surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
167
+ page_content=' After removing the free part of the SGP occupancy surface, the Cartesian coordinates of the occupied points are calculated using the inverse form of Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
168
+ page_content=' (5) to restore the original point cloud, see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
169
+ page_content=' 4c.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
170
+ page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
171
+ page_content=' EXPERIMENTAL DESIGN AND RESULTS The proposed approach is implemented in Python3 on top of GPflow-v2 [28] and TensorFlow-v2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
172
+ page_content='4 [29] under ROS framework [30].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
173
+ page_content=' Both real-time simulation and real- time demonstration were considered to evaluate the proposed approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
174
+ page_content=' In both the simulation and the hardware experi- ments, a VLP-16 LiDAR was used with a maximum range of 10m, a frequency of 4Hz, and a resolution of (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
175
+ page_content='1◦,2◦) along the azimuth and the elevation axis, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
176
+ page_content=' This configuration results in a maximum pointcloud size of 57600 points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
177
+ page_content=' The query grid, which is used to sample the SGP occupancy surface on the base side, has the same resolution as the VLP-16 LiDAR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
178
+ page_content=' A 3D occupancy grid map with a resolution of 5cm is generated from the reconstructed SGP pointcloud through Octomap [31].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
179
+ page_content=' We investigate the performance of our framework and compare it with the GMM approach [13]–[15].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
180
+ page_content=' While the GMM approach tackles the occupancy mapping problem as a whole, our approach focuses on compressing sensor obser- vations through limited-bandwidth communication channels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
181
+ page_content=' To be able to compare the two approaches, we implemented the GMM approach in such a way that it is used to encode one sensor observation at a time instead of generating an entire occupancy map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
182
+ page_content=' We compared our approach with two versions of the GMM approach: i) A CPU-based implemen- tation of GMM that follows the same guidelines of [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
183
+ page_content=' ii) An upgraded GPU-based implementation of GMM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
184
+ page_content=' We implemented the GPU-GMM to have a fair computation comparison with our VSGP approach which runs on GPU.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
185
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
186
+ page_content=' Simulation Experiments 1) Simulation Setup:: The simulation setup consists of two machines that communicate to each other over WiFi: The first machine, where the scout and the environment are simulated, is an Intel® Core™ i7 NUC11 PC equipped with 64 GB RAM and 6 GB Geforce RTX2060 GPU.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
187
+ page_content=' The second machine, which acts as the base, is an Intel® Core™ i7 Alienware Laptop equipped with 32 GB RAM and 8 GB Geforce RTX2080 GPU.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
188
+ page_content=' Both are connected using a 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
189
+ page_content='4 GHz WiFi router.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
190
+ page_content=' The network flow is monitored using the ifstat tool to evaluate the communication performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
191
+ page_content=' The mine tunnel of the cpr inspection world, which is developed by ClearPath robotics, is used as our simulation environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
192
+ page_content=' This environment is selected because it represents one of the targeted low-bandwidth subterranean environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
193
+ page_content=' The mine tunnel part of the cpr inspection world fits in a rectan- gular area with an approximated area of 30×65m2, the tunnel length is around 135m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
194
+ page_content=' The ground elevation and the height of the tunnel are different from one place to another.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
195
+ page_content=' The ClearPath Jackal robot is used as the scout.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
196
+ page_content=' The proposed approach was evaluated through 20 real-time simulation trials.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
197
+ page_content=' In each trial, the robot starts at the beginning of the cave and follows a predefined path along the mine using way-point based navigation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
198
+ page_content=' 2) Simulation Results: We evaluate the performance of our approach based on the reduction in the memory and the communication rate required to transmit the sensor observa- tions between the scout and the base.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
199
+ page_content=' The VSGP representa- tion requires only 1514 floating points (FP) to represent the entire pointcloud (3 FP for each inducing point (3x500) + 6 FP for robot pose + 6 FP for the hyperparameters).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
200
+ page_content=' This value is less than the memory needed by the GMM approach which requires ∼ 2000 FP (10 FP for each component (10x200) distributed as 6 FP for covariance + 3 FP for mean + 1 FP for weight) [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
201
+ page_content=' We send the robot pose to the base because our approach encodes the observation relative to the robot body frame, while the GMM approach first transforms the observation from the robot body frame to a global frame using the robot current pose and then sends the encoded Gaussians densties with respect to the global frame.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
202
+ page_content=' To quantify the accuracy of the reconstructed SGP point- cloud, we use the Root Mean Square Deviation (RMSD) between the radius predicted by our approach and the actual radius of each point on the occupancy surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
203
+ page_content=' RMSD = � ∑N i=1 (ri − ˆri)2 N , (8) where N is the size of the pointcloud, ri is the actual radius at (θi,αi), and ˆri is the estimated radius value at the same point (θi,αi).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
204
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
205
+ page_content=' 5a shows the mean and the standard deviation of the RMSD for each predicted point over 110 observations (each observation has around 10K to 50K points).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
206
+ page_content=' Also, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
207
+ page_content=' 5a implicitly reflects the memory required by VSGP and GMM to store one observation, as described before that the memory required to store one observation can be calculated by multiplying the number of inducing points (bottom x-axis) by 3 and multiplying the number of components (top x-axis) by 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
208
+ page_content=' We match pairs of the VSGP and GMM models (in terms of the number of inducing points and components) based on the memory requirement and the accuracy of the (a) (b) (c) (d) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
209
+ page_content=' 5: Performance comparisons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
210
+ page_content=' (a) shows the RMSD between the reconstructed and the original pointcloud for VSGP(vs #induc- ing points) and GMM(vs #components);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
211
+ page_content=' (b) illustrates the training time against the pointcloud size (considering 500-inducing points VSGP, and equivalently, 200-components GMM);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
212
+ page_content=' (c) represents the training time versus the #VSGP-inducing points and #GMM- components;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
213
+ page_content=' (d) shows the prediction time versus the #VSGP- inducing points and #GMM-components.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
214
+ page_content=' reconstructed pointcloud (reflected by the RMSD) for each pair, see table I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
215
+ page_content=' For example, 500-inducing points VSGP results in an average RMSD value for each point of 9 cm with a standard deviation of 10 cm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
216
+ page_content=' This corresponds to an average RMSD of 11 cm with a standard deviation of 25 cm for a 200-components GMM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
217
+ page_content=' TABLE I: VSGP vs GMM(ind: inducing, cps: components) VSGP GMM # Memory RMSD # Memory RMSD ind ∼FPs ∼cm cps ∼FPs ∼cm 200 600 20±22 50 500 27±50 300 900 14±15 100 1000 16±35 400 1200 12±14 150 1500 13±31 500 1500 9±10 200 2000 11±29 600 1800 9±10 250 2500 11±30 Now we analyze the results in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
218
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
219
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
220
+ page_content=' 5a shows the RMSD values associated with VSGP have a smaller standard deviation than the GMM’s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
221
+ page_content=' It also shows that increasing the number of the VSGP-inducing points (bottom x-axis) or the number of the GMM-components (top x-axis) will result in smaller RMSD (higher accuracy).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
222
+ page_content=' An intensive evaluation of the training and the prediction phases is presented in Figs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
223
+ page_content=' 5b-5d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
224
+ page_content=' The reduction in the training time versus the reduction in the size of the raw pointcloud is presented in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
225
+ page_content=' 5b, where 0% removal percent means a pointcloud size of 57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
226
+ page_content='6K points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
227
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
228
+ page_content=' 5c shows the increase in training time versus the number of inducing points and the number of components.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
229
+ page_content=' We compare the training time of the VSGP, the GMM-CPU (considering the #Components 50 100 150 200 250 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
230
+ page_content='75 - GMM 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
231
+ page_content='50 VSGP RMSI 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
232
+ page_content='25 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
233
+ page_content='00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
234
+ page_content='25 200 300 00f 500 600 #Inducing Points103 Su VSGP GNIM GNIM-GPU 102 3350606775 Removal Components 50 100 150 200 250 103 GMM GMM-GPU VSGP 102 200 400 600 Inducing PointsComponents 50 100 150 200 250 VSGP [stu] GMM 40 Prediction 20 200 400 600 Inducing Points(a) (b) (c) (d) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
235
+ page_content=' 6: (a) shows the simulated mine environment in Gazebo;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
236
+ page_content=' (b) shows the Octomap of the mine generated from the original pointcloud;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
237
+ page_content=' (c) shows the Octomap generated from the recon- structed SGP pointcloud;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
238
+ page_content=' (d) shows the communication rate and the accumulated data sent from the scout to the base in case of sending raw pointcloud PCL(1750KB/S, 840MB), GMM data(25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
239
+ page_content='8KB/S, 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
240
+ page_content='4MB), and VSGP data(18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
241
+ page_content='2KB/S, 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
242
+ page_content='7MB).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
243
+ page_content=' The y-axis is plotted in log-scale.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
244
+ page_content=' default configuration of the GMM approach used in [13]), and the GMM-GPU implementation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
245
+ page_content=' The results show that our approach outperforms both the CPU and GPU imple- mentation of the GMM approach in terms of training time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
246
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
247
+ page_content=' 5d presents the variation of the prediction time of the VSGP versus the number of the inducing points, where the values shown in the figure represent the time required to predict the occupancy value associated with all the points of the grid query x (57600 points).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
248
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
249
+ page_content=' 5d indicates that for a matching pair of GMM and VSGP (Table I), GMM has a less sampling time than the paired VSGP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
250
+ page_content=' However, the pointcloud reconstruction process of the VSGP is more convenient than the GMM approach because a fundamental difference between sam- pling the VSGP and the GMM is that: when we sample from a GMM, we get a sample (from a distribution) with random values (θs,αs,rs), so we do not have control over the location of the sample on the occupancy surface (θs,αs).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
251
+ page_content=' In contrast, for the VSGP approach, we predict the radius value rs for a certain point on the occupancy surface defined by (θs,αs).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
252
+ page_content=' So, we have control over the point location on the occupancy surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
253
+ page_content=' While constructing the 3D octomap of the tunnel environment using the scout-base scheme, the average communication rate was 1750 KB/S, 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
254
+ page_content='8 KB/S, and 18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
255
+ page_content='2 KB/S for sending raw point clouds, GMM encoded data, and VSGP encoded data respectively, see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
256
+ page_content=' 6d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
257
+ page_content=' The accumulated data sent through the network is reduced from 840 MB for sending raw pointcloud to 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
258
+ page_content='4 MB in case of GMM and 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
259
+ page_content='7 MB in case of VSGP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
260
+ page_content=' This indicates a (a) (b) (c) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
261
+ page_content=' 7: Indoor demonstration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
262
+ page_content=' (a) shows octomap of the laboratory building generated from the original pointcloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
263
+ page_content=' (b) shows octomap generated from the reconstructed SGP pointcloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
264
+ page_content=' (c) shows the reduction in the communication rate and the accumulated data sent from the scout to the base, where log-scale is used for y-axis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
265
+ page_content=' PCL represents the raw pointcloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
266
+ page_content=' compression ratio of ∼ 96 (840/8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
267
+ page_content='7 ∼ 1750/18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
268
+ page_content='2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
269
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
270
+ page_content=' Hardware Experiment A Jackal mobile robot, equipped with a VLP-16 LiDAR and NUC11 PC, was used as the scout, while the Alien- ware laptop was used as the base.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
271
+ page_content=' The demonstration was conducted in an indoor environment, where the VSGP- encoded pointcloud data was sent from the scout to the base to generate a 3D Octomap [31] of the building from the SGP reconstructed pointcloud in real-time, see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
272
+ page_content=' 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
273
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
274
+ page_content=' 7c shows the reduction in the communication rate for the hardware experiment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
275
+ page_content=' The communication rate dropped from around 560 KB for transmitting raw pointcloud to around 8 KB for transmitting the encoded VSGP (this ratio is equivalent to 70 times smaller rate).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
276
+ page_content=' The communication rate of the hardware experiment is low compared to the simulation experiment because the LiDAR resolution was halved during the hardware experiment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
277
+ page_content=' The total amount of data transmitted at the end of each trial was around 100 MB for sending raw pointcloud and only around 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
278
+ page_content='4 MB for sending the VSGP encoded observation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
279
+ page_content=' VI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
280
+ page_content=' CONCLUSION In this paper, we introduce a lightweight representation for the 3D pointcloud using the VSGP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
281
+ page_content=' This representation allows high-fidelity observations to be efficiently stored and transmitted through limited-bandwidth communication channels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
282
+ page_content=' Based on the results of the simulation and hardware experiments, our approach results in around 70-100 times smaller size representation of the sensor observation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
283
+ page_content=' This compact representation can facilitate many of the robotics KB PCI GMM Rate 102 VSGP MB PCL 101 Data GMM VSGP 0 O0T 200 300 400 500 Time [S]S/ POL 101 VSGP Rate KB PCL Data 102 VSGP 0 25 50 75 100 125 150 175 Time [S]applications which are limited by the communication band- width such as subterranean and underwater exploration, search and rescue missions, and planetary exploration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
284
+ page_content=' In addition, our approach can also be beneficial in the context of multi-robot collaboration where a number of robots are required to share high-volume information (3D pointcloud) through low-bandwidth channels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
285
+ page_content=' ACKNOWLEDGEMENT This work was supported by National Science Foundation with grant numbers 2006886 and 2047169.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
286
+ page_content=' REFERENCES [1] V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
287
+ page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
288
+ page_content=' Cid et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
289
+ page_content=' Keeping communications flowing during large-scale disasters: leveraging amateur radio innovations for disaster medicine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
290
+ page_content=' Disaster medicine and public health preparedness, 12(2):257–264, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
291
+ page_content=' [2] Michalis Titsias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
292
+ page_content=' Variational learning of inducing variables in sparse gaussian processes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
293
+ page_content=' In Artificial intelligence and statistics, pages 567– 574.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
294
+ page_content=' PMLR, 2009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
295
+ page_content=' [3] Chao Cao, Marius Preda, and Titus Zaharia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
296
+ page_content=' 3d point cloud com- pression: A survey.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
297
+ page_content=' In The 24th International Conference on 3D Web Technology, pages 1–9, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
298
+ page_content=' [4] Yu Feng, Shaoshan Liu, and Yuhao Zhu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
299
+ page_content=' Real-time spatio-temporal lidar point cloud compression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
300
+ page_content=' In 2020 IEEE/RSJ international conference on intelligent robots and systems (IROS), pages 10766– 10773.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
301
+ page_content=' IEEE, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
302
+ page_content=' [5] Tim Golla and Reinhard Klein.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
303
+ page_content=' Real-time point cloud compression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
304
+ page_content=' In 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 5087–5092.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
305
+ page_content=' IEEE, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
306
+ page_content=' [6] S´ebastien Lasserre, David Flynn, and Shouxing Qu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
307
+ page_content=' Using neighbour- ing nodes for the compression of octrees representing the geometry of point clouds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
308
+ page_content=' In Proceedings of the 10th ACM Multimedia Systems Conference, pages 145–153, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
309
+ page_content=' [7] Dorina Thanou, Philip A Chou, and Pascal Frossard.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
310
+ page_content=' Graph-based compression of dynamic 3d point cloud sequences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
311
+ page_content=' IEEE Transactions on Image Processing, 25(4):1765–1778, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
312
+ page_content=' [8] Yan Huang, Jingliang Peng, C-C Jay Kuo, and M Gopi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
313
+ page_content=' Octree-based progressive geometry coding of point clouds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
314
+ page_content=' In PBG@ SIGGRAPH, pages 103–110, 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
315
+ page_content=' [9] Lila Huang, Shenlong Wang, Kelvin Wong, Jerry Liu, and Raquel Urtasun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
316
+ page_content=' Octsqueeze: Octree-structured entropy model for lidar com- pression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
317
+ page_content=' In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1313–1323, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
318
+ page_content=' [10] Maurice Quach, Jiahao Pang, Dong Tian, Giuseppe Valenzise, and Fr´ed´eric Dufaux.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
319
+ page_content=' Survey on deep learning-based point cloud com- pression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
320
+ page_content=' Frontiers in Signal Processing, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
321
+ page_content=' [11] Louis Wiesmann, Andres Milioto, Xieyuanli Chen, Cyrill Stachniss, and Jens Behley.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
322
+ page_content=' Deep compression for dense point cloud maps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
323
+ page_content=' IEEE Robotics and Automation Letters, 6(2):2060–2067, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
324
+ page_content=' [12] Wei Yan, Shan Liu, Thomas H Li, Zhu Li, Ge Li, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
325
+ page_content=' Deep autoencoder-based lossy geometry compression for point clouds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
326
+ page_content=' arXiv preprint arXiv:1905.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
327
+ page_content='03691, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
328
+ page_content=' [13] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
329
+ page_content=' Tabib et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
330
+ page_content=' Real-time information-theoretic exploration with gaussian mixture model maps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
331
+ page_content=' In Robotics: Science and Systems, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
332
+ page_content=' [14] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
333
+ page_content=' O’Meadhra et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
334
+ page_content=' Variable resolution occupancy mapping using gaussian mixture models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
335
+ page_content=' IEEE Robotics and Automation Letters, 4(2):2015–2022, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
336
+ page_content=' [15] Task-Specific Manipulator Design.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
337
+ page_content=' Communication-efficient planning and mapping for multi-robot exploration in large environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
338
+ page_content=' Jour- nal Article, 15(2):e1971, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
339
+ page_content=' [16] Carl Edward Rasmussen and Christopher K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
340
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
341
+ page_content=' Williams.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
342
+ page_content=' Gaussian Processes for Machine Learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
343
+ page_content=' The MIT Press, 2005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
344
+ page_content=' [17] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
345
+ page_content=' Singh, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
346
+ page_content=' Krause, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
347
+ page_content=' Guestrin, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
348
+ page_content=' Kaiser, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
349
+ page_content=' Batalin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
350
+ page_content=' Efficient planning of informative paths for multiple robots.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
351
+ page_content=' In the 20th International Joint Conference on Artifical Intelligence, IJCAI’07, pages 2204–2211, 2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
352
+ page_content=' [18] Ruofei Ouyang, Kian Hsiang Low, Jie Chen, and Patrick Jaillet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
353
+ page_content=' Multi-robot active sensing of non-stationary gaussian process-based environmental phenomena.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
354
+ page_content=' In Proceedings of the 2014 International Conference on Autonomous Agents and Multi-agent Systems, pages 573–580, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
355
+ page_content=' [19] Duy Nguyen-tuong and Jan Peters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
356
+ page_content=' Local gaussian process regression for real time online model learning and control.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
357
+ page_content=' In In Advances in Neural Information Processing Systems 22 (NIPS, 2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
358
+ page_content=' [20] Lehel Csat´o and Manfred Opper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
359
+ page_content=' Sparse on-line gaussian processes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
360
+ page_content=' Neural computation, 14(3):641–668, 2002.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
361
+ page_content=' [21] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
362
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
363
+ page_content=' Smola and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
364
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
365
+ page_content=' Bartlett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
366
+ page_content=' Sparse greedy gaussian process regression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
367
+ page_content=' In Advances in neural information processing systems, pages 619–625, 2001.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
368
+ page_content=' [22] Ch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
369
+ page_content=' Williams and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
370
+ page_content=' Seeger.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
371
+ page_content=' Using the nystr¨om method to speed up kernel machines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
372
+ page_content=' In Proceedings of the 14th annual conference on neural information processing systems, number CONF, pages 682– 688, 2001.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
373
+ page_content=' [23] N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
374
+ page_content=' Lawrence, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
375
+ page_content=' Seeger, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
376
+ page_content=' Herbrich.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
377
+ page_content=' Fast sparse gaussian process methods: The informative vector machine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
378
+ page_content=' In 16th annual conference on neural information processing systems, number CONF, pages 609– 616, 2003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
379
+ page_content=' [24] E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
380
+ page_content=' Snelson and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
381
+ page_content=' Ghahramani.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
382
+ page_content=' Sparse gaussian processes using pseudo-inputs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
383
+ page_content=' Advances in neural information processing systems, 18:1257, 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
384
+ page_content=' [25] Matthias Seeger.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
385
+ page_content=' Bayesian gaussian process models: Pac-bayesian generalisation error bounds and sparse approximations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
386
+ page_content=' Technical report, University of Edinburgh, 2003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
387
+ page_content=' [26] Rishit Sheth, Yuyang Wang, and Roni Khardon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
388
+ page_content=' Sparse variational inference for generalized gp models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
389
+ page_content=' In International Conference on Machine Learning, pages 1302–1311.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
390
+ page_content=' PMLR, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
391
+ page_content=' [27] Christopher K Williams and Carl Edward Rasmussen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
392
+ page_content=' Gaussian processes for machine learning, volume 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
393
+ page_content=' MIT press Cambridge, MA, 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
394
+ page_content=' [28] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
395
+ page_content=' Matthews et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
396
+ page_content=' Gpflow: A gaussian process library using tensor- flow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
397
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
398
+ page_content=' Mach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
399
+ page_content=' Learn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
400
+ page_content=' Res.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
401
+ page_content=', 18(40):1–6, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
402
+ page_content=' [29] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
403
+ page_content=' Abadi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
404
+ page_content=' Tensorflow: A system for large-scale machine learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
405
+ page_content=' In 12th {USENIX} symposium on operating systems design and implementation ({OSDI} 16), pages 265–283, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
406
+ page_content=' [30] Morgan Quigley, Ken Conley, Brian Gerkey, Josh Faust, Tully Foote, Jeremy Leibs, Rob Wheeler, Andrew Y Ng, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
407
+ page_content=' Ros: an open-source robot operating system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
408
+ page_content=' In ICRA workshop on open source software, volume 3, page 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
409
+ page_content=' Kobe, Japan, 2009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
410
+ page_content=' [31] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
411
+ page_content=' Hornung et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
412
+ page_content=' Octomap: An efficient probabilistic 3d mapping framework based on octrees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
413
+ page_content=' Autonomous robots, 34(3):189–206, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JNFIT4oBgHgl3EQfZCuh/content/2301.11251v1.pdf'}
JdE2T4oBgHgl3EQfUgew/content/tmp_files/2301.03814v1.pdf.txt ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ WKB Across Caustics: The Screened-WKB Method
2
+ Oscar P. Bruno∗
3
+ Martin D. Maas∗
4
+ Abstract
5
+ We present a new methodology, based on the WKB approximation and Fast Fourier Trans-
6
+ forms, for the evaluation of wave propagation through inhomogeneous media. This method can
7
+ accurately resolve fields containing caustics, while still enjoying the computational advantages
8
+ of the WKB approximation, namely, the ability to resolve arbitrarily high-frequency problems in
9
+ computing times which are orders-of-magnitude shorter than those required by other algorithms
10
+ presently available. For example, the proposed approach can simulate with high accuracy (with
11
+ errors such as e.g. 0.1%–0.001%) the propagation of 5 cm radar signals across two-dimensional
12
+ configurations resembling atmospheric ducting conditions, spanning hundreds of kilometers and
13
+ millions of wavelengths in electrical size, in computing times of a few minutes in a single CPU
14
+ core. [Preliminary version]
15
+ 1
16
+ Introduction
17
+ Computations of high-frequency wave propagation through inhomogeneous media play a pivotal
18
+ roles in diverse fields such as telecommunications, remote sensing, seismics, quantum mechanics,
19
+ and optics. A wide range of methodologies have been developed over the last century for the treat-
20
+ ment of high-frequency volumetric-propagation problems. Given that direct numerical simulation
21
+ of the configurations of interest, which comprise thousands to millions of wavelengths in acousti-
22
+ cal/electrical size, is unfeasible in 2D and even more in 3D, the proposed approaches usually contain
23
+ a combination of analytic and numerical approximations.
24
+ The celebrated WKB approximation, also known as the Wentzel-Kramers-Brillouin approxima-
25
+ tion [2,8], was the first method to obtain accurate solutions to problems involving propagation over
26
+ large distances, and is based on the introduction of a system of ray-coordinates, over which the
27
+ amplitude and phase of the solution exhibit slow variations. However, the WKB approximation
28
+ can break down in certain situations, particularly when the ray mapping becomes singular (i.e.
29
+ at caustics). Many approaches have been proposed over time to overcome this limitation, most
30
+ notably the KMAH-index theory, according to which a correction can be incorporated after the
31
+ caustic, of the form (−i)m, where the constant m depends on the number and type of caustics that
32
+ the ray has traversed. This formulation still breaks down at caustics, is inaccurate near caustics,
33
+ and, given its complexity, it is seldom used in practice.
34
+ Another notable approach to solve these type of problems is provided by the parabolic approxi-
35
+ mation introduced in [9], together with its many subsequent versions and improvements, including
36
+ the wide-angle approximation [5]. The method of phase-screens [14], in turn, which assumes a con-
37
+ stant refractivity profile along each vertical volumetric screen, is applicable for certain restricted
38
+ sets of configurations. While, unlike the classical WKB approach, these methods are valid at and
39
+ around caustics, their limitations arise from its computational cost. For example, mesh-sizes of
40
+ ∗Computing and Mathematical Sciences, Caltech, Pasadena, CA 91125, USA
41
+ 1
42
+ arXiv:2301.03814v1 [physics.comp-ph] 10 Jan 2023
43
+
44
+ the order of ∆z ≈ λ
45
+ 4 and ∆x ≈ 12.5λ are reported for propagation distances in the order of a
46
+ few hundreds of wavelengths (see [7] and references therein). (Here z and x denote the vertical
47
+ and range variables, respectively.) The parabolic equation methods are most often based on use
48
+ of finite-difference approximations, which gives rise to associated dispersion errors, while Fourier
49
+ expansions in the vertical axis are only applicable in the lowest-order parabolic approximations.
50
+ The combined effect of large propagation distances, and the presence of dispersion error, and the
51
+ requirement of fine spatial discretizations, can lead to extremely large computational cost, under
52
+ reasonable error tolerances, for challenging configurations commonly arising in applications.
53
+ Other notable approaches include the Gaussian beams formulation, with contributions spanning
54
+ from the 60’s, including [1,3,6,13] among many others. This formulation is based on an additional
55
+ approximation to WKB, which seeks to obtain the phase in the form of a quadratic polynomial,
56
+ whose Hessian matrix is evolved along the ray. This approach eliminates ray-bunching at caustics,
57
+ and produces fields which remain bounded.
58
+ However, theoretical convergence as k → ∞ has
59
+ not been established and is believed to be slow. Moreover, the initial beam representation is a
60
+ challenging optimization problem, which leads to errors of a few percent even for propagations
61
+ distances of the order of a small number of wavelengths [13].
62
+ An additional approach, known as Dynamic Surface Extension (DSE, see [11,12]), can success-
63
+ fully propagate wavefronts in a Cartesian discretization. However, the amplitude computations
64
+ present the same limitations as the classical WKB approximation. Finally, the Kinetic Formu-
65
+ lation [4] views each ray tracing equation as describing the motion of a ”particle” (e.g. photon,
66
+ phonon). This method presents severe computational difficulties, as the initial conditions and so-
67
+ lutions are given in terms of Wigner measures, a δ-function that vanishes for incorrect directions p.
68
+ The approach put forth in this paper, on the other hand, is based on the WKB approximation,
69
+ and overcomes the limitations posed by caustics by resorting to a family of curves (or screens)
70
+ on which the total field is decomposed in Fourier modes. Each mode is then propagated for large
71
+ electrical distances (i.e. 20,000λ in the example considered in Figure 6) which are also short enough
72
+ that the presence of caustics is avoided for each Fourier mode.
73
+ 2
74
+ The Screened WKB Method
75
+ We consider, as a model problem, the scalar Helmholtz equation
76
+ ∆u(r) + k2ε(r)u(r) = 0
77
+ (1)
78
+ u = us + uinc
79
+ (2)
80
+ lim
81
+ r→∞ r
82
+ �∂us
83
+ ∂r − ikus
84
+
85
+ = 0,
86
+ (3)
87
+ whose character at high frequencies presents challenges often found in diverse fields, such high-
88
+ frequency electromagnetism, acoustics, seismics and quantum mechanics. The proposed screened-
89
+ WKB approach first introduces a family of curves (or screens) Γq, for q = 0, 1, . . . , Ns, as depicted
90
+ in Figure 1. The method proceeds by propagating the solution from one screen to the next on the
91
+ basis of Fourier expansions on the screens Γq and applications of the classical WKB approach for
92
+ each separate Fourier mode.
93
+ For conciseness, we consider planar screens of the form Γq = {(xq, z) : z ∈ (za, zb)}. The initial
94
+ conditions on Γ0 are user-prescribed, and given by:
95
+ u|Γ0 = uinc|Γ0
96
+ (4)
97
+ 2
98
+
99
+ Figure 1: Schematics underlying the proposed S-WKB method. Here and throughout this paper
100
+ flat screens Γq are used, but curved screens could alternatively be utilized, if convenient.
101
+ We then represent the incident field on each screen Γq, arising from propagation of the field from
102
+ Γq−1 (or given by (4) for q = 0) by exploiting certain expressions of the form
103
+ u(x, z) ≈
104
+ N/2−1
105
+
106
+ n=−N/2
107
+ Aq
108
+ n(x, z) exp(ikψq
109
+ n(x, z)),
110
+ (5)
111
+ valid between Γq and Γq+1, together with the WKB approximation.
112
+ For simplicity, in our description we consider configurations which may accurately be expressed
113
+ in terms of z-periodic functions, of period [za, zb], which, in particular, can be used to treat cases
114
+ wherein the solution decays rapidly outside a bounded interval in the z variable. (Other arrange-
115
+ ments, including rough top and bottom surfaces and other irregularities, can also be incorporated
116
+ in this framework, but are not considered in this paper at any length.) Under such assumptions,
117
+ for a given screen Γq, a “vertical” DFT can be used by introducing an equi-spaced grid
118
+ {zm : m = −N/2, . . . , N/2 − 1}
119
+ (6)
120
+ in the interval [za, zb] and performing an FFT—which yields
121
+ wq
122
+ j =
123
+ N/2−1
124
+
125
+ m=−N/2
126
+ u(xq, zm)e−ijzm.
127
+ (7)
128
+ Then, re-expressing the field u(xq, z) in terms of an inverse DFT, we obtain
129
+ u|Γq ≈ 1
130
+ N
131
+ N/2−1
132
+
133
+ m=−N/2
134
+ wq
135
+ jeijzm,
136
+ (8)
137
+ 3
138
+
139
+ 2
140
+ I1'T2
141
+ '13Figure 2: Example 1: Ray-tracing leading to a single cusp caustic.
142
+ which may be expressed in terms of (5) by requiring that
143
+ ψq
144
+ n(xq, z) =
145
+
146
+ z + zb − za
147
+ 2
148
+
149
+ 2nπ
150
+ k(zb − za).
151
+ (9)
152
+ As a second step, each term in the expansion (5) is obtained up to the next screen Γq+1 by
153
+ means of the classical WKB expansion (see e.g. [7, chapter 3]), which, in particular, requires the
154
+ solution of the Eikonal and Transport equations
155
+ (∇ψ)2 = ε(r)
156
+ (10)
157
+ and
158
+ 2∇ψ · ∇A + A∆ψ = 0.
159
+ (11)
160
+ In the present case, the initial conditions for each Fourier mode on Γq are obtained from (9) and
161
+ (10):
162
+ ∂zψn(xq, z) =
163
+ 2nπ
164
+ k(zb − za)
165
+ (12)
166
+ ∂xψn(xq, z) =
167
+
168
+ ε(xq, z) −
169
+
170
+ 2nπ
171
+ k(zb − za)
172
+ �2
173
+ (13)
174
+ This procedure yields a finite number of adequately spaced geometrical-optics rays, and corre-
175
+ sponding values of ψn and An along the rays for the n-th mode. By adequately selecting the spacing
176
+ of the screens Γq it can ensured that all the modes − N
177
+ 2 ≤ n ≤ N
178
+ 2 − 1 propagate to the next screen
179
+ 4
180
+
181
+ Figure 3: S-WKB solution (top), and physically-exact separation-of-variables solution with super-
182
+ imposed geometrical-optics rays (bottom), with k = 125, along a propagation domain 40 km
183
+ (800, 000 wavelengths) in range.
184
+ 5
185
+
186
+ Figure 4: Error for the “single-caustic” solution displayed in Figure (3): a relative error of the
187
+ order of 10−5 was obtained throughout the propagation domain.
188
+ Γq+1 without incurring caustics. Interpolation can then be used on Γq+1 to obtain approximate
189
+ values of u on the 1D Cartesian grid (xq+1, zm) (− N
190
+ 2 ≤ n ≤ N
191
+ 2 − 1) on Γq+1:
192
+ u(xq+1, zm) ≈
193
+ N/2−1
194
+
195
+ n=−N/2
196
+ Aq
197
+ n(xq+1, zm) exp(ikψq
198
+ n(zm)).
199
+ (14)
200
+ Expanding u(xq+1, z) in a Fourier series along Γq+1 the next iteration of the algorithm can then
201
+ be initiated. Repeating this procedure for all screens the field u over the domain of interest can be
202
+ obtained.
203
+ 3
204
+ Numerical Results
205
+ This section presents results of applications of the proposed algorithm to problems of wave propa-
206
+ gation through inhomogeneous media, in two-dimensional configurations, and through wide ranges
207
+ of problem parameters. In order to evaluate the accuracy of the proposed S-WKB method by
208
+ comparisons with solutions obtainable by means of separation of variables, we first consider x-
209
+ invariant permittivities (i.e. permittivities of the form ε(x, z) = ε(z)), as described in what follows,
210
+ for which a simple high-order spectral solver can be used to obtained reference solutions that are
211
+ physically-exact—i.e., which contain no approximations to (1) other than those inherent in the well
212
+ established numerical solver utilized.
213
+ 6
214
+
215
+ Figure 5: Geometrical optics rays for a “ducting” configuration.
216
+ 3.1
217
+ High-order reference solutions for x-invariant permittivities
218
+ In order to obtain a valid solution to (1) via separation of variables we seek a solution of the form
219
+ u(x, z) =
220
+
221
+
222
+ i=0
223
+ aieiαixφi(z).
224
+ (15)
225
+ Substituting (15) in (1) leads to
226
+
227
+
228
+ i=0
229
+ (−α2 + φ′′
230
+ i (z) + k2ε(z))aieiαix = 0.
231
+ (16)
232
+ Using the orthogonality of the complex exponentials we then obtain
233
+ (−α2
234
+ i + φ′′
235
+ i (z) + k2ε(z))ai = 0.
236
+ (17)
237
+ It follows that the non-zero coefficients αi in (15) satisfy the Sturm-Liouville problem:
238
+ φ′′
239
+ i (z) + k2ε(z)φi(z) = α2
240
+ i φi(z)
241
+ (18)
242
+ with radiation boundary conditions:
243
+ lim
244
+ z→±∞
245
+
246
+ zφ′
247
+ i − ikφi
248
+
249
+ = 0,
250
+ (19)
251
+ Numerically, the radiation boundary conditions can be imposed by considering a sufficiently
252
+ large interval (za, zb) and imposing either Dirichlet or periodic boundary conditions at such points.
253
+ The resulting Sturm-Liouville problem can be discretized with high-order spectral methods. For
254
+ the purposes of the present paper we utilized the spectral eigensolver [10], which is available in the
255
+ ApproxFun.jl Julia package.
256
+ 3.2
257
+ Evaluation of the S-WKB accuracy for a Gaussian permittivity model
258
+ In this section we consider the exponential permittivity model
259
+ ε(z) = 1 + ae−bz2
260
+ (20)
261
+ 7
262
+
263
+ Figure 6: “Multiple caustics” test case depicting an idealized “ducting” configuration. S-WKB field
264
+ values (top), and field values with super-imposed geometrical-optics rays (bottom). The geometrical
265
+ optics rays are depicted in Figure 5.
266
+ 8
267
+
268
+ whose physically-exact solution can be obtained by relying on the method described in Section 3.1.
269
+ For our example, an incident field given by a Gaussian beam
270
+ uinc(x, z) =
271
+ � ∞
272
+ −∞
273
+ ei√
274
+ k2−β2x+iβze− β2
275
+ σ2 dβ
276
+ (21)
277
+ is utilized, wherein the integral in the variable β is evaluated via standard numerical integration
278
+ techniques.
279
+ In our first example we consider the permittivity model (20) with parameters a = 10−4 and
280
+ b = 10−4—which, at C-band, results in a configuration that gives rise to a single caustic of cusp
281
+ type for the first 40km (800, 000 wavelengths) in horizontal propagation range. The geometrical
282
+ optics rays are displayed in Fig. 2. The S-WKB solution alongside the Sturm-Liouville solution
283
+ with superimposed ray-tracing are depicted in Fig. 3. As shown in Fig. 4, the relative errors for
284
+ this configuration are of the order of 10−5. Employing N = 400 Fourier modes and a total of 40
285
+ screens, the S-WKB solution in this case was obtained in a computing times of 2 minutes in a
286
+ single-core, whereas the separation-of-variables solution required single-core runs of approximately
287
+ 1.5 hours.
288
+ Figure 7: Smooth convex lens simulations produced by the S-WKB method. Note the fine-scale
289
+ fields behind caustics, whose simulation is otherwise quite challenging; cf. e.g. [7, Fig. 6.10].
290
+ For our next example we consider a “ducting” configuration, in which the incident Gaussian
291
+ beam is tilted by an angle of 0.2◦, and where the Gaussian permittivity (20) was used with param-
292
+ eters a = 10−4 and b = 10−3—in such a way that the energy is contained within a bounded interval
293
+ along the z axis. The geometrical-optics rays form a complex system with multiple caustics, as
294
+ 9
295
+
296
+ Figure 8: Permittivity and geometrical-optics rays (left), and rays superimposed on the field de-
297
+ picted in Figure 7.
298
+ depicted in Fig. 5. We consider the propagation of this signal over a range of 200Km (4 million
299
+ wavelengths) in range. A total of n = 800 Fourier modes and 200 of the order of 0.1%.
300
+ 4
301
+ Smooth Lens
302
+ We now consider the test case of a smooth convex lens proposed in [4] on the basis of the permittivity
303
+ function given by
304
+ ε(r) =
305
+
306
+ 1
307
+ d2 > L
308
+
309
+ a
310
+ b−cos(πd2)
311
+ �2
312
+ d2 ≤ L
313
+ d2 =
314
+ �x − xc
315
+ xd
316
+ �2
317
+ +
318
+ �z − zc
319
+ zd
320
+ �2
321
+ (22)
322
+ For the numerical example depicted in Fig. 7, we have set L = 1, a = 4, b = 3, xc = 0.5, xd =
323
+ 0.2, zc = 0, zd = 0.8. The displayed result compares favorably to that presented in [4] on the basis
324
+ of a kinetic formulation, as well as the similar problem demonstrated in [7, Fig. 6.10]. A separation
325
+ of variables solution is not available in this case, and the error could be evaluated by means of
326
+ S-WKB implementation of higher order. In presence of the previous examples, however, we may
327
+ estimate the error in the range of 0.1% to 0.001%.
328
+ References
329
+ [1] Vasilii M Babich and Vladimir Sergeevich Buldyrev.
330
+ Short-wavelength diffraction theory:
331
+ asymptotic methods. Springer, 1991.
332
+ [2] Max Born and Emil Wolf. Principles of optics: electromagnetic theory of propagation, inter-
333
+ ference and diffraction of light. Elsevier, 2013.
334
+ [3] Vlastislav ˇCerven`y, Mikhail M Popov, and Ivan Pˇsenˇc´ık. Computation of wave fields in inhomo-
335
+ geneous media—gaussian beam approach. Geophysical Journal International, 70(1):109–128,
336
+ 1982.
337
+ [4] Bj¨orn Engquist and Olof Runborg. Computational high frequency wave propagation. Acta
338
+ numerica, 12:181–266, 2003.
339
+ 10
340
+
341
+ [5] RH Hardin. Applications of the split-step fourier method to the numerical solution of nonlinear
342
+ and variable coefficient wave equations. SIAM Review (Chronicles), 15, 1973.
343
+ [6] Lars H¨ormander. Linear partial differential operators. Springer, 1963.
344
+ [7] Finn B Jensen, William A Kuperman, Michael B Porter, Henrik Schmidt, and Alexandra
345
+ Tolstoy. Computational ocean acoustics, volume 794. Springer, 2011.
346
+ [8] Joseph B Keller. Geometrical theory of diffraction. Josa, 52(2):116–130, 1962.
347
+ [9] Mikhail Aleksandrovich Leontovich and Vladimir Aleksandrovich Fock. Solution of the problem
348
+ of propagation of electromagnetic waves along the earth’s surface by the method of parabolic
349
+ equation. J. Phys. Ussr, 10(1):13–23, 1946.
350
+ [10] Sheehan Olver and Alex Townsend. A fast and well-conditioned spectral method. siam RE-
351
+ VIEW, 55(3):462–489, 2013.
352
+ [11] Steven J Ruuth, Barry Merriman, and Stanley Osher. A fixed grid method for capturing the
353
+ motion of self-intersecting wavefronts and related pdes. Journal of Computational Physics,
354
+ 163(1):1–21, 2000.
355
+ [12] John Steinhoff, Meng Fan, and Lesong Wang. A new eulerian method for the computation
356
+ of propagating short acoustic and electromagnetic pulses. Journal of Computational Physics,
357
+ 157(2):683–706, 2000.
358
+ [13] Nicolay M Tanushev, Bj¨orn Engquist, and Richard Tsai. Gaussian beam decomposition of
359
+ high frequency wave fields. Journal of Computational Physics, 228(23):8856–8871, 2009.
360
+ [14] Ru-Shan Wu.
361
+ Wide-angle elastic wave one-way propagation in heterogeneous media and
362
+ an elastic wave complex-screen method.
363
+ Journal of Geophysical Research:
364
+ Solid Earth,
365
+ 99(B1):751–766, 1994.
366
+ 11
367
+
JdE2T4oBgHgl3EQfUgew/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf,len=175
2
+ page_content='WKB Across Caustics: The Screened-WKB Method Oscar P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
3
+ page_content=' Bruno∗ Martin D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
4
+ page_content=' Maas∗ Abstract We present a new methodology, based on the WKB approximation and Fast Fourier Trans- forms, for the evaluation of wave propagation through inhomogeneous media.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
5
+ page_content=' This method can accurately resolve fields containing caustics, while still enjoying the computational advantages of the WKB approximation, namely, the ability to resolve arbitrarily high-frequency problems in computing times which are orders-of-magnitude shorter than those required by other algorithms presently available.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
6
+ page_content=' For example, the proposed approach can simulate with high accuracy (with errors such as e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
7
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
8
+ page_content=' 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
9
+ page_content='1%–0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
10
+ page_content='001%) the propagation of 5 cm radar signals across two-dimensional configurations resembling atmospheric ducting conditions, spanning hundreds of kilometers and millions of wavelengths in electrical size, in computing times of a few minutes in a single CPU core.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
11
+ page_content=' [Preliminary version] 1 Introduction Computations of high-frequency wave propagation through inhomogeneous media play a pivotal roles in diverse fields such as telecommunications, remote sensing, seismics, quantum mechanics, and optics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
12
+ page_content=' A wide range of methodologies have been developed over the last century for the treat- ment of high-frequency volumetric-propagation problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
13
+ page_content=' Given that direct numerical simulation of the configurations of interest, which comprise thousands to millions of wavelengths in acousti- cal/electrical size, is unfeasible in 2D and even more in 3D, the proposed approaches usually contain a combination of analytic and numerical approximations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
14
+ page_content=' The celebrated WKB approximation, also known as the Wentzel-Kramers-Brillouin approxima- tion [2,8], was the first method to obtain accurate solutions to problems involving propagation over large distances, and is based on the introduction of a system of ray-coordinates, over which the amplitude and phase of the solution exhibit slow variations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
15
+ page_content=' However, the WKB approximation can break down in certain situations, particularly when the ray mapping becomes singular (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
16
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
17
+ page_content=' at caustics).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
18
+ page_content=' Many approaches have been proposed over time to overcome this limitation, most notably the KMAH-index theory, according to which a correction can be incorporated after the caustic, of the form (−i)m, where the constant m depends on the number and type of caustics that the ray has traversed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
19
+ page_content=' This formulation still breaks down at caustics, is inaccurate near caustics, and, given its complexity, it is seldom used in practice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
20
+ page_content=' Another notable approach to solve these type of problems is provided by the parabolic approxi- mation introduced in [9], together with its many subsequent versions and improvements, including the wide-angle approximation [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
21
+ page_content=' The method of phase-screens [14], in turn, which assumes a con- stant refractivity profile along each vertical volumetric screen, is applicable for certain restricted sets of configurations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
22
+ page_content=' While, unlike the classical WKB approach, these methods are valid at and around caustics, their limitations arise from its computational cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
23
+ page_content=' For example, mesh-sizes of ∗Computing and Mathematical Sciences, Caltech, Pasadena, CA 91125, USA 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
24
+ page_content='03814v1 [physics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
25
+ page_content='comp-ph] 10 Jan 2023 the order of ∆z ≈ λ 4 and ∆x ≈ 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
26
+ page_content='5λ are reported for propagation distances in the order of a few hundreds of wavelengths (see [7] and references therein).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
27
+ page_content=' (Here z and x denote the vertical and range variables, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
28
+ page_content=') The parabolic equation methods are most often based on use of finite-difference approximations, which gives rise to associated dispersion errors, while Fourier expansions in the vertical axis are only applicable in the lowest-order parabolic approximations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
29
+ page_content=' The combined effect of large propagation distances, and the presence of dispersion error, and the requirement of fine spatial discretizations, can lead to extremely large computational cost, under reasonable error tolerances, for challenging configurations commonly arising in applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
30
+ page_content=' Other notable approaches include the Gaussian beams formulation, with contributions spanning from the 60’s, including [1,3,6,13] among many others.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
31
+ page_content=' This formulation is based on an additional approximation to WKB, which seeks to obtain the phase in the form of a quadratic polynomial, whose Hessian matrix is evolved along the ray.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
32
+ page_content=' This approach eliminates ray-bunching at caustics, and produces fields which remain bounded.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
33
+ page_content=' However, theoretical convergence as k → ∞ has not been established and is believed to be slow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
34
+ page_content=' Moreover, the initial beam representation is a challenging optimization problem, which leads to errors of a few percent even for propagations distances of the order of a small number of wavelengths [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
35
+ page_content=' An additional approach, known as Dynamic Surface Extension (DSE, see [11,12]), can success- fully propagate wavefronts in a Cartesian discretization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
36
+ page_content=' However, the amplitude computations present the same limitations as the classical WKB approximation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
37
+ page_content=' Finally, the Kinetic Formu- lation [4] views each ray tracing equation as describing the motion of a ”particle” (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
38
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
39
+ page_content=' photon, phonon).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
40
+ page_content=' This method presents severe computational difficulties, as the initial conditions and so- lutions are given in terms of Wigner measures, a δ-function that vanishes for incorrect directions p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
41
+ page_content=' The approach put forth in this paper, on the other hand, is based on the WKB approximation, and overcomes the limitations posed by caustics by resorting to a family of curves (or screens) on which the total field is decomposed in Fourier modes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
42
+ page_content=' Each mode is then propagated for large electrical distances (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
43
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
44
+ page_content=' 20,000λ in the example considered in Figure 6) which are also short enough that the presence of caustics is avoided for each Fourier mode.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
45
+ page_content=' 2 The Screened WKB Method We consider, as a model problem, the scalar Helmholtz equation ∆u(r) + k2ε(r)u(r) = 0 (1) u = us + uinc (2) lim r→∞ r �∂us ∂r − ikus � = 0, (3) whose character at high frequencies presents challenges often found in diverse fields, such high- frequency electromagnetism, acoustics, seismics and quantum mechanics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
46
+ page_content=' The proposed screened- WKB approach first introduces a family of curves (or screens) Γq, for q = 0, 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
47
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
48
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
49
+ page_content=' , Ns, as depicted in Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
50
+ page_content=' The method proceeds by propagating the solution from one screen to the next on the basis of Fourier expansions on the screens Γq and applications of the classical WKB approach for each separate Fourier mode.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
51
+ page_content=' For conciseness, we consider planar screens of the form Γq = {(xq, z) : z ∈ (za, zb)}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
52
+ page_content=' The initial conditions on Γ0 are user-prescribed, and given by: u|Γ0 = uinc|Γ0 (4) 2 Figure 1: Schematics underlying the proposed S-WKB method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
53
+ page_content=' Here and throughout this paper flat screens Γq are used, but curved screens could alternatively be utilized, if convenient.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
54
+ page_content=' We then represent the incident field on each screen Γq, arising from propagation of the field from Γq−1 (or given by (4) for q = 0) by exploiting certain expressions of the form u(x, z) ≈ N/2−1 � n=−N/2 Aq n(x, z) exp(ikψq n(x, z)), (5) valid between Γq and Γq+1, together with the WKB approximation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
55
+ page_content=' For simplicity, in our description we consider configurations which may accurately be expressed in terms of z-periodic functions, of period [za, zb], which, in particular, can be used to treat cases wherein the solution decays rapidly outside a bounded interval in the z variable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
56
+ page_content=' (Other arrange- ments, including rough top and bottom surfaces and other irregularities, can also be incorporated in this framework, but are not considered in this paper at any length.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
57
+ page_content=') Under such assumptions, for a given screen Γq, a “vertical” DFT can be used by introducing an equi-spaced grid {zm : m = −N/2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
58
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
59
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
60
+ page_content=' , N/2 − 1} (6) in the interval [za, zb] and performing an FFT—which yields wq j = N/2−1 � m=−N/2 u(xq, zm)e−ijzm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
61
+ page_content=" (7) Then, re-expressing the field u(xq, z) in terms of an inverse DFT, we obtain u|Γq ≈ 1 N N/2−1 � m=−N/2 wq jeijzm, (8) 3 2 I1'T2 '13Figure 2: Example 1: Ray-tracing leading to a single cusp caustic." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
62
+ page_content=' which may be expressed in terms of (5) by requiring that ψq n(xq, z) = � z + zb − za 2 � 2nπ k(zb − za).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
63
+ page_content=' (9) As a second step, each term in the expansion (5) is obtained up to the next screen Γq+1 by means of the classical WKB expansion (see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
64
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
65
+ page_content=' [7, chapter 3]), which, in particular, requires the solution of the Eikonal and Transport equations (∇ψ)2 = ε(r) (10) and 2∇ψ · ∇A + A∆ψ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
66
+ page_content=' (11) In the present case, the initial conditions for each Fourier mode on Γq are obtained from (9) and (10): ∂zψn(xq, z) = 2nπ k(zb − za) (12) ∂xψn(xq, z) = � ε(xq, z) − � 2nπ k(zb − za) �2 (13) This procedure yields a finite number of adequately spaced geometrical-optics rays, and corre- sponding values of ψn and An along the rays for the n-th mode.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
67
+ page_content=' By adequately selecting the spacing of the screens Γq it can ensured that all the modes − N 2 ≤ n ≤ N 2 − 1 propagate to the next screen 4 Figure 3: S-WKB solution (top), and physically-exact separation-of-variables solution with super- imposed geometrical-optics rays (bottom), with k = 125, along a propagation domain 40 km (800, 000 wavelengths) in range.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
68
+ page_content=' 5 Figure 4: Error for the “single-caustic” solution displayed in Figure (3): a relative error of the order of 10−5 was obtained throughout the propagation domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
69
+ page_content=' Γq+1 without incurring caustics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
70
+ page_content=' Interpolation can then be used on Γq+1 to obtain approximate values of u on the 1D Cartesian grid (xq+1, zm) (− N 2 ≤ n ≤ N 2 − 1) on Γq+1: u(xq+1, zm) ≈ N/2−1 � n=−N/2 Aq n(xq+1, zm) exp(ikψq n(zm)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
71
+ page_content=' (14) Expanding u(xq+1, z) in a Fourier series along Γq+1 the next iteration of the algorithm can then be initiated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
72
+ page_content=' Repeating this procedure for all screens the field u over the domain of interest can be obtained.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
73
+ page_content=' 3 Numerical Results This section presents results of applications of the proposed algorithm to problems of wave propa- gation through inhomogeneous media, in two-dimensional configurations, and through wide ranges of problem parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
74
+ page_content=' In order to evaluate the accuracy of the proposed S-WKB method by comparisons with solutions obtainable by means of separation of variables, we first consider x- invariant permittivities (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
75
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
76
+ page_content=' permittivities of the form ε(x, z) = ε(z)), as described in what follows, for which a simple high-order spectral solver can be used to obtained reference solutions that are physically-exact—i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
77
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
78
+ page_content=', which contain no approximations to (1) other than those inherent in the well established numerical solver utilized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
79
+ page_content=' 6 Figure 5: Geometrical optics rays for a “ducting” configuration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
80
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
81
+ page_content='1 High-order reference solutions for x-invariant permittivities In order to obtain a valid solution to (1) via separation of variables we seek a solution of the form u(x, z) = ∞ � i=0 aieiαixφi(z).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
82
+ page_content=' (15) Substituting (15) in (1) leads to ∞ � i=0 (−α2 + φ′′ i (z) + k2ε(z))aieiαix = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
83
+ page_content=' (16) Using the orthogonality of the complex exponentials we then obtain (−α2 i + φ′′ i (z) + k2ε(z))ai = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
84
+ page_content=' (17) It follows that the non-zero coefficients αi in (15) satisfy the Sturm-Liouville problem: φ′′ i (z) + k2ε(z)φi(z) = α2 i φi(z) (18) with radiation boundary conditions: lim z→±∞ � zφ′ i − ikφi � = 0, (19) Numerically, the radiation boundary conditions can be imposed by considering a sufficiently large interval (za, zb) and imposing either Dirichlet or periodic boundary conditions at such points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
85
+ page_content=' The resulting Sturm-Liouville problem can be discretized with high-order spectral methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
86
+ page_content=' For the purposes of the present paper we utilized the spectral eigensolver [10], which is available in the ApproxFun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
87
+ page_content='jl Julia package.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
88
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
89
+ page_content='2 Evaluation of the S-WKB accuracy for a Gaussian permittivity model In this section we consider the exponential permittivity model ε(z) = 1 + ae−bz2 (20) 7 Figure 6: “Multiple caustics” test case depicting an idealized “ducting” configuration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
90
+ page_content=' S-WKB field values (top), and field values with super-imposed geometrical-optics rays (bottom).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
91
+ page_content=' The geometrical optics rays are depicted in Figure 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
92
+ page_content=' 8 whose physically-exact solution can be obtained by relying on the method described in Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
93
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
94
+ page_content=' For our example, an incident field given by a Gaussian beam uinc(x, z) = � ∞ −∞ ei√ k2−β2x+iβze− β2 σ2 dβ (21) is utilized, wherein the integral in the variable β is evaluated via standard numerical integration techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
95
+ page_content=' In our first example we consider the permittivity model (20) with parameters a = 10−4 and b = 10−4—which, at C-band, results in a configuration that gives rise to a single caustic of cusp type for the first 40km (800, 000 wavelengths) in horizontal propagation range.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
96
+ page_content=' The geometrical optics rays are displayed in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
97
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
98
+ page_content=' The S-WKB solution alongside the Sturm-Liouville solution with superimposed ray-tracing are depicted in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
99
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
100
+ page_content=' As shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
101
+ page_content=' 4, the relative errors for this configuration are of the order of 10−5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
102
+ page_content=' Employing N = 400 Fourier modes and a total of 40 screens, the S-WKB solution in this case was obtained in a computing times of 2 minutes in a single-core, whereas the separation-of-variables solution required single-core runs of approximately 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
103
+ page_content='5 hours.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
104
+ page_content=' Figure 7: Smooth convex lens simulations produced by the S-WKB method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
105
+ page_content=' Note the fine-scale fields behind caustics, whose simulation is otherwise quite challenging;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
106
+ page_content=' cf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
107
+ page_content=' e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
108
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
109
+ page_content=' [7, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
110
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
111
+ page_content='10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
112
+ page_content=' For our next example we consider a “ducting” configuration, in which the incident Gaussian beam is tilted by an angle of 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
113
+ page_content='2◦, and where the Gaussian permittivity (20) was used with param- eters a = 10−4 and b = 10−3—in such a way that the energy is contained within a bounded interval along the z axis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
114
+ page_content=' The geometrical-optics rays form a complex system with multiple caustics, as 9 Figure 8: Permittivity and geometrical-optics rays (left), and rays superimposed on the field de- picted in Figure 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
115
+ page_content=' depicted in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
116
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
117
+ page_content=' We consider the propagation of this signal over a range of 200Km (4 million wavelengths) in range.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
118
+ page_content=' A total of n = 800 Fourier modes and 200 of the order of 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
119
+ page_content='1%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
120
+ page_content=' 4 Smooth Lens We now consider the test case of a smooth convex lens proposed in [4] on the basis of the permittivity function given by ε(r) = � 1 d2 > L � a b−cos(πd2) �2 d2 ≤ L d2 = �x − xc xd �2 + �z − zc zd �2 (22) For the numerical example depicted in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
121
+ page_content=' 7, we have set L = 1, a = 4, b = 3, xc = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
122
+ page_content='5, xd = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
123
+ page_content='2, zc = 0, zd = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
124
+ page_content='8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
125
+ page_content=' The displayed result compares favorably to that presented in [4] on the basis of a kinetic formulation, as well as the similar problem demonstrated in [7, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
126
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
127
+ page_content='10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
128
+ page_content=' A separation of variables solution is not available in this case, and the error could be evaluated by means of S-WKB implementation of higher order.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
129
+ page_content=' In presence of the previous examples, however, we may estimate the error in the range of 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
130
+ page_content='1% to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
131
+ page_content='001%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
132
+ page_content=' References [1] Vasilii M Babich and Vladimir Sergeevich Buldyrev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
133
+ page_content=' Short-wavelength diffraction theory: asymptotic methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
134
+ page_content=' Springer, 1991.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
135
+ page_content=' [2] Max Born and Emil Wolf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
136
+ page_content=' Principles of optics: electromagnetic theory of propagation, inter- ference and diffraction of light.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
137
+ page_content=' Elsevier, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
138
+ page_content=' [3] Vlastislav ˇCerven`y, Mikhail M Popov, and Ivan Pˇsenˇc´ık.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
139
+ page_content=' Computation of wave fields in inhomo- geneous media—gaussian beam approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
140
+ page_content=' Geophysical Journal International, 70(1):109–128, 1982.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
141
+ page_content=' [4] Bj¨orn Engquist and Olof Runborg.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
142
+ page_content=' Computational high frequency wave propagation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
143
+ page_content=' Acta numerica, 12:181–266, 2003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
144
+ page_content=' 10 [5] RH Hardin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
145
+ page_content=' Applications of the split-step fourier method to the numerical solution of nonlinear and variable coefficient wave equations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
146
+ page_content=' SIAM Review (Chronicles), 15, 1973.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
147
+ page_content=' [6] Lars H¨ormander.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
148
+ page_content=' Linear partial differential operators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
149
+ page_content=' Springer, 1963.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
150
+ page_content=' [7] Finn B Jensen, William A Kuperman, Michael B Porter, Henrik Schmidt, and Alexandra Tolstoy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
151
+ page_content=' Computational ocean acoustics, volume 794.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
152
+ page_content=' Springer, 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
153
+ page_content=' [8] Joseph B Keller.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
154
+ page_content=' Geometrical theory of diffraction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
155
+ page_content=' Josa, 52(2):116–130, 1962.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
156
+ page_content=' [9] Mikhail Aleksandrovich Leontovich and Vladimir Aleksandrovich Fock.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
157
+ page_content=' Solution of the problem of propagation of electromagnetic waves along the earth’s surface by the method of parabolic equation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
158
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
159
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
160
+ page_content=' Ussr, 10(1):13–23, 1946.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
161
+ page_content=' [10] Sheehan Olver and Alex Townsend.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
162
+ page_content=' A fast and well-conditioned spectral method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
163
+ page_content=' siam RE- VIEW, 55(3):462–489, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
164
+ page_content=' [11] Steven J Ruuth, Barry Merriman, and Stanley Osher.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
165
+ page_content=' A fixed grid method for capturing the motion of self-intersecting wavefronts and related pdes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
166
+ page_content=' Journal of Computational Physics, 163(1):1–21, 2000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
167
+ page_content=' [12] John Steinhoff, Meng Fan, and Lesong Wang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
168
+ page_content=' A new eulerian method for the computation of propagating short acoustic and electromagnetic pulses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
169
+ page_content=' Journal of Computational Physics, 157(2):683–706, 2000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
170
+ page_content=' [13] Nicolay M Tanushev, Bj¨orn Engquist, and Richard Tsai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
171
+ page_content=' Gaussian beam decomposition of high frequency wave fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
172
+ page_content=' Journal of Computational Physics, 228(23):8856–8871, 2009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
173
+ page_content=' [14] Ru-Shan Wu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
174
+ page_content=' Wide-angle elastic wave one-way propagation in heterogeneous media and an elastic wave complex-screen method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
175
+ page_content=' Journal of Geophysical Research: Solid Earth, 99(B1):751–766, 1994.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
176
+ page_content=' 11' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE2T4oBgHgl3EQfUgew/content/2301.03814v1.pdf'}
KdFOT4oBgHgl3EQfzDRI/content/tmp_files/2301.12930v1.pdf.txt ADDED
@@ -0,0 +1,3349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Cause-Effect Inference in Location-Scale Noise Models: Maximum Likelihood
2
+ vs. Independence Testing
3
+ Xiangyu Sun 1 Oliver Schulte 1
4
+ Abstract
5
+ Location-scale noise models (LSNMs) are a class
6
+ of heteroscedastic structural causal models with
7
+ wide applicability, closely related to affine flow
8
+ models.
9
+ Recent likelihood-based methods de-
10
+ signed for LSNMs that infer cause-effect relation-
11
+ ships achieve state-of-the-art accuracy, when their
12
+ assumptions are satisfied concerning the noise dis-
13
+ tributions. However, under misspecification their
14
+ accuracy deteriorates sharply, especially when the
15
+ conditional variance in the anti-causal direction is
16
+ smaller than that in the causal direction. In this pa-
17
+ per, we demonstrate the misspecification problem
18
+ and analyze why and when it occurs. We show
19
+ that residual independence testing is much more
20
+ robust to misspecification than likelihood-based
21
+ cause-effect inference. Our empirical evaluation
22
+ includes 580 synthetic and 99 real-world datasets.
23
+ 1. Introduction
24
+ Distinguishing cause and effect is a fundamental problem
25
+ in many disciplines, such as biology, healthcare and fi-
26
+ nance (Zhang & Chan, 2006; Huang, 2021; Mansouri et al.,
27
+ 2022). Randomized controlled trials (RCTs) are the gold
28
+ standard for finding causal relationships. However, it may be
29
+ unethical, expensive or even impossible to perform RCTs in
30
+ real-world domains (Peters et al., 2017; Pearl & Mackenzie,
31
+ 2018). Causal discovery algorithms aim to find causal rela-
32
+ tionships given observational data alone. Traditional causal
33
+ discovery algorithms can only identify causal relationships
34
+ up to Markov equivalence classes (MECs) (Spirtes & Gly-
35
+ mour, 1991; Kalisch & B¨uhlman, 2007; Colombo et al.,
36
+ 2012). To break the symmetry in a MEC, additional as-
37
+ sumptions are needed (Peters et al., 2017; Sch¨olkopf, 2022),
38
+ such as the type of functional dependence of effect on cause.
39
+ Structural causal models (SCMs) specify a functional class
40
+ for the causal relations in the data (Pearl, 2009; Peters et al.,
41
+ 2017). In this work, we focus on one particular type of SCM
42
+ 1Simon Fraser University, Burnaby, Canada. Correspondence
43
+ to: Xiangyu Sun <xiangyu [email protected]>.
44
+ Preprint.
45
+ called location-scale noise models (LSNMs) (Tagasovska
46
+ et al., 2020; Khemakhem et al., 2021; Xu et al., 2022; Strobl
47
+ & Lasko, 2022; Immer et al., 2022):
48
+ Y := f(X) + g(X) · ZY
49
+ (1)
50
+ where X is the cause, Y is the effect, written X → Y , and
51
+ ZY is a latent noise variable independent of X (i.e., X ⊥
52
+ ⊥ ZY ). The functions f and g are twice-differentiable on
53
+ the domain of X, and g is strictly positive on the domain of
54
+ X. LSNMs are closely related to affine flow models, where
55
+ g(X) = exp s(X) (Khemakhem et al., 2021). LSNMs
56
+ generalize the widely studied additive noise models (ANMs,
57
+ where g(x) = 1 for all x) and allow heteroscedasticity
58
+ where the variance of Y conditional on X, (i.e. V[Y |X])
59
+ depends on the value of X.
60
+ Two major approaches for cause-effect inference in SCMs
61
+ are maximum likelihood (ML) and independence testing
62
+ (IT) of residuals vs. the (putative) cause (Mooij et al., 2016).
63
+ Both have been recently evaluated for LSNMs (Khemakhem
64
+ et al., 2021; Immer et al., 2022), with good accuracy, espe-
65
+ cially when the f and g functions are estimated by neural
66
+ networks. Immer et al. note, however, that ML can be less
67
+ robust than IT, in the sense that accuracy deteriorates when
68
+ the noise distribution is not Gaussian.
69
+ In this paper we investigate the robustness of ML vs. IT.
70
+ Our analysis shows that ML cause-effect inference performs
71
+ poorly when two factors coincide: (1) Noise Misspecifica-
72
+ tion: the ML method assumes a different noise distribution
73
+ from the true one. (2) Misleading Conditional Variances
74
+ (CVs): V[Y |X] > V[X|Y ] in the data generated by causal
75
+ direction X → Y . For example, in an experiment on syn-
76
+ thetic datasets shown in Table 1 below, (i) changing the true
77
+ noise distribution from Gaussian to uniform and (ii) manip-
78
+ ulating the CV of effect Y given cause X (i.e. V[Y |X]),
79
+ while keeping other settings equal, can decrease the rate of
80
+ identifying the true causal direction from 100% to 10%. In
81
+ contrast, IT approaches maintain a perfect 100% accuracy.
82
+ Both conditions (1) and (2) often hold in practice. For real-
83
+ world domains, assumptions about the noise distribution
84
+ can be hard to determine or verify. It is also common to
85
+ have misleading CVs in real-world datasets. For example,
86
+ in the T¨ubingen Cause-Effect Pairs Benchmark (Mooij et al.,
87
+ arXiv:2301.12930v1 [cs.LG] 26 Jan 2023
88
+
89
+ Cause-Effect Inference in Location-Scale Noise Models
90
+ 2016), about 40% of the real-world datasets exhibit a mis-
91
+ leading CV (see Appendix Table 4).
92
+ We make the following contributions:
93
+ • Describe experiments and theoretical analysis to show
94
+ that ML methods succeed when the noise distribution
95
+ is known.
96
+ • Demonstrate empirically that ML methods often fail
97
+ when the noise distribution is misspecified and CV is
98
+ misleading.
99
+ • Introduce a new IT method based on an affine flow
100
+ model.
101
+ • Demonstrate empirically that our IT method is robust
102
+ to noise misspecification and misleading CVs.
103
+ The paper is structured as follows. We discuss related works
104
+ and preliminaries in Sections 2 and 3, respectively. Sec-
105
+ tion 4 examines when ML methods succeed and when they
106
+ fail. Section 5 demonstrates the robustness of the IT ap-
107
+ proach. Evaluations on synthetic and real-world datasets are
108
+ given in Section 6. The code and scripts to reproduce all the
109
+ results can be found online 1.
110
+ 2. Related Works
111
+ Causal Discovery. Causal discovery methods have been
112
+ well studied in machine learning (Spirtes & Glymour, 1991;
113
+ Kalisch & B¨uhlman, 2007; Colombo et al., 2012). Assum-
114
+ ing causal sufficiency (no latent confounders) and faithful-
115
+ ness, they find causal structure up to a MEC. To identify
116
+ causal relations within a MEC, additional assumptions are
117
+ needed (Peters et al., 2017; Sch¨olkopf, 2022). SCMs exploit
118
+ constraints that result from assumptions about the functional
119
+ dependency of effects on causes. Functional dependencies
120
+ are often studied in the fundamental case of two variables
121
+ X and Y , the simplest MEC (Mooij et al., 2016). We follow
122
+ this line of work and study two-variable LSNMs assuming
123
+ causal sufficiency.
124
+ Structural Causal Models. Assuming a linear non-Gaussian
125
+ acyclic model (LINGAM) (Shimizu et al., 2006), the causal
126
+ direction was proved to be identifiable. The key idea in
127
+ LINGAM is that in the true causal direction X → Y , the
128
+ model residuals are independent of X. We refer to methods
129
+ derived from the LINGAM approach as independence test-
130
+ ing (IT) methods. The more general additive noise model
131
+ (ANM) (Hoyer et al., 2008) allows for nonlinear cause-
132
+ effect relationships and is generally identifiable, except for
133
+ some special cases. There are other identifiable SCMs, such
134
+ as post-nonlinear models (Zhang & Hyvarinen, 2012) and
135
+ Poisson generalized linear models (Park & Park, 2019).
136
+ 1https://github.com/xiangyu-sun-789/CAREFL-H
137
+ LSNM identifiability. There are several identifiability re-
138
+ sults for the causal direction in LSNMs. Xu et al. (2022)
139
+ prove LSNMs are identifiable for linear causal dependen-
140
+ cies. Khemakhem et al. (2021) show that nonlinear LSNMs
141
+ are identifiable with Gaussian noise. Strobl & Lasko (2022);
142
+ Immer et al. (2022) prove LSNMs are identifiable except in
143
+ some pathological cases (cf. Section 4.1).
144
+ Cause-Effect Inference in LSNMs. The blueprint is to fit two
145
+ LSNM models for each direction X → Y and X ← Y and
146
+ select the direction with a higher model score. HECI (Xu
147
+ et al., 2022) bins putative cause values and selects a direc-
148
+ tion based on a Bayesian information criterion based score.
149
+ BQCD (Tagasovska et al., 2020) uses nonparamatric quan-
150
+ tile regression to approximate minimum description length
151
+ to select the direction. GRCI (Strobl & Lasko, 2022) finds
152
+ the direction based on mutual information between the pu-
153
+ tative cause and the model residuals. LOCI (Immer et al.,
154
+ 2022) models the conditional distribution of effect given
155
+ cause with Gaussian natural parameters. Then, it chooses
156
+ the direction based on either likelihood (LOCI-M) or in-
157
+ dependence (LOCI-H). CAREFL-M (Khemakhem et al.,
158
+ 2021) fits affine flow models and scores them by likelihood.
159
+ CAREFL-M is more general than LOCI since LOCI uses a
160
+ fixed Gaussian noise distribution prior, whereas CAREFL-
161
+ M can utilize different prior distributions. We introduce
162
+ CAREFL-H as a new IT method that scores the fitted affine
163
+ flow models based on independence. DECI (Geffner et al.,
164
+ 2022) generalizes CAREFL-M to multivariate cases but is
165
+ designed for ANMs. To our knowledge, the problem of mis-
166
+ leading CVs is neither identified nor analyzed in previous
167
+ work.
168
+ 3. Preliminaries
169
+ In this section, we define the cause-effect inference problem
170
+ and review the LSNM data likelihood.
171
+ 3.1. Problem Definition: Cause-Effect Inference
172
+ Cause-effect inference takes as input a dataset D over two
173
+ random variables X, Y with N observational data pairs
174
+ (X, Y ) = {(x1, y1), (x2, y2), . . . , (xN, yN)} generated by
175
+ a ground-truth LSNM in Equation (1). The binary output
176
+ decision indicates whether X causes Y (X → Y ) or Y
177
+ causes X (X ← Y ). We assume no latent confounders,
178
+ selection bias or feedback cycles.
179
+ 3.2. Definition of Maximum Likelihood Approach for
180
+ LSNMs
181
+ An LSNM model for two variables (X, Y ) is a pair (→, PZ).
182
+ The → represents the direction X → Y with parameter
183
+ space f ≡ fθ, g ≡ gψ, PX ≡ PX,ζ. The ← represents
184
+ the direction X ← Y with parameter space h ≡ hθ′, k ≡
185
+
186
+ Cause-Effect Inference in Location-Scale Noise Models
187
+ kψ′, PY ≡ PY,ζ′. For notational simplicity, we treat the
188
+ model functions directly as model parameters and omit ref-
189
+ erence to their parameterizations. For example, we write
190
+ f ≡ fθ for a function f that is implemented by a neural
191
+ network with weights θ. We refer to PZ as the model prior
192
+ noise distribution.
193
+ A parameterized LSNM model defines a data distribution as
194
+ follows (Immer et al., 2022) (derivation in Appendix A):
195
+ P→,PZ(X, Y ; f, g, PX)
196
+ =PX(X) · PZY (Y − f(X)
197
+ g(X)
198
+ ) ·
199
+ 1
200
+ g(X)
201
+ P←,PZ(X, Y ; h, k, PY )
202
+ =PY (Y ) · PZX(X − h(Y )
203
+ k(Y )
204
+ ) ·
205
+ 1
206
+ k(Y )
207
+ (2)
208
+ For conciseness, we use shorter notation P→,PZ(X, Y ) and
209
+ P←,PZ(X, Y ) in later sections. The likelihood of a dataset
210
+ D for a parametrized → model is given by
211
+ P→,PZ(D; f, g, PX) =
212
+ N
213
+
214
+ i=1
215
+ P→,PZ(xi, yi; f, g, PX)
216
+ The ML approach estimates the ML parameters and utilizes
217
+ them to score the → model (similarly for the ← model ):
218
+ �f, �g, �PX := arg max
219
+ f,g,PX P→,PZ(D; f, g, PX)
220
+ (3)
221
+ L→,PZ(D) := P→,PZ(D; �f, �g, �
222
+ PX)
223
+ (4)
224
+ 4. Analysis of Maximum Likelihood
225
+ Approach
226
+ In this section, we state existing and provide new identi-
227
+ fiability results for LSNMs. We use theoretical analysis
228
+ to understand why misspecified ML methods fail in the
229
+ presence of misleading CVs (cf. Table 1).
230
+ 4.1. Identifiability of LSNMs with Correct Noise
231
+ Distribution
232
+ Strobl & Lasko (2022); Immer et al. (2022) prove the iden-
233
+ tifiability of LSNMs, assuming a correctly specified noise
234
+ distribution. That is, given that the data generating distribu-
235
+ tion (X, Y ) follows a LSNM in the direction X → Y , the
236
+ same distribution with equal likelihood cannot be induced
237
+ by a LSNM in the backward direction X ← Y , except in
238
+ some pathological cases. In terms of our notation, direction
239
+ identifiability means that if (→, PZ) is the data generating
240
+ model, then
241
+ P([L→,PZ(D) − L←,PZ(D)] > 0) → 1 as N → ∞
242
+ Immer et al. prove the following identifiability result:
243
+ Theorem 4.1 (Theorem 1 from (Immer et al., 2022)). For
244
+ data (X, Y ) that follows a LSNM in both direction X → Y
245
+ and X ← Y , i.e.,
246
+ Y = f(X) + g(X) · ZY , where X ⊥⊥ ZY
247
+ X = h(Y ) + k(Y ) · ZX, where Y ⊥⊥ ZX
248
+ The following condition must be true:
249
+ (log p(y))′′ +
250
+ g′(x)
251
+ G(x, y) · (log p(y))′
252
+ + ∂2
253
+ ∂y2 · νX|Y (x|y) +
254
+ g(x)
255
+ G(x, y) ·
256
+ ∂2
257
+ ∂y∂x · νX|Y (x|y)
258
+ +
259
+ g′(x)
260
+ G(x, y) · ∂
261
+ ∂y · νX|Y (x|y) = 0
262
+ (5)
263
+ where G(x, y) = g(x) · f ′(x) + g′(x) · [y − f(x)] ̸= 0 and
264
+ νX|Y (x|y) = log pZX( x−h(y)
265
+ k(y) ) − log k(y).
266
+ They state that Equation (5) will be false except for “patho-
267
+ logical cases”. In addition, Khemakhem et al. (2021) pro-
268
+ vide sufficient conditions for LSNMs with Gaussian noise
269
+ to be identifiable. Our next theorem provides identifiability
270
+ results for some non-Gaussian noise distributions:
271
+ Theorem 4.2. Suppose that the true data-generating dis-
272
+ tribution follows an LSNM model in both X → Y and
273
+ X ← Y directions:
274
+ 1. If the noise distribution is Uniform(a, b), then both
275
+ g(X) and k(Y ) are constant functions.
276
+ 2. If the noise distribution is ContinuousBernoulli(λ ̸=
277
+ 0.5)2 or Exponential(λ), then one of the following
278
+ conditions holds:
279
+ • g(X)−1 and k(Y )−1 are constant functions.
280
+ • g(X)−1 and k(Y )−1 are linear functions with the
281
+ same coefficients on X and Y , respectively.
282
+ The proof is in Appendix B. Essentially, the theorem shows
283
+ that for Uniform, Exponential, and ContinuousBernoulli
284
+ noise distributions, the true LSNM model can be identified
285
+ unless it degenerates to (i) a homoscedastic additive noise
286
+ model or (ii) a heteroscedastic model with the same linear
287
+ scale in both directions.
288
+ 4.2. Non-Identifiability of LSNMs with Misspecified
289
+ Noise Distribution
290
+ Existing identifiability results for ML methods require know-
291
+ ing the ground-truth noise distribution. We find that when
292
+ 2The case of ContinuousBernoulli(λ = 0.5) is equivalent
293
+ to Uniform(0, 1)).
294
+
295
+ Cause-Effect Inference in Location-Scale Noise Models
296
+ Table 1: Accuracy over 10 datasets generated by SCM LSNM-sine-tanh (definition in Appendix G) with N = 10, 000
297
+ samples. The task is a binary decision whether X causes Y or Y causes X. Rewrite Equation (1) as Y = f(X)+α·g(X)·ZY ,
298
+ where α is a scale factor to alter the CV. CVs are computed by binning the putative cause. X denotes the ground-truth cause
299
+ and Y denotes the ground-truth effect. We used Gaussian(0, 1) as model noise prior for both CAREFL and LOCI. The
300
+ suffix -M denotes a ML method. The suffix -H denotes the corresponding IT method (more in Section 5).
301
+ True Noise
302
+ Gaussian(0, 1)
303
+ Uniform(−1, 1)
304
+ α
305
+ 0.1
306
+ 0.5
307
+ 1
308
+ 5
309
+ 10
310
+ 0.1
311
+ 0.5
312
+ 1
313
+ 5
314
+ 10
315
+ V[Y |X]
316
+ vs.
317
+ V[X|Y ]
318
+ 0.166
319
+ vs.
320
+ 0.455
321
+ 0.615
322
+ vs.
323
+ 0.709
324
+ 0.834
325
+ vs.
326
+ 0.793
327
+ 0.990
328
+ vs.
329
+ 0.821
330
+ 0.997
331
+ vs.
332
+ 0.817
333
+ 0.044
334
+ vs.
335
+ 0.047
336
+ 0.404
337
+ vs.
338
+ 0.375
339
+ 0.673
340
+ vs.
341
+ 0.566
342
+ 0.975
343
+ vs.
344
+ 0.681
345
+ 0.994
346
+ vs.
347
+ 0.677
348
+ CAREFL-M
349
+ 1.0
350
+ 1.0
351
+ 1.0
352
+ 1.0
353
+ 1.0
354
+ 0.7
355
+ 0.6
356
+ 0.7
357
+ 0.1
358
+ 0.1
359
+ LOCI-M
360
+ 1.0
361
+ 1.0
362
+ 1.0
363
+ 1.0
364
+ 1.0
365
+ 0.7
366
+ 0.6
367
+ 0.7
368
+ 0.1
369
+ 0.1
370
+ CAREFL-H
371
+ 1.0
372
+ 1.0
373
+ 1.0
374
+ 1.0
375
+ 1.0
376
+ 0.9
377
+ 1.0
378
+ 1.0
379
+ 1.0
380
+ 1.0
381
+ LOCI-H
382
+ 1.0
383
+ 1.0
384
+ 1.0
385
+ 1.0
386
+ 1.0
387
+ 0.7
388
+ 1.0
389
+ 1.0
390
+ 1.0
391
+ 1.0
392
+ the noise distribution is misspecified for both causal and
393
+ anti-causal models, the anti-causal model may achieve a
394
+ higher data likelihood, even in the sample size limit. In
395
+ other words, ML model selection is not consistent when
396
+ the noise distribution is misspecified. In terms of our nota-
397
+ tion, if (→, PZ) is the data generating model, but the noise
398
+ distribution is misspecified as $P'_Z$, then
+ $P\big(\,[\mathcal{L}_{\to,P'_Z}(D) - \mathcal{L}_{\leftarrow,P'_Z}(D)] > 0\,\big) \not\to 1$ as $N \to \infty$
403
+ We conducted a simple experiment to show that with a
404
+ misspecified noise distribution, ML model selection can
405
+ fail badly. Table 1 shows results for CAREFL-M (Khe-
406
+ makhem et al., 2021) and LOCI-M (Immer et al., 2022) with
407
+ model prior distribution Gaussian(0, 1). For the left half
408
+ of the table, the data was generated with Gaussian(0, 1)
409
+ noise, matching the model specification. In this case, both
410
+ CAREFL-M and LOCI-M work quite well, and increasing
411
+ CV in the causal direction does not affect their accuracy.
412
+ For the right half of the table, the data was generated with
413
+ Uniform(−1, 1) noise, contradicting the model specifica-
414
+ tion. With the misspecified noise, the accuracy of both
415
+ CAREFL-M and LOCI-M decreases to 70%. With both
416
+ misspecified noise and misleading CVs, their accuracy be-
417
+ comes even lower. For example, in the last column when
418
+ V[Y |X] = 0.994 and V[X|Y ] = 0.677, both CAREFL-M
419
+ and LOCI-M give an accuracy of just 10%. Thus they select
420
+ the incorrect anti-causal direction 9 out of 10 times.
421
+ To understand why higher CV corresponds to lower data
422
+ likelihood, note that the model Equation (2) entails the
423
+ following relationships for the X → Y model:
424
+ $V[Y|X] = g^2(X) \cdot V[Z_Y]$
+ $P_{\to,P_Z}(X, Y) = P_X(X) \cdot P_{Z_Y}(Z_Y) \cdot \frac{1}{g(X)}$
+ where $Z_Y = \frac{Y - f(X)}{g(X)}$. Therefore we can expect $V[Y|X]$
430
+ and P→,PZ(X, Y ) to be negatively related:
431
+ 1. Increasing $V[Y|X]$ by enlarging $g(X)$ reduces $\frac{1}{g(X)}$,
+ which in turn reduces $P_{\to,P_Z}(X, Y)$.
435
+ 2. High variance typically means small densities. There-
436
+ fore, increasing V [Y |X] by enlarging V [ZY ] often re-
437
+ duces PZY (ZY ), which in turn reduces P→,PZ(X, Y ).
438
+ Figure 1 illustrates these relationships in actual datasets.
439
+ 1. CV and likelihood are negatively related (Figures 1a
440
+ and 1b), with correctly or incorrectly specified noise.
441
+ 2. When the noise distribution is correctly specified, the
442
+ causal model always has a higher likelihood, even with
443
+ misleading CVs (Figure 1a).
444
+ 3. When the noise distribution is misspecified, the anti-
445
+ causal model often has a higher likelihood under mis-
446
+ leading CVs (Figure 1b).
447
+ 5. Robustness of Independence Testing
448
+ This section describes IT approaches for cause-effect learn-
449
+ ing in LSNMs, including a new IT method based on affine
450
+ flows. A theoretical analysis explains why IT approaches
451
+ are robust to noise misspecification and misleading CVs.
452
+ 5.1. The Independence Testing Approach
453
+ Inspired by the breakthrough LINGAM approach, indepen-
454
+ dence testing has been used in existing methods for various
455
+ SCMs (Hoyer et al., 2008; Shimizu et al., 2011; Peters et al.,
456
+ 2014; Strobl & Lasko, 2022; Immer et al., 2022). Like the
457
+
458
+ Cause-Effect Inference in Location-Scale Noise Models
459
+ (a) Log-Likelihood Difference Under Correct Noise
460
+ Specification: Gaussian(0, 1) noise
461
+ (b) Log-Likelihood Difference Under Noise Misspecifi-
462
+ cation: Uniform(−1, 1) noise
463
+ (c) HSIC Difference Under Correct Noise Specification:
464
+ Gaussian(0, 1) noise
465
+ (d) HSIC Difference Under Noise Misspecification:
466
+ Uniform(−1, 1) noise
467
+ Figure 1: Visualization of Table 1. First row (1a,1b): ML methods. Second row (1c,1d): IT methods. Y-axis < 0.0: A ML
468
+ method returns the incorrect anti-causal direction. Y-axis > 0.0: An IT method returns the incorrect anti-causal direction.
469
+ ML methods may fail under misspecification and misleading CVs (1b). IT methods are more robust (1d).
470
+ ML approach (Equation (4)), IT methods fit the model pa-
471
+ rameters in both directions, typically maximizing the data
472
+ likelihood (Equation (3)). The difference is in the model
473
+ selection step: While ML approaches select the direction
474
+ with the highest likelihood, IT approaches select the direc-
475
+ tion with the highest degree of independence between the
476
+ fitted model residuals and the putative cause. Algorithm 1
477
+ provides pseudo-code.
478
+ As in previous work (e.g., Mooij et al. (2016)), we use
479
+ the Hilbert-Schmidt independence criterion (HSIC) (Gret-
480
+ ton et al., 2005) to measure (in)dependence throughout the
481
+ paper. HSIC measures the squared distance between the
482
+ joint probability of the two variables and the product of
483
+ their marginals embedded in the reproducible kernel Hilbert
484
+ space. We have HSIC(U, V ) = 0 if and only if U ⊥⊥ V .
485
+ To fit the functions in a LSNM, we use the affine flow esti-
486
+ mator T from CAREFL-M (Khemakhem et al., 2021). We
487
+ refer to the resulting IT method as CAREFL-H. This com-
488
+ bination of affine flow with IT appears to be new. Details
489
+ on CAREFL-M and learning the flow transformation T are
490
+ given in Appendix C.
491
+ Another approach to IT methods is to test the independence
492
+ of residuals for both X and Y variables (He et al., 2021).
493
+ We found that this performs similarly to CAREFL-H and
494
+ therefore report experimental results only for the more com-
495
+ mon LINGAM-style approach. More details on IT with
496
+ residuals are in Appendix D.
497
+ 5.2. Suitability Theory
498
+ With a consistent HSIC estimator, Mooij et al. (2016) show
499
+ that an IT approach consistently selects the causal direction
500
+ for ANMs if the regression method is suitable. A regres-
501
+ sion method is suitable if the expected mean squared error
502
+ between the predicted residuals �E and the true residuals E
503
+ approaches 0 in the limit of N → ∞:
504
+ $\lim_{N \to \infty} \mathbb{E}_{D,D'}\left[ \frac{1}{N} \,\| \hat{E}_{1 \ldots N} - E_{1 \ldots N} \|^2 \right] = 0$
+ (6)
511
+ where D and D′ denote training set and testing set, respec-
512
+ tively. Hence, with enough data a suitable regression method
513
+ reconstructs the ground-truth noise.
514
+ If an HSIC estimator is consistent, the estimated HSIC value
515
+ converges in probability to the population HSIC value.
516
+
517
+ $\widehat{\mathrm{HSIC}}(X, Y) \xrightarrow{P} \mathrm{HSIC}(X, Y)$.
519
+ Mooij et al. (2016) show that even a biased HSIC estimator
520
+ with a fixed bounded kernel is consistent.
521
+ With a consistent HSIC estimator and a suitable regression
522
+ method, the consistency result for ANMs in Mooij et al.
523
+ (2016) extends naturally to LSNMs.
524
+ Proposition 5.1. For identifiable LSNMs with an indepen-
525
+ dent noise term in one causal direction only, if an IT ap-
526
+
527
+ : Causal - Anti-Causal
528
+ 2.00
529
+ 1.75
530
+ 1.50
531
+ 1.25
532
+ 1.00
533
+ Log-Likelihood:
534
+ 0.75
535
+ 0.50
536
+ 0.25
537
+ 0.00
538
+ -0.6
539
+ -0.4
540
+ -0.2
541
+ 0.0
542
+ 0.2
543
+ Mean CV: Causal - Anti-Causal-Causal
544
+ 0.6
545
+ Anti.
546
+ 0.5
547
+ 0.4
548
+ Causal
549
+ 0.3
550
+ 0.2
551
+ Log-Likelihood:
552
+ 0.1
553
+ 0.0
554
+ -0.1
555
+ -0.2
556
+ 0.0
557
+ 0.1
558
+ 0.2
559
+ 0.3
560
+ 0.4
561
+ Mean CV: Causal - Anti-CausalAnti-Causal
562
+ 0.000
563
+ -0.005
564
+
565
+ -0.010
566
+ : Causal
567
+ -0.015
568
+ HSIC-Value:
569
+ 0.020
570
+ -0.025
571
+ 0.6-0.4-0.2
572
+ 0.0
573
+ 0.2
574
+ Mean CV: Causal - Anti-CausalAnti-Causal
575
+ 0.0000
576
+ -0.0005
577
+ 0.0010
578
+ : Causal
579
+ -0.0015
580
+ HSIC-Value:
581
+ -0.0020
582
+ -0.0025
583
+ -0.0030
584
+ 0.0
585
+ 0.1
586
+ 0.2
587
+ 0.3
588
+ 0.4
589
+ Mean CV: Causal - Anti-CausalCause-Effect Inference in Location-Scale Noise Models
590
+ Algorithm 1 CAREFL-H
591
+ 1: Input: data pairs D := (X, Y ), the flow estimator T
592
+ of CAREFL-M with prior PZ, and an HSIC estimator
593
+ 2: Output: estimated causal direction dir
594
+ 3: Split D into training set Dtrain := (Xtrain, Ytrain) and
595
+ testing set Dtest := (Xtest, Ytest)
596
+ 4: Optimize $T_{\hat{\theta}, \hat{\psi}, \hat{\zeta}}(D_{\mathrm{train}}; P_Z)$ in $X \to Y$ direction via
+ ML to estimate $\hat{f}$ and $\hat{g}$
+ 5: Compute the residual $\hat{Z}_Y := \frac{Y_{\mathrm{test}} - \hat{f}(X_{\mathrm{test}})}{\hat{g}(X_{\mathrm{test}})}$
+ 6: Optimize $T_{\hat{\theta}', \hat{\psi}', \hat{\zeta}'}(D_{\mathrm{train}}; P_Z)$ in $X \leftarrow Y$ direction
+ via ML to estimate $\hat{h}$ and $\hat{k}$
+ 7: Compute the residual $\hat{Z}_X := \frac{X_{\mathrm{test}} - \hat{h}(Y_{\mathrm{test}})}{\hat{k}(Y_{\mathrm{test}})}$
609
+ 8: if HSIC(Xtest, �ZY ) < HSIC(Ytest, �ZX) then
610
+ 9:
611
+ dir := X → Y
612
+ 10: else if HSIC(Xtest, �ZY ) > HSIC(Ytest, �ZX) then
613
+ 11:
614
+ dir := X ← Y
615
+ 12: else
616
+ 13:
617
+ dir := no conclusion
618
+ 14: end if
619
+ proach is used with a suitable regression method for LSNMs
620
+ and a consistent HSIC estimator, then the IT approach is
621
+ consistent for inferring causal direction for LSNMs.
622
+ 5.3. Suitability: Empirical Results
623
+ The suitability value S is the left-hand side of Equation (6).
624
+ Table 2 shows an empirical evaluation of S for the flow
625
+ estimator T in the causal direction. We generate data from
626
+ 3 synthetic LSNMs (definition in Appendix G), and eval-
627
+ uate T under noise misspecification and misleading CVs.
628
+ We find that as the sample size grows, S approaches 0.
629
+ In other words, T is empirically suitable under noise mis-
630
+ specification and misleading CVs. Therefore, according
631
+ to Proposition 5.1, CAREFL-H based on the flow estimator
632
+ T and a consistent HSIC estimator is empirically consis-
633
+ tent for inferring causal direction in LSNMs under noise
634
+ misspecification and misleading CVs.
635
+ Because the T in CAREFL-M (Khemakhem et al., 2021)
636
+ uses neural networks to approximate the observed data dis-
637
+ tribution, it is difficult to provide a theoretical guarantee of
638
+ suitability. Therefore, although our experiments indicate
639
+ that T is often suitable in practice, we do not claim that it
640
+ is suitable for all LSNMs. For example, we have found it
641
+ to be not suitable in the low-noise regime when the LSNMs
642
+ are close to deterministic.
643
+ 6. Experiments
644
+ The code and scripts to reproduce all the results are given
645
+ online 3. We show that across hyperparameter choices the IT
646
+ approach (i.e., CAREFL-H) produces much higher accuracy
647
+ than the ML approach (i.e., CAREFL-M) in the difficult
648
+ settings with noise misspecification and misleading CVs,
649
+ and produces comparable accuracy in the easier settings
650
+ without noise misspecification or misleading CVs. Also the
651
+ IT approach is more robust with real-world data, where the
652
+ ground-truth noise distribution is unknown.
653
+ For all experiments, we start with the same default hyperpa-
654
+ rameter values for both CAREFL-M and CAREFL-H and
655
+ alter one hyperparameter value at a time. The default hyper-
656
+ parameter values are those specified in CAREFL-M (Khe-
657
+ makhem et al., 2021) for the Tübingen Cause-Effect Pairs
658
+ Benchmark (Mooij et al., 2016). Please see Appendix F for
659
+ more details on default and alternative hyperparameter val-
660
+ ues. Previous work (Mooij et al., 2016; Immer et al., 2022)
661
+ reported that ML methods perform better with data splitting
662
+ (split data into training set for model fitting and testing set
663
+ for model selection) and IT methods perform better with
664
+ data recycling (the same data is used for both model fitting
665
+ and selection). Therefore, we use both splitting methods:
666
+ (i) CAREFL(0.8): 80% as training and 20% as testing. (ii)
667
+ CAREFL(1.0): training = testing = 100%.
668
+ We use a consistent HSIC estimator with Gaussian ker-
669
+ nels (Pfister et al., 2018). A summary of experimental
670
+ datasets is provided in Appendix Table 4. All the datasets
671
+ are normalized to have mean 0 and variance 1.
672
+ 6.1. Synthetic Datasets
673
+ Please see Appendix G for the definition of the 3 ground-
674
+ truth LSNM SCMs and details on how synthetic datasets are
675
+ generated from them. The sample sizes in each synthetic
676
+ dataset are 500 or 5,000. As shown in Appendix Table 4,
677
+ most synthetic datasets generated by such SCMs have mis-
678
+ leading CVs. Based on the analysis of Section 4.2 we formu-
679
+ late the following hypotheses. (i) We expect CAREFL-M
680
+ to be accurate given a correct noise specification with or
681
+ without misleading CVs. (ii) With noise misspecification
682
+ but without misleading CVs, we expect the accuracy of
683
+ CAREFL-M to be reduced. (iii) With both noise misspeci-
684
+ fication and misleading CVs, we expect the accuracy to be
685
+ very low, often below 50%. Overall, the results from the
686
+ experiments confirm our hypotheses.
687
+ 6.1.1. NOISE MISSPECIFICATION
688
+ We
689
+ evaluate
690
+ CAREFL-M
691
+ and
692
+ CAREFL-H
693
+ against
694
+ data generated with Uniform(−1, 1), Exponential(1),
695
+ 3https://github.com/xiangyu-sun-789/CAREFL-H
696
+
697
+ Cause-Effect Inference in Location-Scale Noise Models
698
+ Table 2: Suitability of the flow estimator T of CAREFL-M in the causal direction under noise misspecification and
699
+ misleading CVs. T is trained with a Laplace prior. The original dataset with size 2N is split into two: 50% as training set
700
+ and 50% as testing set. V[Y |X] > V[X|Y ] indicates misleading CVs in the dataset.
701
+ (a) LSNM-tanh-exp-cosine and ContinuousBernoulli(0.9) noise. V[Y |X] vs. V[X|Y ]: 0.324 vs. 0.291.
702
+ N=50
703
+ N=500
704
+ N=1000
705
+ N=5000
706
+ [SZ1, SZ2]
707
+ [0.02406, 0.01283]
708
+ [0.00194, 0.00204]
709
+ [0.00094, 0.00092]
710
+ [0.0002, 0.00018]
711
+ (b) LSNM-sine-tanh and Uniform(−1, 1) noise. V[Y |X] vs. V[X|Y ]: 0.422 vs. 0.367.
712
+ N=50
713
+ N=500
714
+ N=1000
715
+ N=5000
716
+ [SZ1, SZ2]
717
+ [0.0081, 0.00285]
718
+ [0.00039, 0.00031]
719
+ [0.0002, 0.00017]
720
+ [0.00003, 0.00003]
721
+ (c) LSNM-sigmoid-sigmoid and Exponential(1) noise. V[Y |X] vs. V[X|Y ]: 0.927 vs. 0.657.
722
+ N=50
723
+ N=500
724
+ N=1000
725
+ N=5000
726
+ [SZ1, SZ2]
727
+ [0.00979, 0.00443]
728
+ [0.00074, 0.00058]
729
+ [0.00045, 0.00026]
730
+ [0.00005, 0.00005]
731
+ Figure 2: Weighted accuracy over 99 datasets from Tübingen Cause-Effect Pairs Benchmark.
732
+ ContinuousBernoulli(0.9)
733
+ or
734
+ Beta(0.5, 0.5)
735
+ noise,
736
+ covered by our identifiability Theorem 4.2 (except
737
+ Beta(0.5, 0.5)).
738
+ Khemakhem et al. (2021) claim that
739
+ CAREFL-M is robust to noise misspecification. We show
740
+ that it may fail remarkably.
741
+ We summarize findings from the 336 settings here; the de-
742
+ tailed results are given in Appendix Figures 3 to 14. In 289
743
+ settings (86.01%), both CAREFL-M(0.8) and CAREFL-
744
+ M(1.0) select the correct causal direction with less than 50%
745
+ random accuracy. Furthermore, in 110 settings (32.74%)
746
+ both CAREFL-M(0.8) and CAREFL-M(1.0) fail catastroph-
747
+ ically with an accuracy of 0%. These experiments also
748
+ show that the accuracy of CAREFL-M often decreases as
749
+ N increases. In contrast, CAREFL-H(1.0) achieves better
750
+ accuracy than CAREFL-M in 333 settings (99.11%). The
751
+ accuracy of CAREFL-H(1.0) goes below 50% in only 6 set-
752
+ tings (1.79%). The results demonstrate the robustness of the
753
+ IT approach under noise misspecification and misleading
754
+ CVs, across different hyperparameter choices.
755
+ Appendix Table 4 and Appendix Figure 14 show that with
756
+ misleading CVs, the accuracy of CAREFL-M is close to 0%.
757
+ This is much lower than the corresponding cases without
758
+ misleading CVs in Appendix Figures 6 and 10.
759
+ 6.1.2. CORRECT NOISE SPECIFICATION
760
+ These experiments show that CAREFL-H is comparable
761
+ with CAREFL-M under correct noise specification, with
762
+ or without misleading CVs, especially on larger datasets,
763
+ as long as the affine model capacity is sufficient. We eval-
764
+ uate CAREFL-M and CAREFL-H against data generated
765
+ with Gaussian(0, 1) and Laplace(0, 1) noise. The detailed
766
+ results are in Appendix Figures 15 to 20. We find that
767
+ CAREFL-M is more sample efficient than CAREFL-H
768
+ when the model prior matches the data. Consistent with the
769
+ suitability results in Section 5.3, the accuracy of CAREFL-
770
+ H improves with more data. For example, with N = 500,
771
+ there are 49 out of 84 settings (58.33%) where CAREFL-M
772
+ outperforms CAREFL-H(1.0). However, with N = 5, 000,
773
+ CAREFL-H(1.0) achieves similar accuracy as CAREFL-
774
+ M on all datasets (except LSNM-sigmoid-sigmoid with
775
+ Laplace(0, 1) noise.) In addition, CAREFL-H(1.0) may
776
+ underperform CAREFL-M when the number of hidden neu-
777
+
778
+ 0.8
779
+ 0.8 ×
780
+ Accuracy
781
+ 0.8
782
+ 0.6
783
+ 0.6
784
+ 0.6
785
+ F9'0
786
+ 0.6
787
+ 0.4
788
+ 0.4
789
+ 0.4
790
+ 0.4
791
+ 0.4
792
+ 2
793
+ 5
794
+ 10
795
+ 20
796
+ 1
797
+ 4
798
+ 10
799
+ 500
800
+ 750
801
+ 1000
802
+ 2000
803
+ 0.0
804
+ 0.0001 0.001
805
+ 0.1
806
+ laplace
807
+ gaussian
808
+ Number of Hidden Neurons
809
+ Number of Sub-Flows
810
+ Number of Epochs
811
+ L2-Penalty
812
+ PriorsCAREFL-M (0.8)
813
+ CAREFL-M (1.0)
814
+ X
815
+ CAREFL-M (0.8)
816
+ CAREFL-M (1.0)
817
+ CAREFL-H (0.8)
818
+ CAREFL-H (1.0)
819
+ 6.
820
+ CAREFL-H (0.8)
821
+ CAREFL-H (1.0)Cause-Effect Inference in Location-Scale Noise Models
822
+ Table 3: The best accuracy of each method on the SIM and T¨ubingen Cause-Effect Pairs benchmarks. For methods other
823
+ than CAREFL-M and CAREFL-H, we use the results reported in Immer et al. (2022).
824
+ (a) SIM Benchmarks
825
+ LOCI-M
826
+ LOCI-H
827
+ GRCI
828
+ BQCD
829
+ HECI
830
+ CAM
831
+ RESIT
832
+ CAREFL-M
833
+ CAREFL-H
834
+ SIM
835
+ 0.53
836
+ 0.79
837
+ 0.77
838
+ 0.62
839
+ 0.49
840
+ 0.57
841
+ 0.77
842
+ 0.55
843
+ 0.80
844
+ SIM-c
845
+ 0.50
846
+ 0.83
847
+ 0.77
848
+ 0.72
849
+ 0.55
850
+ 0.60
851
+ 0.82
852
+ 0.58
853
+ 0.85
854
+ SIM-ln
855
+ 0.79
856
+ 0.72
857
+ 0.77
858
+ 0.80
859
+ 0.65
860
+ 0.87
861
+ 0.87
862
+ 0.84
863
+ 0.83
864
+ SIM-G
865
+ 0.78
866
+ 0.82
867
+ 0.70
868
+ 0.64
869
+ 0.56
870
+ 0.81
871
+ 0.78
872
+ 0.82
873
+ 0.79
874
+ (b) Weighted Accuracy for T¨ubingen Cause-Effect Pairs Benchmark
875
+ LOCI-M
876
+ LOCI-H
877
+ GRCI
878
+ BQCD
879
+ HECI
880
+ CAM
881
+ RESIT
882
+ CAREFL-M
883
+ CAREFL-H
884
+ T¨ubingen
885
+ Cause-Effect
886
+ Pairs
887
+ 0.57
888
+ 0.64
889
+ 0.82
890
+ 0.77
891
+ 0.71
892
+ 0.58
893
+ 0.57
894
+ 0.73
895
+ 0.82
896
+ rons, sub-flows or training epochs is low. The reason is
897
+ that an IT approach requires more power to fit the LSNM
898
+ functions and produce a good reconstruction of the noise.
899
+ 6.2. Synthetic Benchmark
900
+ Similar to Tagasovska et al. (2020); Immer et al. (2022),
901
+ we compare CAREFL-M and CAREFL-H against the SIM
902
+ benchmark suite (Mooij et al., 2016). SIM comprises 4
903
+ sub-benchmarks: default (SIM), with one confounder (SIM-
904
+ c), low noise levels (SIM-ln) and Gaussian noise (SIM-G).
905
+ In this benchmark, most datasets do not have misleading
906
+ CVs (Appendix Table 4), which favors ML methods. Each
907
+ sub-benchmark contains 100 datasets and each dataset has
908
+ N = 1000 data pairs. As shown in Appendix Figure 21,
909
+ CAREFL-M and CAREFL-H(1.0) achieve similar accu-
910
+ racy on SIM-ln and SIM-G across different hyperparame-
911
+ ter choices. For SIM and SIM-c, CAREFL-H, especially
912
+ CAREFL-H(1.0), outperforms CAREFL-M by 20%-30%
913
+ in all settings. The accuracy of CAREFL-M is only about
914
+ random guess (40%-60%) on SIM and SIM-c.
915
+ Table 3a compares CAREFL-H with SOTA methods. For
916
+ each CAREFL method, we report the best accuracy ob-
917
+ tained with the hyperparameter settings considered in Ap-
918
+ pendix Figure 21, without further tuning.
919
+ CAREFL-H
920
+ achieves the best accuracy on SIM and SIM-c, and achieves
921
+ competitive accuracy on SIM-ln and SIM-G.
922
+ 6.3. Real-World Benchmark: T¨ubingen Cause-Effect
923
+ We compare CAREFL-M and CAREFL-H against real-
924
+ world datasets from the T¨ubingen Cause-Effect Pairs Bench-
925
+ mark (Mooij et al., 2016). The benchmark is commonly
926
+ used to evaluate cause-effect inference algorithms (Khe-
927
+ makhem et al., 2021; Xu et al., 2022; Immer et al., 2022).
928
+ To be consistent with previous work (Tagasovska et al.,
929
+ 2020; Strobl & Lasko, 2022; Immer et al., 2022), we ex-
930
+ clude 6 multivariate and 3 discrete datasets (#47, #52-#55,
931
+ #70, #71, #105, #107) and utilize the remaining 99 bivariate
932
+ datasets. As recommended by Mooij et al. (2016), we re-
933
+ port weighted accuracy. 40% of datasets in the benchmark
934
+ feature misleading CVs. As shown in Figure 2, CAREFL-
935
+ H(1.0) outperforms CAREFL-M in all configurations by
936
+ large margins (7%-30%).
937
+ We also compare CAREFL-H with SOTA methods. Fol-
938
+ lowing Khemakhem et al. (2021), we use a single set of
939
+ hyperparameters for all 99 datasets, found by grid search
940
+ (Appendix H). Table 3b shows that CAREFL-H achieves
941
+ the SOTA accuracy (82%). With the respectively best hyper-
942
+ parameter settings, CAREFL-H is 9% more accurate than
943
+ CAREFL-M (Khemakhem et al., 2021).
944
+ 7. Conclusion and Future Work
945
+ We identified a failure of maximum-likelihood (ML) meth-
946
+ ods for cause-effect inference in location-scale noise models.
947
+ Our analysis shows that the failure mode occurs when the
948
+ noise distribution is misspecified and conditional effect vari-
949
+ ances are misleading (i.e., higher in the causal direction).
950
+ Selecting causal models by independence tests (IT) is robust
951
+ even in this difficult setting. Extensive empirical evalu-
952
+ ation compared the ML approach and a new IT method
953
+ based on affine flows, using both synthetic and real-world
954
+ datasets. The IT flow method achieves better accuracy under
955
+ noise misspecification and misleading CVs, with robust per-
956
+ formance across different hyperparameter choices. Future
957
+ directions include improving the sample efficiency of IT
958
+ methods, and improving the robustness of ML methods by
959
+ learning the noise distribution instead of using a fixed prior.
960
+
961
+ Cause-Effect Inference in Location-Scale Noise Models
962
+ References
963
+ Colombo, D., Maathuis, M. H., Kalisch, M., and Richard-
964
+ son, T. S. Learning high-dimensional directed acyclic
965
+ graphs with latent and selection variables. The Annals of
966
+ Statistics, pp. 294–321, 2012.
967
+ Geffner, T., Antoran, J., Foster, A., Gong, W., Ma, C., Kici-
968
+ man, E., Sharma, A., Lamb, A., Kukla, M., Pawlowski,
969
+ N., et al. Deep end-to-end causal inference. arXiv preprint
970
+ arXiv:2202.02195, 2022.
971
+ Gretton, A., Bousquet, O., Smola, A., and Sch¨olkopf, B.
972
+ Measuring statistical dependence with hilbert-schmidt
973
+ norms. In International conference on algorithmic learn-
974
+ ing theory, pp. 63–77. Springer, 2005.
975
+ He, Y., Cui, P., Shen, Z., Xu, R., Liu, F., and Jiang, Y.
976
+ Daring: Differentiable causal discovery with residual
977
+ independence. In Proceedings of the 27th ACM SIGKDD
978
+ Conference on Knowledge Discovery & Data Mining, pp.
979
+ 596–605, 2021.
980
+ Hoyer, P., Janzing, D., Mooij, J. M., Peters, J., and
981
+ Sch¨olkopf, B. Nonlinear causal discovery with additive
982
+ noise models. Advances in neural information processing
983
+ systems, 21, 2008.
984
+ Huang, B. Diagnosis of autism spectrum disorder by causal
985
+ influence strength learned from resting-state fmri data.
986
+ In Neural Engineering Techniques for Autism Spectrum
987
+ Disorder, pp. 237–267. Elsevier, 2021.
988
+ Immer, A., Schultheiss, C., Vogt, J. E., Sch¨olkopf, B.,
989
+ B¨uhlmann, P., and Marx, A. On the identifiability and
990
+ estimation of causal location-scale noise models. arXiv
991
+ preprint arXiv:2210.09054, 2022.
992
+ Kalisch, M. and B¨uhlman, P. Estimating high-dimensional
993
+ directed acyclic graphs with the pc-algorithm. Journal of
994
+ Machine Learning Research, 8(3), 2007.
995
+ Khemakhem, I., Monti, R., Leech, R., and Hyvarinen, A.
996
+ Causal autoregressive flows. In International conference
997
+ on artificial intelligence and statistics, pp. 3520–3528.
998
+ PMLR, 2021.
999
+ Kingma, D. P. and Ba, J. Adam: A method for stochastic
1000
+ optimization. arXiv preprint arXiv:1412.6980, 2014.
1001
+ Mansouri, M., Khakabimamaghani, S., Chindelevitch, L.,
1002
+ and Ester, M. Aristotle: stratified causal discovery for
1003
+ omics data. BMC bioinformatics, 23(1):1–18, 2022.
1004
+ Mooij, J. M., Peters, J., Janzing, D., Zscheischler, J., and
1005
+ Sch¨olkopf, B. Distinguishing cause from effect using ob-
1006
+ servational data: methods and benchmarks. The Journal
1007
+ of Machine Learning Research, 17(1):1103–1204, 2016.
1008
+ Park, G. and Park, S. High-dimensional poisson structural
1009
+ equation model learning via \ell 1-regularized regression.
1010
+ J. Mach. Learn. Res., 20:95–1, 2019.
1011
+ Pearl, J. Causality. Cambridge university press, 2009.
1012
+ Pearl, J. and Mackenzie, D. The book of why: the new
1013
+ science of cause and effect. Basic books, 2018.
1014
+ Peters, J., Mooij, J. M., Janzing, D., and Sch¨olkopf, B.
1015
+ Causal discovery with continuous additive noise models.
1016
+ 2014.
1017
+ Peters, J., Janzing, D., and Sch¨olkopf, B. Elements of causal
1018
+ inference: foundations and learning algorithms. The MIT
1019
+ Press, 2017.
1020
+ Pfister, N., B¨uhlmann, P., Sch¨olkopf, B., and Peters, J.
1021
+ Kernel-based tests for joint independence. Journal of the
1022
+ Royal Statistical Society: Series B (Statistical Methodol-
1023
+ ogy), 80(1):5–31, 2018.
1024
+ Sch¨olkopf, B. Causality for machine learning. In Proba-
1025
+ bilistic and Causal Inference: The Works of Judea Pearl,
1026
+ pp. 765–804. 2022.
1027
+ Shimizu, S., Hoyer, P. O., Hyv¨arinen, A., Kerminen, A.,
1028
+ and Jordan, M. A linear non-gaussian acyclic model for
1029
+ causal discovery. Journal of Machine Learning Research,
1030
+ 7(10), 2006.
1031
+ Shimizu, S., Inazumi, T., Sogawa, Y., Hyvarinen, A., Kawa-
1032
+ hara, Y., Washio, T., Hoyer, P. O., Bollen, K., and Hoyer,
1033
+ P.
1034
+ Directlingam: A direct method for learning a lin-
1035
+ ear non-gaussian structural equation model. Journal of
1036
+ Machine Learning Research-JMLR, 12(Apr):1225–1248,
1037
+ 2011.
1038
+ Spirtes, P. and Glymour, C. An algorithm for fast recovery
1039
+ of sparse causal graphs. Social science computer review,
1040
+ 9(1):62–72, 1991.
1041
+ Strobl, E. V. and Lasko, T. A. Identifying patient-specific
1042
+ root causes with the heteroscedastic noise model. arXiv
1043
+ preprint arXiv:2205.13085, 2022.
1044
+ Tagasovska, N., Chavez-Demoulin, V., and Vatter, T. Dis-
1045
+ tinguishing cause from effect using quantiles: Bivariate
1046
+ quantile causal discovery. In International Conference
1047
+ on Machine Learning, pp. 9311–9323. PMLR, 2020.
1048
+ Xu, S., Mian, O. A., Marx, A., and Vreeken, J. Inferring
1049
+ cause and effect in the presence of heteroscedastic noise.
1050
+ In International Conference on Machine Learning, pp.
1051
+ 24615–24630. PMLR, 2022.
1052
+ Zhang, K. and Chan, L.-W. Extensions of ica for causality
1053
+ discovery in the hong kong stock market. In International
1054
+ Conference on Neural Information Processing, pp. 400–
1055
+ 409. Springer, 2006.
1056
+
1057
+ Cause-Effect Inference in Location-Scale Noise Models
1058
+ Zhang, K. and Hyvarinen, A.
1059
+ On the identifiability
1060
+ of the post-nonlinear causal model.
1061
+ arXiv preprint
1062
+ arXiv:1205.2599, 2012.
1063
+ Zheng, X., Aragam, B., Ravikumar, P. K., and Xing, E. P.
1064
+ Dags with no tears: Continuous optimization for structure
1065
+ learning. Advances in Neural Information Processing
1066
+ Systems, 31, 2018.
1067
+ Zheng, X., Dan, C., Aragam, B., Ravikumar, P., and Xing,
1068
+ E. Learning sparse nonparametric dags. In International
1069
+ Conference on Artificial Intelligence and Statistics, pp.
1070
+ 3414–3425. PMLR, 2020.
1071
+
1072
+ Cause-Effect Inference in Location-Scale Noise Models
1073
+ A. Derivation of Equation (2)
1074
+ Lemma A.1. For a LSNM model $X \to Y$ defined in Equation (1), we have $P_{Y|X}(Y|X) = P_{Z_Y}\!\left(\frac{Y - f(X)}{g(X)}\right) \cdot \frac{1}{g(X)}$, where
+ $P_{Z_Y}$ is the noise distribution.
1079
+ For the data distribution in the $X \to Y$ direction:
+ $P_{\to,P_Z}(X, Y) = P_X(X) \cdot P_{Y|X}(Y|X) = P_X(X) \cdot P_{Z_Y}\!\left(\frac{Y - f(X)}{g(X)}\right) \cdot \frac{1}{g(X)}$
1085
+ Similarly, in the $X \leftarrow Y$ direction:
+ $P_{\leftarrow,P_Z}(X, Y) = P_Y(Y) \cdot P_{X|Y}(X|Y) = P_Y(Y) \cdot P_{Z_X}\!\left(\frac{X - h(Y)}{k(Y)}\right) \cdot \frac{1}{k(Y)}$
1091
+ Proof for Lemma A.1. For a LSNM model defined in Equation (1), we have ZY = Y −f(X)
1092
+ g(X) . Hence, ∂ZY
1093
+ ∂Y
1094
+ =
1095
+ 1
1096
+ g(X). Since
1097
+ g(X) > 0, so | ∂ZY
1098
+ ∂Y | = |
1099
+ 1
1100
+ g(X)| =
1101
+ 1
1102
+ g(X).
1103
+ PY |X(Y |X)
1104
+ =PZY (ZY |X) · | det ∂ZY
1105
+ ∂Y |
1106
+ (via change of variables)
1107
+ =PZY (ZY ) · | det ∂ZY
1108
+ ∂Y |
1109
+ =PZY (Y − f(X)
1110
+ g(X)
1111
+ ) · |∂ZY
1112
+ ∂Y |
1113
+ =PZY (Y − f(X)
1114
+ g(X)
1115
+ ) ·
1116
+ 1
1117
+ g(X)
1118
+ B. Identifiability Proofs
1119
+ In this section, we prove Theorem 4.2.
1120
+ If the data (x, y) follows a LSNM in the forward (i.e. causal) model:
1121
+ y := f(x) + g(x) · zY
1122
+ where zX and zY are the noise terms, e ⊥⊥ zX, x ⊥⊥ zY , g(x) > 0 for all x on its domain. We assume f(·) and g(·) are
1123
+ twice-differentiable on the domain of x.
1124
+ If the data (x, y) follows a LSNM in the backward (i.e. anti-causal) model:
1125
+ x := h(y) + k(y) · mX
1126
+ where mY and mX are the noise terms, e′ ⊥⊥ mY , y ⊥⊥ mX, k(y) > 0 for all y on its domain. We assume h(·) and k(·)
1127
+ are twice-differentiable on the domain of y.
1128
+ zX, zY , mY and mX follow one of Uniform(a, b), Exponential(λ) or ContinuousBernoulli(λ) distribution accordingly.
1129
+ Proof for Uniform(a, b) Noise. For the causal model, according to Lemma A.1, we have PY |X(y|x) = PZY ( y−f(x)
1130
+ g(x) ) ·
1131
+ 1
1132
+ g(x) = PZY (zY ) ·
1133
+ 1
1134
+ g(x) =
1135
+ 1
1136
+ b−a ·
1137
+ 1
1138
+ g(x) =
1139
+ 1
1140
+ (b−a)·g(x). Similarly, for the backward model, we have PX|Y (x|y) =
1141
+ 1
1142
+ (b−a)·k(y).
1143
+
1144
+ Cause-Effect Inference in Location-Scale Noise Models
1145
+ The joint likelihood of the observation $(x, y)$ in the causal model is:
+ \[ P_{\to,P_Z}(x, y) = P_X(x) \cdot P_{Y|X}(y|x) = P_X(x) \cdot \frac{1}{(b - a) \cdot g(x)} \]
+ The joint likelihood of the observation $(x, y)$ in the backward model is:
+ \[ P_{\leftarrow,P_Z}(x, y) = P_Y(y) \cdot P_{X|Y}(x|y) = P_Y(y) \cdot \frac{1}{(b - a) \cdot k(y)} \]
+ If the data follows both models:
+ \[
+ \begin{aligned}
+ P_{\to,P_Z}(x, y) &= P_{\leftarrow,P_Z}(x, y) \\
+ P_X(x) \cdot \frac{1}{(b - a) \cdot g(x)} &= P_Y(y) \cdot \frac{1}{(b - a) \cdot k(y)} \\
+ P_X(x) \cdot \frac{1}{g(x)} &= P_Y(y) \cdot \frac{1}{k(y)}
+ \end{aligned}
+ \]
+ Take the derivative of both sides with respect to $x$ (the right-hand side does not depend on $x$):
+ \[
+ \begin{aligned}
+ P_X(x) \cdot \big(g(x)^{-1}\big)' + \big(P_X(x)\big)' \cdot g(x)^{-1} &= 0 \\
+ P_X(x) \cdot (-1) \cdot g(x)^{-2} \cdot g(x)' + 0 \cdot g(x)^{-1} &= 0 \\
+ P_X(x) \cdot (-1) \cdot g(x)^{-2} \cdot g(x)' &= 0
+ \end{aligned}
+ \]
+ Since $P_X(x) > 0$ and $g(x)^{-2} = \frac{1}{g(x)^2} > 0$, therefore $g(x)' = 0$.
+ Similarly, if we take the derivative of both sides with respect to $y$ instead, we have $k(y)' = 0$.
+ These imply that both $g(x)$ and $k(y)$ are constant functions.
+ Proof for Exponential($\lambda$) Noise. For the causal model, according to Lemma A.1, we have
+ \[ P_{Y|X}(y|x) = P_{Z_Y}\!\left(\frac{y - f(x)}{g(x)}\right) \cdot \frac{1}{g(x)} = \lambda \cdot e^{-\lambda \cdot \frac{y - f(x)}{g(x)}} \cdot \frac{1}{g(x)} = \frac{\lambda}{g(x)} \cdot e^{-\frac{\lambda}{g(x)} \cdot (y - f(x))}. \]
+ Similarly, for the backward model, we have $P_{X|Y}(x|y) = \frac{\lambda}{k(y)} \cdot e^{-\frac{\lambda}{k(y)} \cdot (x - h(y))}$.
+ The joint likelihood of the observation $(x, y)$ in the causal model is:
+ \[ P_{\to,P_Z}(x, y) = P_X(x) \cdot P_{Y|X}(y|x) = P_X(x) \cdot \frac{\lambda}{g(x)} \cdot e^{-\frac{\lambda}{g(x)} \cdot (y - f(x))} \]
+ \[ \log P_{\to,P_Z}(x, y) = \log P_X(x) + \log \lambda - \log g(x) - \frac{\lambda}{g(x)} \cdot (y - f(x)) \]
+ The joint likelihood of the observation $(x, y)$ in the backward model is:
+ \[ P_{\leftarrow,P_Z}(x, y) = P_Y(y) \cdot P_{X|Y}(x|y) = P_Y(y) \cdot \frac{\lambda}{k(y)} \cdot e^{-\frac{\lambda}{k(y)} \cdot (x - h(y))} \]
+ \[ \log P_{\leftarrow,P_Z}(x, y) = \log P_Y(y) + \log \lambda - \log k(y) - \frac{\lambda}{k(y)} \cdot (x - h(y)) \]
+
1212
+ Cause-Effect Inference in Location-Scale Noise Models
1213
+ If the data follows both models:
+ \[
+ \begin{aligned}
+ \log P_{\to,P_Z}(x, y) &= \log P_{\leftarrow,P_Z}(x, y) \\
+ \log P_X(x) + \log \lambda - \log g(x) - \frac{\lambda}{g(x)} \cdot (y - f(x)) &= \log P_Y(y) + \log \lambda - \log k(y) - \frac{\lambda}{k(y)} \cdot (x - h(y)) \\
+ \log P_X(x) - \log g(x) - \lambda \cdot g(x)^{-1} \cdot y + \lambda \cdot g(x)^{-1} \cdot f(x) &= \log P_Y(y) - \log k(y) - \lambda \cdot k(y)^{-1} \cdot x + \lambda \cdot k(y)^{-1} \cdot h(y)
+ \end{aligned}
+ \]
+ Take the derivative of both sides with respect to $x$:
+ \[ (\log P_X(x))' - (\log g(x))' - \lambda \cdot y \cdot (g(x)^{-1})' + \lambda \cdot (g(x)^{-1} \cdot f(x))' = -\lambda \cdot k(y)^{-1} \]
+ Take the derivative of both sides with respect to $y$:
+ \[ -\lambda \cdot (g(x)^{-1})' = -\lambda \cdot (k(y)^{-1})' \quad\Longrightarrow\quad \frac{\partial g(x)^{-1}}{\partial x} = \frac{\partial k(y)^{-1}}{\partial y} \]
+ They can be equal only if both sides are constants. Therefore, $g(x)^{-1}$ and $k(y)^{-1}$ are both constants or both linear functions with the same coefficient on $x$ and $y$, respectively.
+ Proof for ContinuousBernoulli($\lambda \neq 0.5$) Noise. Please refer to the Uniform case for ContinuousBernoulli($\lambda = 0.5$), which equals Uniform(0, 1). For the causal model, according to Lemma A.1, we have
+ \[ P_{Y|X}(y|x) = P_{Z_Y}\!\left(\frac{y - f(x)}{g(x)}\right) \cdot \frac{1}{g(x)} = C_\lambda \cdot \lambda^{\frac{y - f(x)}{g(x)}} \cdot (1 - \lambda)^{1 - \frac{y - f(x)}{g(x)}} \cdot \frac{1}{g(x)}, \]
+ where $C_\lambda$ is the normalizing constant of the continuous Bernoulli distribution. Similarly, for the backward model, we have
+ \[ P_{X|Y}(x|y) = C_\lambda \cdot \lambda^{\frac{x - h(y)}{k(y)}} \cdot (1 - \lambda)^{1 - \frac{x - h(y)}{k(y)}} \cdot \frac{1}{k(y)}. \]
+ The joint likelihood of the observation $(x, y)$ in the causal model is:
+ \[ P_{\to,P_Z}(x, y) = P_X(x) \cdot P_{Y|X}(y|x) = P_X(x) \cdot \frac{1}{g(x)} \cdot C_\lambda \cdot \lambda^{\frac{y - f(x)}{g(x)}} \cdot (1 - \lambda)^{1 - \frac{y - f(x)}{g(x)}} \]
+ \[ \log P_{\to,P_Z}(x, y) = \log P_X(x) - \log g(x) + \log C_\lambda + \log \lambda^{\frac{y - f(x)}{g(x)}} + \log (1 - \lambda)^{1 - \frac{y - f(x)}{g(x)}} \]
+ The joint likelihood of the observation $(x, y)$ in the backward model is:
+ \[ P_{\leftarrow,P_Z}(x, y) = P_Y(y) \cdot P_{X|Y}(x|y) = P_Y(y) \cdot \frac{1}{k(y)} \cdot C_\lambda \cdot \lambda^{\frac{x - h(y)}{k(y)}} \cdot (1 - \lambda)^{1 - \frac{x - h(y)}{k(y)}} \]
+ \[ \log P_{\leftarrow,P_Z}(x, y) = \log P_Y(y) - \log k(y) + \log C_\lambda + \log \lambda^{\frac{x - h(y)}{k(y)}} + \log (1 - \lambda)^{1 - \frac{x - h(y)}{k(y)}} \]
+
1280
+ Cause-Effect Inference in Location-Scale Noise Models
1281
+ If the data follows both models:
+ \[
+ \begin{aligned}
+ \log P_{\to,P_Z}(x, y) &= \log P_{\leftarrow,P_Z}(x, y) \\
+ \log P_X(x) - \log g(x) + \log C_\lambda + \log \lambda^{\frac{y - f(x)}{g(x)}} + \log (1 - \lambda)^{1 - \frac{y - f(x)}{g(x)}}
+ &= \log P_Y(y) - \log k(y) + \log C_\lambda + \log \lambda^{\frac{x - h(y)}{k(y)}} + \log (1 - \lambda)^{1 - \frac{x - h(y)}{k(y)}}
+ \end{aligned}
+ \]
+ Expanding both sides:
+ \[
+ \begin{aligned}
+ &\log P_X(x) - \log g(x) + \log C_\lambda + \frac{y - f(x)}{g(x)} \cdot \log \lambda + \Big(1 - \frac{y - f(x)}{g(x)}\Big) \cdot \log(1 - \lambda) \\
+ &= \log P_Y(y) - \log k(y) + \log C_\lambda + \frac{x - h(y)}{k(y)} \cdot \log \lambda + \Big(1 - \frac{x - h(y)}{k(y)}\Big) \cdot \log(1 - \lambda)
+ \end{aligned}
+ \]
+ i.e.,
+ \[
+ \begin{aligned}
+ &\log P_X(x) - \log g(x) + \log C_\lambda + g(x)^{-1} \log \lambda \cdot y - g(x)^{-1} \log \lambda \cdot f(x) + \log(1 - \lambda) - \log(1 - \lambda) \cdot g(x)^{-1} \cdot y + \log(1 - \lambda) \cdot g(x)^{-1} \cdot f(x) \\
+ &= \log P_Y(y) - \log k(y) + \log C_\lambda + k(y)^{-1} \log \lambda \cdot x - k(y)^{-1} \log \lambda \cdot h(y) + \log(1 - \lambda) - \log(1 - \lambda) \cdot k(y)^{-1} \cdot x + \log(1 - \lambda) \cdot k(y)^{-1} \cdot h(y)
+ \end{aligned}
+ \]
+ Take the derivative of both sides with respect to $x$:
+ \[
+ \begin{aligned}
+ &(\log P_X(x))' - g(x)^{-1} \cdot g(x)' - g(x)^{-2} \cdot g(x)' \cdot \log \lambda \cdot y - (g(x)^{-1} \cdot \log \lambda \cdot f(x))' + \log(1 - \lambda) \cdot g(x)^{-2} \cdot g(x)' \cdot y + (\log(1 - \lambda) \cdot g(x)^{-1} \cdot f(x))' \\
+ &= k(y)^{-1} \cdot \log \lambda - \log(1 - \lambda) \cdot k(y)^{-1}
+ \end{aligned}
+ \]
+ Take the derivative of both sides with respect to $y$:
+ \[
+ \begin{aligned}
+ - g(x)^{-2} \cdot g(x)' \cdot \log \lambda + \log(1 - \lambda) \cdot g(x)^{-2} \cdot g(x)' &= - k(y)^{-2} \cdot k(y)' \cdot \log \lambda + \log(1 - \lambda) \cdot k(y)^{-2} \cdot k(y)' \\
+ g(x)^{-2} \cdot g(x)' \cdot \log \lambda - \log(1 - \lambda) \cdot g(x)^{-2} \cdot g(x)' &= k(y)^{-2} \cdot k(y)' \cdot \log \lambda - \log(1 - \lambda) \cdot k(y)^{-2} \cdot k(y)' \\
+ g(x)^{-2} \cdot g(x)' \cdot (\log \lambda - \log(1 - \lambda)) &= k(y)^{-2} \cdot k(y)' \cdot (\log \lambda - \log(1 - \lambda))
+ \end{aligned}
+ \]
+ Since $\lambda \neq 0.5$, we have $\log \lambda - \log(1 - \lambda) \neq 0$, hence
+ \[ g(x)^{-2} \cdot g(x)' = k(y)^{-2} \cdot k(y)', \quad\text{i.e.}\quad \frac{\partial g(x)^{-1}}{\partial x} = \frac{\partial k(y)^{-1}}{\partial y} \]
+ They can be equal only if both sides are constants. Therefore, $g(x)^{-1}$ and $k(y)^{-1}$ are both constants or both linear functions with the same coefficient on $x$ and $y$, respectively.
+ C. CAREFL-M
+ CAREFL-M (Khemakhem et al., 2021) models a LSNM in Equation (1) via affine flows $\mathbf{T}$. Each sub-flow $T_k \in \mathbf{T}$ is defined as the following:
+ \[ X = t_1 + e^{s_1} \cdot Z_X \qquad Y = t_2(X) + e^{s_2(X)} \cdot Z_Y \tag{7} \]
+ where $X$ is the putative cause and $Y$ is the putative effect in the $X \to Y$ direction. $t_1$ and $s_1$ are constants. $t_2$ and $s_2$ are functions parameterized using neural networks. Without loss of generality, $X$ is assumed to be a function of the latent noise
+
1336
+ Cause-Effect Inference in Location-Scale Noise Models
1337
+ Algorithm 2 CAREFL-M
+ 1: Input: data pairs $D := (X, Y)$, and the flow estimator $\mathbf{T}$ with prior $P_Z$
+ 2: Output: estimated causal direction $dir$
+ 3: Split $D$ into training set $D_{train} := (X_{train}, Y_{train})$ and testing set $D_{test} := (X_{test}, Y_{test})$
+ 4: Optimize $\mathbf{T}_{\hat{t}_1,\hat{s}_1,\hat{t}_2,\hat{s}_2}(D_{train}; P_Z)$ in the $X \to Y$ direction via ML
+ 5: Compute the likelihood $\hat{L}_{\to,P_Z}(D_{test}; \mathbf{T}_{\hat{t}_1,\hat{s}_1,\hat{t}_2,\hat{s}_2})$ in the $X \to Y$ direction
+ 6: Optimize $\mathbf{T}_{\hat{t}'_1,\hat{s}'_1,\hat{t}'_2,\hat{s}'_2}(D_{train}; P_Z)$ in the $X \leftarrow Y$ direction via ML
+ 7: Compute the likelihood $\hat{L}_{\leftarrow,P_Z}(D_{test}; \mathbf{T}_{\hat{t}'_1,\hat{s}'_1,\hat{t}'_2,\hat{s}'_2})$ in the $X \leftarrow Y$ direction
+ 8: if $\hat{L}_{\to,P_Z}(D_{test}; \mathbf{T}_{\hat{t}_1,\hat{s}_1,\hat{t}_2,\hat{s}_2}) > \hat{L}_{\leftarrow,P_Z}(D_{test}; \mathbf{T}_{\hat{t}'_1,\hat{s}'_1,\hat{t}'_2,\hat{s}'_2})$ then
+ 9:     $dir := X \to Y$
+ 10: else if $\hat{L}_{\to,P_Z}(D_{test}; \mathbf{T}_{\hat{t}_1,\hat{s}_1,\hat{t}_2,\hat{s}_2}) < \hat{L}_{\leftarrow,P_Z}(D_{test}; \mathbf{T}_{\hat{t}'_1,\hat{s}'_1,\hat{t}'_2,\hat{s}'_2})$ then
+ 11:     $dir := X \leftarrow Y$
+ 12: else
+ 13:     $dir :=$ no conclusion
+ 14: end if
+ variable $Z_X$. If $t_1 = 0$ and $s_1 = 0$, then $X = Z_X$. The exponential function $e^{(\cdot)}$ ensures the multipliers to $Z$ are positive without loss of expressiveness. Similarly, for the backward direction $X \leftarrow Y$:
+ \[ Y = t'_1 + e^{s'_1} \cdot Z_Y \qquad X = t'_2(Y) + e^{s'_2(Y)} \cdot Z_X \tag{8} \]
+ where $Y$ is the putative cause and $X$ is the putative effect in the $X \leftarrow Y$ direction. $t'_1$ and $s'_1$ are constants. $t'_2$ and $s'_2$ are functions parameterized using neural networks.
+ Given Equation (7), the joint log-likelihood of $(x, y)$ in the $X \to Y$ direction is:
+ \[ \log P_{\to,P_Z}(x, y) = \log P_{Z_X}\!\big(e^{-s_1} \cdot (x - t_1)\big) + \log P_{Z_Y}\!\big(e^{-s_2(x)} \cdot (y - t_2(x))\big) - s_1 - s_2(x) \tag{9} \]
+ Similarly for the $X \leftarrow Y$ direction. Note that the priors $P_Z = \{P_{Z_X}, P_{Z_Y}\}$ in Equation (9) may mismatch the unknown ground-truth noise distribution $P^*_Z = \{P^*_{Z_X}, P^*_{Z_Y}\}$. Both CAREFL-M and CAREFL-H optimize Equation (9) for each direction over the training set. CAREFL-M chooses the direction with the larger ML score $L$ over the testing set as the estimated causal direction. The detailed procedure for CAREFL-M is given in Algorithm 2. To map the parameters $\theta$, $\psi$, $\zeta$, $\theta'$, $\psi'$ and $\zeta'$ in the LSNM (Equation (2)) to the flow estimator $\mathbf{T}$ of CAREFL (Equations (7) and (8)), we have $f_\theta \equiv t_2$, $g_\psi \equiv e^{s_2}$, $P_{X,\zeta} \equiv \{t_1, e^{s_1}\}$, $f_{\theta'} \equiv t'_2$, $g_{\psi'} \equiv e^{s'_2}$ and $P_{Y,\zeta'} \equiv \{t'_1, e^{s'_1}\}$.
+ D. CAREFL-H Alternative Independence Testing
+ In Algorithm 1, CAREFL-H tests independence between the putative cause and the residual of the putative effect in each direction, i.e., between $X$ and $\hat{Z}_Y$ in the $X \to Y$ direction, and between $Y$ and $\hat{Z}_X$ in the $X \leftarrow Y$ direction. An alternative way of testing independence is to test between the residual of the putative cause and the residual of the putative effect (He et al., 2021), i.e., between $\hat{Z}_X$ and $\hat{Z}_Y$ in both directions. Please see Algorithm 3 for the complete steps.
+ Theorem D.1. By optimizing the log-likelihood in the causal direction, whether or not under noise misspecification, CAREFL-H gives a reconstructed residual of the ground-truth cause (i.e. $\hat{Z}_X$) identical to the ground-truth cause (i.e. $X$) and to the latent noise variable of the cause (i.e. $Z_X$) up to shifting and scaling.
+ The proof is in Appendix E. Although we prove this only for the cause, empirically we find that in the causal direction the reconstructed residual of the ground-truth effect (i.e., $\hat{Z}_Y$) is also close to the latent noise variable of the effect (i.e., $Z_Y$). According to Theorem D.1, testing HSIC$(X, \hat{Z}_Y)$ in Algorithm 1 and HSIC$(\hat{Z}_X, \hat{Z}_Y)$ in Algorithm 3 for the causal direction are equivalent. The difference comes from testing HSIC$(Y, \hat{Z}_X)$ in Algorithm 1 versus HSIC$(\hat{Z}_X, \hat{Z}_Y)$ in Algorithm 3 for the anti-causal direction. Although in our experiments the two algorithms often produce the same estimate of the causal direction, we prefer Algorithm 1, since it relies on fewer estimations of the residuals.
+
1409
+ Cause-Effect Inference in Location-Scale Noise Models
1410
+ Algorithm 3 CAREFL-H (Between Residuals)
+ 1: Input: data pairs $D := (X, Y)$, the flow estimator $\mathbf{T}$ of CAREFL-M with prior $P_Z$, and an HSIC estimator
+ 2: Output: estimated causal direction $dir$
+ 3: Split $D$ into training set $D_{train} := (X_{train}, Y_{train})$ and testing set $D_{test} := (X_{test}, Y_{test})$
+ 4: Optimize $\mathbf{T}_{\hat{t}_1,\hat{s}_1,\hat{t}_2,\hat{s}_2}(D_{train}; P_Z)$ in the $X \to Y$ direction via ML to estimate $\hat{t}_1$, $\hat{s}_1$, $\hat{t}_2$ and $\hat{s}_2$
+ 5: Compute the residuals $\hat{Z}_{X,\to} := \frac{X_{test} - \hat{t}_1}{e^{\hat{s}_1}}$ and $\hat{Z}_{Y,\to} := \frac{Y_{test} - \hat{t}_2(X)}{e^{\hat{s}_2(X)}}$
+ 6: Optimize $\mathbf{T}_{\hat{t}'_1,\hat{s}'_1,\hat{t}'_2,\hat{s}'_2}(D_{train}; P_Z)$ in the $X \leftarrow Y$ direction via ML to estimate $\hat{t}'_1$, $\hat{s}'_1$, $\hat{t}'_2$ and $\hat{s}'_2$
+ 7: Compute the residuals $\hat{Z}_{Y,\leftarrow} := \frac{Y_{test} - \hat{t}'_1}{e^{\hat{s}'_1}}$ and $\hat{Z}_{X,\leftarrow} := \frac{X_{test} - \hat{t}'_2(Y)}{e^{\hat{s}'_2(Y)}}$
+ 8: if HSIC$(\hat{Z}_{X,\to}, \hat{Z}_{Y,\to})$ < HSIC$(\hat{Z}_{X,\leftarrow}, \hat{Z}_{Y,\leftarrow})$ then
+ 9:     $dir := X \to Y$
+ 10: else if HSIC$(\hat{Z}_{X,\to}, \hat{Z}_{Y,\to})$ > HSIC$(\hat{Z}_{X,\leftarrow}, \hat{Z}_{Y,\leftarrow})$ then
+ 11:     $dir := X \leftarrow Y$
+ 12: else
+ 13:     $dir :=$ no conclusion
+ 14: end if
+ E. Proof for Theorem D.1
+ In this section, we prove Theorem D.1.
+ The flow estimator $\mathbf{T}$ in CAREFL-M (Khemakhem et al., 2021) models a LSNM in Equation (1) as the following:
+ \[ X = t_1 + e^{s_1} \cdot Z_X \qquad Y = t_2(X) + e^{s_2(X)} \cdot Z_Y \tag{10} \]
+ where $X$ and $Y$ are the putative cause and effect, respectively. $Z_X$ and $Z_Y$ are assumed to follow a prior distribution, e.g. Gaussian(0, 1) or Laplace(0, 1). $t_1$ and $s_1$ are constants; this also means $Z_X$ is identical to $X$ up to shifting and scaling. If $t_1 = 0$ and $s_1 = 0$, then $X = Z_X$. $t_2$ and $s_2$ are functions parameterized using neural networks. Let $(x^n, y^n)$, where $n \in \{1, \ldots, N\}$, be the $n$-th data pair.
+ Fact 1, invert Equation (10):
+ \[ z^n_X = e^{-s_1} \cdot (x^n - t_1) \qquad z^n_Y = e^{-s_2(x^n)} \cdot (y^n - t_2(x^n)) \]
+ Fact 2, how $\hat{z}^n_X$ and $\hat{z}^n_Y$ are computed in the flow estimator $\mathbf{T}$:
+ \[ \hat{z}^n_X = e^{-\hat{s}_1} \cdot (x^n - \hat{t}_1) \qquad \hat{z}^n_Y = e^{-\hat{s}_2(x^n)} \cdot (y^n - \hat{t}_2(x^n)) \]
+
1461
+ Cause-Effect Inference in Location-Scale Noise Models
1462
+ Fact 3, how the flow estimator $\mathbf{T}$ is trained via ML (possibly under noise misspecification):
+ \[
+ \begin{aligned}
+ p_X(x^n, y^n) &= p_{Z_X}(\hat{z}^n_X) \cdot p_{Z_Y}(\hat{z}^n_Y) \cdot \left| \det \begin{pmatrix} \frac{\partial \hat{z}^n_X}{\partial x^n} & \frac{\partial \hat{z}^n_X}{\partial y^n} \\ \frac{\partial \hat{z}^n_Y}{\partial x^n} & \frac{\partial \hat{z}^n_Y}{\partial y^n} \end{pmatrix} \right| \\
+ &= p_{Z_X}(e^{-\hat{s}_1} \cdot (x^n - \hat{t}_1)) \cdot p_{Z_Y}(e^{-\hat{s}_2(x^n)} \cdot (y^n - \hat{t}_2(x^n))) \cdot \left| \det \begin{pmatrix} e^{-\hat{s}_1} & 0 \\ \frac{\partial \hat{z}^n_Y}{\partial x^n} & e^{-\hat{s}_2(x^n)} \end{pmatrix} \right| \\
+ &= p_{Z_X}(e^{-\hat{s}_1} \cdot (x^n - \hat{t}_1)) \cdot p_{Z_Y}(e^{-\hat{s}_2(x^n)} \cdot (y^n - \hat{t}_2(x^n))) \cdot |e^{-\hat{s}_1} \cdot e^{-\hat{s}_2(x^n)} - 0| \\
+ &= p_{Z_X}(e^{-\hat{s}_1} \cdot (x^n - \hat{t}_1)) \cdot p_{Z_Y}(e^{-\hat{s}_2(x^n)} \cdot (y^n - \hat{t}_2(x^n))) \cdot e^{-\hat{s}_1} \cdot e^{-\hat{s}_2(x^n)} \\
+ &= p_{Z_X}(e^{-\hat{s}_1} \cdot (e^{s_1} \cdot z^n_X + t_1 - \hat{t}_1)) \cdot p_{Z_Y}(e^{-\hat{s}_2(x^n)} \cdot (e^{s_2(x^n)} \cdot z^n_Y + t_2(x^n) - \hat{t}_2(x^n))) \cdot e^{-\hat{s}_1} \cdot e^{-\hat{s}_2(x^n)} \\
+ &= p_{Z_X}(e^{s_1 - \hat{s}_1} \cdot z^n_X + e^{-\hat{s}_1} \cdot (t_1 - \hat{t}_1)) \cdot e^{-\hat{s}_1} \cdot p_{Z_Y}(e^{s_2(x^n) - \hat{s}_2(x^n)} \cdot z^n_Y + e^{-\hat{s}_2(x^n)} \cdot (t_2(x^n) - \hat{t}_2(x^n))) \cdot e^{-\hat{s}_2(x^n)}
+ \end{aligned}
+ \]
+ Assume the priors $p_{Z_X}$ and $p_{Z_Y}$ are standard Gaussian $N(0, 1)$ for mathematical convenience. The proof is analogous with other prior distributions, e.g., the standard Laplace. We do not make assumptions on the ground-truth noise distribution, which allows noise misspecification. Using the PDF of $N(x; 0, 1)$, $\frac{1}{\sqrt{2\pi}} e^{-\frac{1}{2} x^2}$:
+ \[
+ \begin{aligned}
+ p_X(x^n, y^n) &= N(e^{s_1 - \hat{s}_1} \cdot z^n_X + e^{-\hat{s}_1} \cdot (t_1 - \hat{t}_1); \mu = 0, \sigma^2 = 1) \cdot e^{-\hat{s}_1} \\
+ &\quad \cdot N(e^{s_2(x^n) - \hat{s}_2(x^n)} \cdot z^n_Y + e^{-\hat{s}_2(x^n)} \cdot (t_2(x^n) - \hat{t}_2(x^n)); \mu = 0, \sigma^2 = 1) \cdot e^{-\hat{s}_2(x^n)} \\
+ &= \frac{1}{\sqrt{2\pi}} e^{-\frac{1}{2} (e^{s_1 - \hat{s}_1} \cdot z^n_X + e^{-\hat{s}_1} \cdot (t_1 - \hat{t}_1))^2} \cdot e^{-\hat{s}_1} \cdot \frac{1}{\sqrt{2\pi}} e^{-\frac{1}{2} (e^{s_2(x^n) - \hat{s}_2(x^n)} \cdot z^n_Y + e^{-\hat{s}_2(x^n)} \cdot (t_2(x^n) - \hat{t}_2(x^n)))^2} \cdot e^{-\hat{s}_2(x^n)}
+ \end{aligned}
+ \]
+ \[
+ \begin{aligned}
+ \ln p_X(x^n, y^n) &= \ln \frac{1}{\sqrt{2\pi}} - \frac{1}{2} (e^{s_1 - \hat{s}_1} \cdot z^n_X + e^{-\hat{s}_1} \cdot (t_1 - \hat{t}_1))^2 - \hat{s}_1 \\
+ &\quad + \ln \frac{1}{\sqrt{2\pi}} - \frac{1}{2} (e^{s_2(x^n) - \hat{s}_2(x^n)} \cdot z^n_Y + e^{-\hat{s}_2(x^n)} \cdot (t_2(x^n) - \hat{t}_2(x^n)))^2 - \hat{s}_2(x^n)
+ \end{aligned}
+ \]
+ Lemma E.1. To maximize the log-likelihood, whether or not under noise misspecification, $\hat{t}_1 = E[X]$, a constant.
+ Lemma E.2. To maximize the log-likelihood, whether or not under noise misspecification, $\hat{s}_1 = E[\ln|\hat{t}_1 - X|]$, a constant.
+ \[
+ \begin{aligned}
+ \hat{z}^n_X &= e^{-\hat{s}_1} \cdot (x^n - \hat{t}_1) \\
+ &= e^{-E[\ln|\hat{t}_1 - X|]} \cdot (x^n - \hat{t}_1) \\
+ &= e^{-E[\ln|E[X] - X|]} \cdot (x^n - E[X]) \\
+ &= C_1 \cdot (x^n - C_2)
+ \end{aligned}
+ \]
+ Therefore, $\hat{Z}_X$ and $X$ are identically distributed up to shifting and scaling.
+ Since $x^n := t_1 + e^{s_1} \cdot z^n_X$, we have:
+ \[
+ \begin{aligned}
+ \hat{z}^n_X &= C_1 \cdot (x^n - C_2) \\
+ &= C_1 \cdot (e^{s_1} \cdot z^n_X + t_1 - C_2) \\
+ &= C_1 \cdot e^{s_1} \cdot z^n_X + C_1 \cdot t_1 - C_1 \cdot C_2 \\
+ &= C_3 \cdot z^n_X + C_4 - C_5 \\
+ &= C_3 \cdot z^n_X + C_6
+ \end{aligned}
+ \]
+ Therefore, $\hat{Z}_X$ and $Z_X$ are also identically distributed up to shifting and scaling.
+ Proof for Lemma E.1.
+ \[ \frac{\partial \ln p_X(x^n, y^n)}{\partial \hat{t}_1} = e^{-\hat{s}_1} \cdot (z^n_X \cdot e^{s_1 - \hat{s}_1} + e^{-\hat{s}_1} \cdot (t_1 - \hat{t}_1)) \]
+ Setting $e^{-\hat{s}_1} \cdot (z^n_X \cdot e^{s_1 - \hat{s}_1} + e^{-\hat{s}_1} \cdot (t_1 - \hat{t}_1)) = 0$ gives $\hat{t}_1 = t_1 + z^n_X \cdot e^{s_1} = x^n$.
+ Similarly, for $\frac{\partial \ln p_X(x^1, y^1)}{\partial \hat{t}_1}$, $\hat{t}_1 = x^1$; for $\frac{\partial \ln p_X(x^2, y^2)}{\partial \hat{t}_1}$, $\hat{t}_1 = x^2$. Therefore,
+ \[ \hat{t}_1 = \frac{1}{N} \sum_{n=1}^{N} x^n = E[X] \]
+ Since $\frac{\partial^2 \ln p_X(x^n, y^n)}{\partial \hat{t}_1^2}$ is negative, this $\hat{t}_1$ maximizes the log-likelihood.
+ Proof for Lemma E.2.
+ \[ \frac{\partial \ln p_X(x^n, y^n)}{\partial \hat{s}_1} = (-z^n_X \cdot e^{s_1 - \hat{s}_1} - (t_1 - \hat{t}_1) \cdot e^{-\hat{s}_1})^2 - 1 \]
+ Setting $(-z^n_X \cdot e^{s_1 - \hat{s}_1} - (t_1 - \hat{t}_1) \cdot e^{-\hat{s}_1})^2 - 1 = 0$ gives
+ \[ \pm e^{\hat{s}_1} = \hat{t}_1 - x^n, \quad\text{i.e.}\quad e^{\hat{s}_1} = \pm(\hat{t}_1 - x^n) \]
+ Since $e^{(\cdot)}$ must be positive,
+ \[ e^{\hat{s}_1} = |\hat{t}_1 - x^n|, \quad\text{i.e.}\quad \hat{s}_1 = \ln|\hat{t}_1 - x^n| \]
+ Similarly, for $\frac{\partial \ln p_X(x^1, y^1)}{\partial \hat{s}_1}$, $\hat{s}_1 = \ln|\hat{t}_1 - x^1|$; for $\frac{\partial \ln p_X(x^2, y^2)}{\partial \hat{s}_1}$, $\hat{s}_1 = \ln|\hat{t}_1 - x^2|$. Therefore,
+ \[ \hat{s}_1 = \frac{1}{N} \sum_{n=1}^{N} \ln|\hat{t}_1 - x^n| = E[\ln|\hat{t}_1 - X|] \]
+ Since $\frac{\partial^2 \ln p_X(x^n, y^n)}{\partial \hat{s}_1^2}$ is negative, this $\hat{s}_1$ maximizes the log-likelihood.
+
1620
+ Cause-Effect Inference in Location-Scale Noise Models
1621
+ F. Default and Alternative Hyperparameter Values Used in Section 6
+ We use the hyperparameter values reported for CAREFL-M (Khemakhem et al., 2021) on the Tübingen Cause-Effect Pairs Benchmark (Mooij et al., 2016) as the default hyperparameter values in all our experiments:
+ • The flow estimator $\mathbf{T}$ is parameterized with 4 sub-flows (alternatively: 1, 7 and 10).
+ • For each sub-flow, $f$, $g$, $h$ and $k$ are modelled as four-layer MLPs with 5 hidden neurons in each layer (alternatively: 2, 10 and 20).
+ • The prior distribution is Laplace (alternatively: a Gaussian prior).
+ • The Adam optimizer (Kingma & Ba, 2014) is used to train each model for 750 epochs (alternatively: 500, 1000 and 2000).
+ • The L2-penalty strength is 0 by default (alternatively: 0.0001, 0.001, 0.1).
+ Although we also observe that LOCI-H is more robust than LOCI-M under noise misspecification and misleading CVs, we omit their results because: (1) CAREFL often outperforms LOCI (see Tables 1 and 3). (2) LOCI is fixed to a Gaussian distribution, whereas CAREFL can specify different prior distributions. (3) For conciseness: LOCI uses a different set of hyperparameters than CAREFL, so their results cannot be merged into the same figures.
+ G. Synthetic SCMs Used in Section 6.1
+ We use the following SCMs to generate synthetic datasets:
+ • LSNM-tanh-exp-cosine: $Y := \tanh(X \cdot \theta_1) \cdot \theta_2 + e^{\cos(X \cdot \psi_1) \cdot \psi_2} \cdot Z_Y$
+ • LSNM-sine-tanh: $Y := \sin(X \cdot \theta_1) \cdot \theta_2 + (\tanh(X \cdot \psi) + \phi) \cdot Z_Y$
+ • LSNM-sigmoid-sigmoid: $Y := \sigma(X \cdot \theta_1) \cdot \theta_2 + \sigma(X \cdot \psi_1) \cdot \psi_2 \cdot Z_Y$
+ where $Z_Y$ is the ground-truth noise sampled from one of the following distributions: ContinuousBernoulli(0.9), Uniform(−1, 1), Exponential(1), Beta(0.5, 0.5), Gaussian(0, 1) and Laplace(0, 1), and $\sigma$ is the sigmoid function. Although we did not prove identifiability with Beta(0.5, 0.5) noise in Theorem 4.2, empirically we find it is identifiable.
+ Following (Zheng et al., 2018; 2020), each $\theta$ and $\psi$ is sampled uniformly from the range $[-2, -0.5] \cup [0.5, 2]$. $\phi$ is sampled uniformly from the range $[1, 2]$ to make the tanh term positive. The number of data pairs in each synthetic dataset is $N \in \{500, 5000\}$.
+ H. Hyperparameter Values of CAREFL-H For Table 3b
+ To acquire the result of CAREFL-H in Table 3b, the hyperparameter values used are as follows:
+ • Number of hidden neurons in each layer of the MLPs: 2
+ • Number of sub-flows: 10
+ • Training dataset = testing dataset = 100%.
+ The rest of the hyperparameter values are identical to the default ones.
+
1652
+ Cause-Effect Inference in Location-Scale Noise Models
1653
+ Table 4: (Best viewed in color) Summary of datasets. Settings in Section 6.1 are color-coded. (1) Blue: with noise
1654
+ misspecification; (2) Red: with more than 50% of datasets having misleading CVs; (3) Brown: both (1) and (2).
1655
+ Type
1656
+ Name
1657
+ Noise
1658
+ α
1659
+ Number of
1660
+ Datasets
1661
+ Percentage of Datasets
1662
+ With Misleading CVs
1663
+ (%)
1664
+ Synthetic
1665
+ (Table 1)
1666
+ LSNM-sine-tanh
1667
+ Gaussian(0, 1)
1668
+ 0.1
1669
+ 10
1670
+ 30
1671
+ 0.5
1672
+ 10
1673
+ 50
1674
+ 1
1675
+ 10
1676
+ 70
1677
+ 5
1678
+ 10
1679
+ 100
1680
+ 10
1681
+ 10
1682
+ 100
1683
+ LSNM-sine-tanh
1684
+ Uniform(−1, 1)
1685
+ 0.1
1686
+ 10
1687
+ 30
1688
+ 0.5
1689
+ 10
1690
+ 90
1691
+ 1
1692
+ 10
1693
+ 100
1694
+ 5
1695
+ 10
1696
+ 100
1697
+ 10
1698
+ 10
1699
+ 100
1700
+ Synthetic
1701
+ (Section 6.1)
1702
+ LSNM-tanh-exp-cosine
1703
+ Uniform(−1, 1)
1704
+ N/A
1705
+ 10
1706
+ 60
1707
+ Beta(0.5, 0.5)
1708
+ 10
1709
+ 80
1710
+ ContinuousBernoulli(0.9)
1711
+ 10
1712
+ 70
1713
+ Exponential(1)
1714
+ 10
1715
+ 20
1716
+ Gaussian(0, 1)
1717
+ 10
1718
+ 40
1719
+ Laplace(0, 1)
1720
+ 10
1721
+ 40
1722
+ LSNM-sine-tanh
1723
+ Uniform(−1, 1)
1724
+ N/A
1725
+ 10
1726
+ 100
1727
+ Beta(0.5, 0.5)
1728
+ 10
1729
+ 100
1730
+ ContinuousBernoulli(0.9)
1731
+ 10
1732
+ 60
1733
+ Exponential(1)
1734
+ 10
1735
+ 20
1736
+ Gaussian(0, 1)
1737
+ 10
1738
+ 60
1739
+ Laplace(0, 1)
1740
+ 10
1741
+ 80
1742
+ LSNM-sigmoid-sigmoid
1743
+ Uniform(−1, 1)
1744
+ N/A
1745
+ 10
1746
+ 90
1747
+ Beta(0.5, 0.5)
1748
+ 10
1749
+ 100
1750
+ ContinuousBernoulli(0.9)
1751
+ 10
1752
+ 80
1753
+ Exponential(1)
1754
+ 10
1755
+ 70
1756
+ Gaussian(0, 1)
1757
+ 10
1758
+ 90
1759
+ Laplace(0, 1)
1760
+ 10
1761
+ 100
1762
+ Synthetic
1763
+ (Section 6.2)
1764
+ SIM
1765
+ N/A
1766
+ N/A
1767
+ 100
1768
+ 40
1769
+ SIM-c
1770
+ 100
1771
+ 46
1772
+ SIM-ln
1773
+ 100
1774
+ 23
1775
+ SIM-G
1776
+ 100
1777
+ 29
1778
+ Real-World
1779
+ (Section 6.3)
1780
+ T¨ubingen Cause-Effect Pairs
1781
+ N/A
1782
+ N/A
1783
+ 99
1784
+ 40
1785
+
1786
+ Cause-Effect Inference in Location-Scale Noise Models
1787
+ (a) N = 500
1788
+ (b) N = 5000
1789
+ Figure 3: Accuracy over 10 datasets: LSNM-tanh-exp-cosine and Uniform(−1, 1) noise.
1790
+ (a) N = 500
1791
+ (b) N = 5000
1792
+ Figure 4: Accuracy over 10 datasets: LSNM-tanh-exp-cosine and Beta(0.5, 0.5) noise.
1793
+
1794
+ 1.0-
1795
+ 1.0
1796
+ 1.0
1797
+ 1.0
1798
+ 0.5 -
1799
+ 0.5 -
1800
+ 0.5 -
1801
+ 0.5 -
1802
+ 0.5 -
1803
+ 0.0
1804
+ 0.0
1805
+ 0.0
1806
+ 0.0
1807
+ 0.0
1808
+ 2
1809
+ 5
1810
+ 10
1811
+ 20
1812
+ 1
1813
+ 4
1814
+ 7
1815
+ 10
1816
+ 500
1817
+ 750
1818
+ 10002000
1819
+ 0.0
1820
+ 0.00010.001
1821
+ 0.1
1822
+ laplace
1823
+ gaussian
1824
+ Number of Hidden Neurons
1825
+ Number of Sub-Flows
1826
+ Number of Epochs
1827
+ L2-Penalty
1828
+ PriorsCAREFL-M (0.8)
1829
+ CAREFL-M (1.0)
1830
+ X
1831
+ CAREFL-M (0.8)
1832
+ CAREFL-M (1.0)
1833
+ CAREFL-H (0.8)
1834
+ CAREFL-H (1.0)
1835
+ 6.
1836
+ CAREFL-H (0.8)
1837
+ CAREFL-H (1.0)1.0
1838
+ 1.0
1839
+ 1.0
1840
+ 1.0
1841
+ 1.0
1842
+ Accuracy
1843
+ 0.5
1844
+ 0.5
1845
+ 0.5
1846
+ 0.5
1847
+ 0.5
1848
+ 0.0
1849
+ 0.0
1850
+ 0.0
1851
+ 0.0
1852
+ 0.0
1853
+ 2
1854
+ 5
1855
+ 10
1856
+ 20
1857
+ 1
1858
+ 4
1859
+ 10
1860
+ 500
1861
+ 750
1862
+ 1000
1863
+ 2000
1864
+ 0.0
1865
+ 0.00010.001
1866
+ 0.1
1867
+ laplace
1868
+ gaussian
1869
+ Number of Hidden Neurons
1870
+ Number of Sub-Flows
1871
+ Number of Epochs
1872
+ L2-Penalty
1873
+ Priors1.0
1874
+ 1.0
1875
+ 1.0
1876
+ 1.0
1877
+ 1.0
1878
+ Accuracy
1879
+ 0.5
1880
+ 0.5
1881
+ 0.5 -
1882
+ 0.5
1883
+ 0.5 -
1884
+ 0.0
1885
+ 0.0
1886
+ 0.0
1887
+ 0.0
1888
+ 0.0
1889
+ 2
1890
+ 5
1891
+ 10
1892
+ 20
1893
+ 1
1894
+ 4
1895
+ 7
1896
+ 10
1897
+ 500
1898
+ 750
1899
+ 1000
1900
+ 2000
1901
+ 0.0
1902
+ 0.00010.001
1903
+ 0.1
1904
+ laplace
1905
+ gaussian
1906
+ Number of Hidden Neurons
1907
+ Number of Sub-Flows
1908
+ Numberof Epochs
1909
+ L2-Penalty
1910
+ Priors1.0
1911
+ 1.0
1912
+ 1.0 -
1913
+ 1.0
1914
+ 0.5 -
1915
+ 0.5 -
1916
+ 0.5 -
1917
+ 0.5
1918
+ 0.5 -
1919
+ 0.0
1920
+ 0.0
1921
+ 0.0
1922
+ 0.0
1923
+ 0.0
1924
+ 2
1925
+ 5
1926
+ 10
1927
+ 20
1928
+ 1
1929
+ 4
1930
+ 7
1931
+ 10
1932
+ 500
1933
+ 750
1934
+ 10002000
1935
+ 0.0
1936
+ 0.00010.001
1937
+ 0.1
1938
+ laplace
1939
+ gaussian
1940
+ Number of Hidden Neurons
1941
+ Number of Sub-Flows
1942
+ Number of Epochs
1943
+ L2-Penalty
1944
+ PriorsCause-Effect Inference in Location-Scale Noise Models
1945
+ (a) N = 500
1946
+ (b) N = 5000
1947
+ Figure 5: Accuracy over 10 datasets: LSNM-tanh-exp-cosine and ContinuousBernoulli(0.9) noise.
1948
+ (a) N = 500
1949
+ (b) N = 5000
1950
+ Figure 6: Accuracy over 10 datasets: LSNM-tanh-exp-cosine and Exponential(1) noise.
1951
+
1952
+ 1.0
1953
+ 1.0
1954
+ 1.0
1955
+ 1.0
1956
+ 1.0
1957
+ X
1958
+ 0.5
1959
+ 0.5
1960
+ 0.5
1961
+ 0.5
1962
+ 0.5
1963
+ 0.0
1964
+ 0.0
1965
+ 0.0
1966
+ 0.0
1967
+ 0.0
1968
+ 2
1969
+ 5
1970
+ 10
1971
+ 20
1972
+ +
1973
+ 4
1974
+ 7
1975
+ 10
1976
+ 500
1977
+ 750
1978
+ 1000
1979
+ 2000
1980
+ 0.0
1981
+ 0.00010.001
1982
+ 0.1
1983
+ laplace
1984
+ gaussian
1985
+ Number of Hidden Neurons
1986
+ Number of Sub-Flows
1987
+ Number of Epochs
1988
+ L2-Penalty
1989
+ Priors1.0 -
1990
+ 1.0
1991
+ 1.0
1992
+ 1.0
1993
+ 1.0
1994
+ 0.5
1995
+ 0.5
1996
+ 0.5
1997
+ 0.5
1998
+ 0.5
1999
+ 0.0
2000
+ 0.0
2001
+ 0.0
2002
+ T
2003
+ 0.0
2004
+ 0.0
2005
+ 2
2006
+ 5
2007
+ 10
2008
+ 20
2009
+ +
2010
+ 4
2011
+ 7
2012
+ 10
2013
+ 500
2014
+ 750
2015
+ 10002000
2016
+ 0.0
2017
+ 0.00010.001
2018
+ 0.1
2019
+ laplace
2020
+ gaussian
2021
+ Number of Hidden Neurons
2022
+ Number of Sub-Flows
2023
+ Numberof Epochs
2024
+ L2-Penalty
2025
+ Priors1.0
2026
+ 1.0
2027
+ 1.0
2028
+ 1.0
2029
+ 1.0
2030
+ 0.5
2031
+ 0.5
2032
+ 0.5 -
2033
+ 0.5
2034
+ 0.5
2035
+ X
2036
+ 0.0
2037
+ 0.0
2038
+ 0.0
2039
+ 0.0
2040
+ 0.0
2041
+ 2
2042
+ 5
2043
+ 10
2044
+ 20
2045
+ 1
2046
+ 4
2047
+ 7
2048
+ 10
2049
+ 500
2050
+ 750
2051
+ 1000
2052
+ 2000
2053
+ 0.0
2054
+ 0.00010.001
2055
+ 0.1
2056
+ laplace
2057
+ gaussian
2058
+ Number of Hidden Neurons
2059
+ NumberofSub-Flows
2060
+ Number of Epochs
2061
+ L2-Penalty
2062
+ Priors1.0
2063
+ 1.0
2064
+ 1.0
2065
+ 1.0
2066
+ 1.0
2067
+ 0
2068
+ 0.5
2069
+ 0.5 -
2070
+ 0.5 -
2071
+ 0.5 -
2072
+ 0.5
2073
+ 0.0
2074
+ 0.0
2075
+ 0.0
2076
+ 0.0
2077
+ 0.0
2078
+ 2
2079
+ 10
2080
+ 20
2081
+ 1
2082
+ 4
2083
+ 7
2084
+ 10
2085
+ 500
2086
+ 750
2087
+ 1000
2088
+ 2000
2089
+ 0.0
2090
+ 0.00010.001
2091
+ 0.1
2092
+ laplace
2093
+ gaussian
2094
+ Number of Hidden Neurons
2095
+ Number of Sub-Flows
2096
+ Number of Epochs
2097
+ L2-Penalty
2098
+ PriorsCAREFL-M (0.8)
2099
+ CAREFL-M (1.0)
2100
+ X
2101
+ CAREFL-M (0.8)
2102
+ CAREFL-M (1.0)
2103
+ CAREFL-H (0.8)
2104
+ CAREFL-H (1.0)
2105
+ 6.
2106
+ CAREFL-H (0.8)
2107
+ CAREFL-H (1.0)Cause-Effect Inference in Location-Scale Noise Models
2108
+ (a) N = 500
2109
+ (b) N = 5000
2110
+ Figure 7: Accuracy over 10 datasets: LSNM-sine-tanh and Uniform(−1, 1) noise.
2111
+ (a) N = 500
2112
+ (b) N = 5000
2113
+ Figure 8: Accuracy over 10 datasets: LSNM-sine-tanh and Beta(0.5, 0.5) noise.
2114
+
2115
+ 1.0
2116
+ 1.0
2117
+ 1.0
2118
+ 1.0
2119
+ 1.0
2120
+ 0.5
2121
+ 0.5
2122
+ 0.5
2123
+ 0.5
2124
+ 0.5
2125
+ 0.0
2126
+ 0.0
2127
+ 0.0
2128
+ 0.0
2129
+ 0.0
2130
+ 2
2131
+ 5
2132
+ 10
2133
+ 20
2134
+ +
2135
+ 4
2136
+ 7
2137
+ 10
2138
+ 500
2139
+ 750
2140
+ 10002000
2141
+ 0.0
2142
+ 0.00010.001
2143
+ 0.1
2144
+ laplace
2145
+ gaussian
2146
+ Number of Hidden Neurons
2147
+ NumberofSub-Flows
2148
+ Number of Epochs
2149
+ L2-Penalty
2150
+ Priors1.0
2151
+ 1.0
2152
+ 1.0
2153
+ 1.0
2154
+ Accuracy
2155
+ 0.5
2156
+ 0.5
2157
+ 0.5 -
2158
+ 0.5
2159
+ 0.5
2160
+ 0.0
2161
+ 0.0-
2162
+ 0.0
2163
+ 0.0
2164
+ 0.0
2165
+ 2
2166
+ 5
2167
+ 10
2168
+ 20
2169
+ 1
2170
+ 4
2171
+ 7
2172
+ 10
2173
+ 500
2174
+ 750
2175
+ 1000
2176
+ 2000
2177
+ 0.0
2178
+ 0.00010.001
2179
+ 0.1
2180
+ laplace
2181
+ gaussian
2182
+ Number of Hidden Neurons
2183
+ Number of Sub-Flows
2184
+ Numberof Epochs
2185
+ L2-Penalty
2186
+ Priors1.0
2187
+ 1.0
2188
+ 1.0
2189
+ 1.0
2190
+ 1.0
2191
+ X
2192
+ 0.5
2193
+ 0.5
2194
+ 0.5
2195
+ 0.5
2196
+ 0.5 -
2197
+ 0.0
2198
+ 0.0
2199
+ 0.0
2200
+ 0.0
2201
+ 0.0
2202
+ 2
2203
+ 5
2204
+ 10
2205
+ 20
2206
+ 1
2207
+ 4
2208
+ 7
2209
+ 10
2210
+ 500
2211
+ 750
2212
+ 10002000
2213
+ 0.0
2214
+ 0.00010.001
2215
+ 0.1
2216
+ laplace
2217
+ gaussian
2218
+ Number of Hidden Neurons
2219
+ Number of Sub-Flows
2220
+ Number of Epochs
2221
+ L2-Penalty
2222
+ Priors1.0
2223
+ 1.0
2224
+ 1.0
2225
+ 1.0
2226
+ 0.5
2227
+ 0.5 -
2228
+ 0.5 -
2229
+ 0.5 -
2230
+ 0.5 -
2231
+ 0.0
2232
+ 0.0-
2233
+ 0.0
2234
+ 0.0
2235
+ 0.0
2236
+ 2
2237
+ 5
2238
+ 10
2239
+ 20
2240
+ 1
2241
+ 4
2242
+ 7
2243
+ 10
2244
+ 500
2245
+ 750
2246
+ 10002000
2247
+ 0.0 0.00010.001
2248
+ 0.1
2249
+ laplace
2250
+ gaussian
2251
+ Number of Hidden Neurons
2252
+ Number of Sub-Flows
2253
+ Number of Epochs
2254
+ L2-Penalty
2255
+ PriorsCAREFL-M (0.8)
2256
+ CAREFL-M (1.0)
2257
+ X
2258
+ CAREFL-M (0.8)
2259
+ CAREFL-M (1.0)
2260
+ CAREFL-H (0.8)
2261
+ CAREFL-H (1.0)
2262
+ 6.
2263
+ CAREFL-H (0.8)
2264
+ CAREFL-H (1.0)Cause-Effect Inference in Location-Scale Noise Models
2265
+ (a) N = 500
2266
+ (b) N = 5000
2267
+ Figure 9: Accuracy over 10 datasets: LSNM-sine-tanh and ContinuousBernoulli(0.9) noise.
2268
+ (a) N = 500
2269
+ (b) N = 5000
2270
+ Figure 10: Accuracy over 10 datasets: LSNM-sine-tanh and Exponential(1) noise.
2271
+
2272
+ 1.0
2273
+ 1.0
2274
+ 1.0
2275
+ 1.0
2276
+ 1.0
2277
+ 0.5
2278
+ 0.5
2279
+ 0.5
2280
+ X
2281
+ 0.5
2282
+ 0.5
2283
+ X
2284
+ 0.0
2285
+ 0.0
2286
+ 0.0
2287
+ 0.0
2288
+ 0.0
2289
+ 2
2290
+ 10
2291
+ 20
2292
+ +
2293
+ x
2294
+ 7
2295
+ 10
2296
+ 500
2297
+ 750
2298
+ 1000
2299
+ 2000
2300
+ 0.0
2301
+ 0.00010.001
2302
+ 0.1
2303
+ laplace
2304
+ gaussian
2305
+ Number of Hidden Neurons
2306
+ Number of Sub-Flows
2307
+ Numberof Epochs
2308
+ L2-Penalty
2309
+ Priors1.0
2310
+ 1.0 -
2311
+ 1.0
2312
+ 1.0
2313
+ 1.0
2314
+ Accuracy
2315
+ 0.5
2316
+ 0.5
2317
+ 0.5
2318
+ 0.5
2319
+ 0.5
2320
+ 0.0
2321
+ 0.0
2322
+ 0.0
2323
+ 0.0
2324
+ 0.0
2325
+ 2
2326
+ 10
2327
+ 20
2328
+ +
2329
+ 4
2330
+ 7
2331
+ 10
2332
+ 500
2333
+ 750
2334
+ 10002000
2335
+ 0.0
2336
+ 0.00010.001
2337
+ 0.1
2338
+ laplace
2339
+ gaussian
2340
+ Number of Hidden Neurons
2341
+ Number of Sub-Flows
2342
+ Numberof Epochs
2343
+ L2-Penalty
2344
+ Priors1.0
2345
+ 1.0
2346
+ 1.0
2347
+ 1.0
2348
+ 1.0
2349
+ 0.5
2350
+ 0.5 -
2351
+ 0.5
2352
+ 0.0
2353
+ 0.0
2354
+ 0.0
2355
+ 0.0
2356
+ 0.0
2357
+ 2
2358
+ 5
2359
+ 10
2360
+ 20
2361
+ +
2362
+ 4
2363
+ 7
2364
+ 10
2365
+ 500
2366
+ 750
2367
+ 10002000
2368
+ 0.0
2369
+ 0.00010.001
2370
+ 0.1
2371
+ laplace
2372
+ gaussian
2373
+ Number of Hidden Neurons
2374
+ NumberofSub-Flows
2375
+ Number of Epochs
2376
+ L2-Penalty
2377
+ Priors1.0
2378
+ 1.0
2379
+ 1.0
2380
+ 1.0
2381
+ 1.0
2382
+ 0.5
2383
+ 0.5
2384
+ 0.5
2385
+ 0.5
2386
+ 0.5
2387
+ 0.0
2388
+ 0.0
2389
+ 0.0
2390
+ 0.0
2391
+ 0.0
2392
+ 2
2393
+ 5
2394
+ 10
2395
+ 20
2396
+ +
2397
+ 4
2398
+ 7
2399
+ 10
2400
+ 500
2401
+ 750
2402
+ 10002000
2403
+ 0.0
2404
+ 0.00010.001
2405
+ 0.1
2406
+ laplace
2407
+ gaussian
2408
+ Number of Hidden Neurons
2409
+ Number of Sub-Flows
2410
+ Number of Epochs
2411
+ L2-Penalty
2412
+ PriorsCAREFL-M (0.8)
2413
+ CAREFL-M (1.0)
2414
+ X
2415
+ CAREFL-M (0.8)
2416
+ CAREFL-M (1.0)
2417
+ CAREFL-H (0.8)
2418
+ CAREFL-H (1.0)
2419
+ 6.
2420
+ CAREFL-H (0.8)
2421
+ CAREFL-H (1.0)Cause-Effect Inference in Location-Scale Noise Models
2422
+ (a) N = 500
2423
+ (b) N = 5000
2424
+ Figure 11: Accuracy over 10 datasets: LSNM-sigmoid-sigmoid and Uniform(−1, 1) noise.
2425
+ (a) N = 500
2426
+ (b) N = 5000
2427
+ Figure 12: Accuracy over 10 datasets: LSNM-sigmoid-sigmoid and Beta(0.5, 0.5) noise.
2428
+
2429
+ 1.0
2430
+ 1.0
2431
+ 1.0
2432
+ 1.0
2433
+ 1.0
2434
+ Accuracy
2435
+ 0.5
2436
+ 0.5
2437
+ 0.5
2438
+ 0.5
2439
+ 0.5
2440
+ 0.0
2441
+ 0.0
2442
+ 0.0
2443
+ 0.0
2444
+ 0.0
2445
+ 2
2446
+ 5
2447
+ 10
2448
+ 20
2449
+ 4
2450
+ 10
2451
+ 500
2452
+ 750
2453
+ 1000
2454
+ 2000
2455
+ 0.0
2456
+ 0.00010.001
2457
+ 0.1
2458
+ laplace
2459
+ gaussian
2460
+ Number of Hidden Neurons
2461
+ Number of Sub-Flows
2462
+ Numberof Epochs
2463
+ L2-Penalty
2464
+ Priors1.0
2465
+ 1.0 -
2466
+ 1.0
2467
+ 1.0
2468
+ 1.0
2469
+ Accuracy
2470
+ 0.5
2471
+ 0.5
2472
+ 0.5
2473
+ 0.5
2474
+ 0.5
2475
+ 0.0
2476
+ 0.0
2477
+ 0.0
2478
+ 0.0
2479
+ 0.0
2480
+ 2
2481
+ 10
2482
+ 20
2483
+ +
2484
+ 4
2485
+ 7
2486
+ 10
2487
+ 500
2488
+ 75010002000
2489
+ 0.0
2490
+ 0.00010.001
2491
+ 0.1
2492
+ laplace
2493
+ gaussian
2494
+ Number of Hidden Neurons
2495
+ Number of Sub-Flows
2496
+ Numberof Epochs
2497
+ L2-Penalty
2498
+ Priors1.0
2499
+ 1.0
2500
+ 1.0
2501
+ 1.0
2502
+ 1.0
2503
+ 0.5 -
2504
+ 0.5 -
2505
+ 0.5 -
2506
+ 0.5 -
2507
+ 0.5 -
2508
+ 0.0
2509
+ 0.0
2510
+ 0.0
2511
+ 0.0
2512
+ 0.0
2513
+ 2
2514
+ 5
2515
+ 10
2516
+ 20
2517
+ 1
2518
+ 4
2519
+ 7
2520
+ 10
2521
+ 500
2522
+ 750
2523
+ 10002000
2524
+ 0.0
2525
+ 0.00010.001
2526
+ 0.1
2527
+ laplace
2528
+ gaussian
2529
+ Number of Hidden Neurons
2530
+ Number of Sub-Flows
2531
+ Number of Epochs
2532
+ L2-Penalty
2533
+ Priors1.0-
2534
+ 1.0
2535
+ 1.0
2536
+ 1.0
2537
+ 0.5
2538
+ 0.5
2539
+ 0.5 -
2540
+ 0.5 -
2541
+ 0.5
2542
+ 0.0
2543
+ 0.0-
2544
+ 0.0
2545
+ 0.0
2546
+ 0.0
2547
+ 2
2548
+ 5
2549
+ 10
2550
+ 20
2551
+ 1
2552
+ 4
2553
+ 7
2554
+ 10
2555
+ 500
2556
+ 750
2557
+ 10002000
2558
+ 0.0 0.00010.001
2559
+ 0.1
2560
+ laplace
2561
+ gaussian
2562
+ Number of Hidden Neurons
2563
+ Number of Sub-Flows
2564
+ Number of Epochs
2565
+ L2-Penalty
2566
+ PriorsCAREFL-M (0.8)
2567
+ CAREFL-M (1.0)
2568
+ X
2569
+ CAREFL-M (0.8)
2570
+ CAREFL-M (1.0)
2571
+ CAREFL-H (0.8)
2572
+ CAREFL-H (1.0)
2573
+ 6.
2574
+ CAREFL-H (0.8)
2575
+ CAREFL-H (1.0)Cause-Effect Inference in Location-Scale Noise Models
2576
+ (a) N = 500
2577
+ (b) N = 5000
2578
+ Figure 13: Accuracy over 10 datasets: LSNM-sigmoid-sigmoid and ContinuousBernoulli(0.9) noise.
2579
+ (a) N = 500
2580
+ (b) N = 5000
2581
+ Figure 14: Accuracy over 10 datasets: LSNM-sigmoid-sigmoid and Exponential(1) noise.
2582
+
2583
+ 1.0
2584
+ 1.0
2585
+ 1.0
2586
+ 1.0
2587
+ 1.0
2588
+ 0.5
2589
+ 0.5
2590
+ 0.5
2591
+ 0.5
2592
+ 0.5 -
2593
+ 0.0-
2594
+ 0.0-
2595
+ 0.0
2596
+ 0.0
2597
+ 0.0
2598
+ 2
2599
+ 5
2600
+ 10
2601
+ 20
2602
+ +
2603
+ 4
2604
+ 10
2605
+ 500
2606
+ 750
2607
+ 1000
2608
+ 2000
2609
+ 0.0
2610
+ 0.00010.001
2611
+ 0.1
2612
+ laplace
2613
+ gaussian
2614
+ Number of Hidden Neurons
2615
+ Number of Sub-Flows
2616
+ Number of Epochs
2617
+ L2-Penalty
2618
+ Priors1.0
2619
+ 1.0 -
2620
+ 1.0
2621
+ 1.0
2622
+ 1.0
2623
+ 0.5
2624
+ 0.5
2625
+ 0.5 -
2626
+ 0.5 -
2627
+ 0.5 -
2628
+ 0.0
2629
+ 0.0
2630
+ 0.0
2631
+ 0.0
2632
+ 0.0
2633
+ 2
2634
+ 5
2635
+ 10
2636
+ 20
2637
+ 1
2638
+ 4
2639
+ 7
2640
+ 10
2641
+ 500
2642
+ 750
2643
+ 10002000
2644
+ 0.0
2645
+ 0.00010.001
2646
+ 0.1
2647
+ laplace
2648
+ gaussian
2649
+ Number of Hidden Neurons
2650
+ Number of Sub-Flows
2651
+ Number of Epochs
2652
+ L2-Penalty
2653
+ Priors1.0
2654
+ 1.0
2655
+ 1.0
2656
+ 1.0
2657
+ 1.0
2658
+ 0.5
2659
+ 0.5
2660
+ 0.5
2661
+ 0.5
2662
+ 0.5 -
2663
+ 0.0
2664
+ 0.0
2665
+ 0.0
2666
+ 0.0
2667
+ 0.0
2668
+ 2
2669
+ 5
2670
+ 10
2671
+ 20
2672
+ +
2673
+ 4
2674
+ 7
2675
+ 10
2676
+ 500
2677
+ 750
2678
+ 10002000
2679
+ 0.0
2680
+ 0.00010.001
2681
+ 0.1
2682
+ laplace
2683
+ gaussian
2684
+ Number of Hidden Neurons
2685
+ Number of Sub-Flows
2686
+ Numberof Epochs
2687
+ L2-Penalty
2688
+ Priors1.0
2689
+ 1.0
2690
+ 1.0
2691
+ 1.0
2692
+ 1.0
2693
+ 0.5
2694
+ 0.5 -
2695
+ 0.5 -
2696
+ 0.5
2697
+ 0.5 -
2698
+ 0.0
2699
+ 0.0
2700
+ 0.0
2701
+ 0.0
2702
+ 0.0
2703
+ 2
2704
+ 5
2705
+ 10
2706
+ 20
2707
+ 1
2708
+ 4
2709
+ 7
2710
+ 10
2711
+ 500
2712
+ 750
2713
+ 10002000
2714
+ 0.0
2715
+ 0.00010.001
2716
+ 0.1
2717
+ laplace
2718
+ gaussian
2719
+ Number of Hidden Neurons
2720
+ Number of Sub-Flows
2721
+ Number of Epochs
2722
+ L2-Penalty
2723
+ PriorsCAREFL-M (0.8)
2724
+ CAREFL-M (1.0)
2725
+ X
2726
+ CAREFL-M (0.8)
2727
+ CAREFL-M (1.0)
2728
+ CAREFL-H (0.8)
2729
+ CAREFL-H (1.0)
2730
+ 6.
2731
+ CAREFL-H (0.8)
2732
+ CAREFL-H (1.0)Cause-Effect Inference in Location-Scale Noise Models
2733
+ (a) N = 500
2734
+ (b) N = 5000
2735
+ Figure 15: Accuracy over 10 datasets: LSNM-tanh-exp-cosine and Gaussian(0, 1) noise.
2736
+ (a) N = 500
2737
+ (b) N = 5000
2738
+ Figure 16: Accuracy over 10 datasets: LSNM-tanh-exp-cosine and Laplace(0, 1) noise.
2739
+
2740
+ 1.0
2741
+ 1.0
2742
+ 1.0
2743
+ 1.0
2744
+ 1.0
2745
+ 0.5
2746
+ 0.5
2747
+ 0.5 -
2748
+ 0.5
2749
+ 0.5
2750
+ 0.0
2751
+ 0.0
2752
+ 0.0
2753
+ 0.0
2754
+ 0.0
2755
+ 2
2756
+ 5
2757
+ 10
2758
+ 20
2759
+ 1
2760
+ 4
2761
+ 7
2762
+ 10
2763
+ 500
2764
+ 750
2765
+ 10002000
2766
+ 0.0
2767
+ 0.00010.001
2768
+ 0.1
2769
+ laplace
2770
+ gaussian
2771
+ Number of Hidden Neurons
2772
+ Number of Sub-Flows
2773
+ Number of Epochs
2774
+ L2-Penalty
2775
+ Priors1.0
2776
+ 1.0
2777
+ 1.0
2778
+ 1.0 -
2779
+ 1.0
2780
+ 0.5
2781
+ 0.5
2782
+ 0.5
2783
+ 0.5
2784
+ 0.5
2785
+ 0.0
2786
+ 0.0
2787
+ 0.0
2788
+ 0.0
2789
+ 0.0
2790
+ 2
2791
+ 10
2792
+ 20
2793
+ 1
2794
+ 4
2795
+ 7
2796
+ 10
2797
+ 500
2798
+ 750
2799
+ 10002000
2800
+ 0.0
2801
+ 0.00010.001
2802
+ 0.1
2803
+ laplace
2804
+ gaussian
2805
+ Number of Hidden Neurons
2806
+ Number of Sub-Flows
2807
+ Number of Epochs
2808
+ L2-Penalty
2809
+ Priors1.0
2810
+ 1.0
2811
+ 1.0
2812
+ 1.0
2813
+ 1.0
2814
+ X
2815
+ 0.5
2816
+ 0.5
2817
+ 0.5
2818
+ 0.5
2819
+ 0.5
2820
+ 0.0
2821
+ 0.0
2822
+ 0.0
2823
+ 0.0
2824
+ 0.0
2825
+ 2
2826
+ 10
2827
+ 20
2828
+ 1
2829
+ 7
2830
+ 10
2831
+ 500
2832
+ 750
2833
+ 10002000
2834
+ 0.0
2835
+ 0.00010.001
2836
+ 0.1
2837
+ laplace
2838
+ gaussian
2839
+ Number of Hidden Neurons
2840
+ Number of Sub-Flows
2841
+ Number of Epochs
2842
+ L2-Penalty
2843
+ Priors1.0 -
2844
+ 1.0
2845
+ 1.0
2846
+ 1.0
2847
+ 1.0
2848
+ 0.5
2849
+ 0.5 -
2850
+ 0.5
2851
+ 0.5 -
2852
+ 0.5 -
2853
+ 0.0
2854
+ 0.0
2855
+ 0.0
2856
+ 0.0
2857
+ 0.0
2858
+ 2
2859
+ 10
2860
+ 20
2861
+ 1
2862
+ 4
2863
+ 7
2864
+ 10
2865
+ 500
2866
+ 750
2867
+ 10002000
2868
+ 0.0
2869
+ 0.00010.001
2870
+ 0.1
2871
+ laplace
2872
+ gaussian
2873
+ Number of Hidden Neurons
2874
+ Number of Sub-Flows
2875
+ Number of Epochs
2876
+ L2-Penalty
2877
+ PriorsCAREFL-M (0.8)
2878
+ CAREFL-M (1.0)
2879
+ X
2880
+ CAREFL-M (0.8)
2881
+ CAREFL-M (1.0)
2882
+ CAREFL-H (0.8)
2883
+ CAREFL-H (1.0)
2884
+ 6.
2885
+ CAREFL-H (0.8)
2886
+ CAREFL-H (1.0)Cause-Effect Inference in Location-Scale Noise Models
2887
+ (a) N = 500
2888
+ (b) N = 5000
2889
+ Figure 17: Accuracy over 10 datasets: LSNM-sine-tanh and Gaussian(0, 1) noise.
2890
+ (a) N = 500
2891
+ (b) N = 5000
2892
+ Figure 18: Accuracy over 10 datasets: LSNM-sine-tanh and Laplace(0, 1) noise.
2893
+
2894
+ 1.0
2895
+ 1.0
2896
+ 1.0
2897
+ 1.0-
2898
+ 1.0
2899
+ 0.5
2900
+ S
2901
+ 0.5
2902
+ 0.5
2903
+ 0.0
2904
+ 0.0
2905
+ 0.0
2906
+ 0.0
2907
+ 0.0
2908
+ 2
2909
+ 10
2910
+ 20
2911
+ 1
2912
+ 4
2913
+ 7
2914
+ 10
2915
+ 500
2916
+ 750
2917
+ 10002000
2918
+ 0.0
2919
+ 0.00010.001
2920
+ 0.1
2921
+ laplace
2922
+ gaussian
2923
+ Number of Hidden Neurons
2924
+ Number of Sub-Flows
2925
+ Number of Epochs
2926
+ L2-Penalty
2927
+ Priors1.0 -
2928
+ 1.0
2929
+ 1.0
2930
+ 1.0
2931
+ 1.0
2932
+ 0.5
2933
+ 0.5 -
2934
+ 0.5
2935
+ 0.5
2936
+ 0.5
2937
+ 0.0
2938
+ 0.0
2939
+ 0.0
2940
+ 0.0
2941
+ 0.0
2942
+ 2
2943
+ 5
2944
+ 10
2945
+ 20
2946
+ 1
2947
+ 7
2948
+ 10
2949
+ 500
2950
+ 750
2951
+ 1000
2952
+ 2000
2953
+ 0.0
2954
+ 0.00010.001
2955
+ 0.1
2956
+ laplace
2957
+ gaussian
2958
+ Number of Hidden Neurons
2959
+ NumberofSub-Flows
2960
+ Number of Epochs
2961
+ L2-Penalty
2962
+ Priors1.0
2963
+ 1.0
2964
+ 1.0
2965
+ 1.0
2966
+ 1.0
2967
+ 0.5
2968
+ 0.5 -
2969
+ 0.5 -
2970
+ 0.5 -
2971
+ 0.5
2972
+ 0.0
2973
+ 0.0
2974
+ 0.0
2975
+ 0.0
2976
+ 0.0
2977
+ 2
2978
+ 10
2979
+ 20
2980
+ 1
2981
+ 4
2982
+ 7
2983
+ 10
2984
+ 500
2985
+ 750
2986
+ 10002000
2987
+ 0.0
2988
+ 0.00010.001
2989
+ 0.1
2990
+ laplace
2991
+ gaussian
2992
+ Number of Hidden Neurons
2993
+ Number of Sub-Flows
2994
+ Number of Epochs
2995
+ L2-Penalty
2996
+ Priors1.0 -
2997
+ 1.0-
2998
+ 1.0
2999
+ 1.0
3000
+ 1.0
3001
+ 0.5
3002
+ 0.5 -
3003
+ 0.5 -
3004
+ 0.5 -
3005
+ 0.5
3006
+ 0.0
3007
+ 0.0
3008
+ 0.0
3009
+ 0.0
3010
+ 0.0
3011
+ 2
3012
+ 10
3013
+ 20
3014
+ 1
3015
+ 4
3016
+ 7
3017
+ 10
3018
+ 500
3019
+ 750
3020
+ 10002000
3021
+ 0.0
3022
+ 0.00010.001
3023
+ 0.1
3024
+ laplace
3025
+ gaussian
3026
+ Number of Hidden Neurons
3027
+ Number of Sub-Flows
3028
+ Number of Epochs
3029
+ L2-Penalty
3030
+ PriorsCAREFL-M (0.8)
3031
+ CAREFL-M (1.0)
3032
+ X
3033
+ CAREFL-M (0.8)
3034
+ CAREFL-M (1.0)
3035
+ CAREFL-H (0.8)
3036
+ CAREFL-H (1.0)
3037
+ 6.
3038
+ CAREFL-H (0.8)
3039
+ CAREFL-H (1.0)Cause-Effect Inference in Location-Scale Noise Models
3040
+ (a) N = 500
3041
+ (b) N = 5000
3042
+ Figure 19: Accuracy over 10 datasets: LSNM-sigmoid-sigmoid and Gaussian(0, 1) noise.
3043
+ (a) N = 500
3044
+ (b) N = 5000
3045
+ Figure 20: Accuracy over 10 datasets: LSNM-sigmoid-sigmoid and Laplace(0, 1) noise.
3046
+
3047
+ 1.0
3048
+ 1.0
3049
+ 1.0
3050
+ 1.0
3051
+ 1.0
3052
+ 0.5
3053
+ 0.5
3054
+ 0.5
3055
+ 0.5
3056
+ 0.5
3057
+ 0.0
3058
+ 0.0
3059
+ 0.0
3060
+ 0.0
3061
+ 0.0
3062
+ 2
3063
+ 5
3064
+ 10
3065
+ 20
3066
+ 1
3067
+ 4
3068
+ 7
3069
+ 10
3070
+ 500
3071
+ 750
3072
+ 10002000
3073
+ 0.0
3074
+ 0.00010.001
3075
+ 0.1
3076
+ laplace
3077
+ gaussian
3078
+ Number of Hidden Neurons
3079
+ NumberofSub-Flows
3080
+ Numberof Epochs
3081
+ L2-Penalty
3082
+ Priors1.0 -
3083
+ 1.0 -
3084
+ 1.0
3085
+ 1.0
3086
+ 1.0
3087
+ 0.5
3088
+ 0.5
3089
+ 0.5
3090
+ 0.5
3091
+ 0.5
3092
+ 0.0
3093
+ 0.0
3094
+ 0.0
3095
+ 0.0
3096
+ 0.0
3097
+ 2
3098
+ 5
3099
+ 10
3100
+ 20
3101
+ 1
3102
+ 4
3103
+ 7
3104
+ 10
3105
+ 500
3106
+ 750
3107
+ 10002000
3108
+ 0.0
3109
+ 0.00010.001
3110
+ 0.1
3111
+ laplace
3112
+ gaussian
3113
+ Number of Hidden Neurons
3114
+ NumberofSub-Flows
3115
+ Number of Epochs
3116
+ L2-Penalty
3117
+ Priors1.0
3118
+ 1.0
3119
+ 1.0
3120
+ 1.0
3121
+ 1.0
3122
+ 0.5
3123
+ 0.5
3124
+ 0.5
3125
+ 0.5
3126
+ 0.5
3127
+ 0.0
3128
+ 0.0
3129
+ 0.0
3130
+ 0.0
3131
+ 0.0
3132
+ 2
3133
+ 10
3134
+ 20
3135
+ 1
3136
+ 4
3137
+ 7
3138
+ 10
3139
+ 500
3140
+ 750
3141
+ 10002000
3142
+ 0.0
3143
+ 0.00010.001
3144
+ 0.1
3145
+ laplace
3146
+ gaussian
3147
+ Number of Hidden Neurons
3148
+ NumberofSub-Flows
3149
+ Number of Epochs
3150
+ L2-Penalty
3151
+ Priors1.0 -
3152
+ 1.0
3153
+ 1.0
3154
+ 1.0
3155
+ 1.0
3156
+ 0.5
3157
+ 0.5 -
3158
+ 0.5 -
3159
+ 0.5
3160
+ 0.5
3161
+ 0.0
3162
+ 0.0
3163
+ 0.0
3164
+ 0.0
3165
+ 0.0
3166
+ n.
3167
+ 10
3168
+ 20
3169
+ 1
3170
+ 4
3171
+ 7
3172
+ 10
3173
+ 500
3174
+ 750
3175
+ 1000
3176
+ 2000
3177
+ 0.0
3178
+ 0.00010.001
3179
+ 0.1
3180
+ laplace
3181
+ gaussian
3182
+ Number of Hidden Neurons
3183
+ Number of Sub-Flows
3184
+ Number of Epochs
3185
+ L2-Penalty
3186
+ PriorsCAREFL-M (0.8)
3187
+ CAREFL-M (1.0)
3188
+ X
3189
+ CAREFL-M (0.8)
3190
+ CAREFL-M (1.0)
3191
+ CAREFL-H (0.8)
3192
+ CAREFL-H (1.0)
3193
+ 6.
3194
+ CAREFL-H (0.8)
3195
+ CAREFL-H (1.0)Cause-Effect Inference in Location-Scale Noise Models
3196
+ (a) Accuracy over 100 datasets from SIM sub-benchmark.
3197
+ (b) Accuracy over 100 datasets from SIM-c sub-benchmark.
3198
+ (c) Accuracy over 100 datasets from SIM-ln sub-benchmark.
3199
+ (d) Accuracy over 100 datasets from SIM-G sub-benchmark.
3200
+ Figure 21: Results with SIM benchmarks.
3201
+
3202
+ 1.0
3203
+ 1.0
3204
+ 1.0
3205
+ 1.0
3206
+ 1.0
3207
+ 0.5
3208
+ 0.5 -
3209
+ 0.5
3210
+ 0.5
3211
+ 0.5
3212
+ 0.0
3213
+ 0.0
3214
+ 0.0
3215
+ 0.0
3216
+ 0.0
3217
+ 2
3218
+ 5
3219
+ 10
3220
+ 20
3221
+ +
3222
+ 4
3223
+ 7
3224
+ 10
3225
+ 500
3226
+ 750
3227
+ 10002000
3228
+ 0.0
3229
+ 0.00010.001
3230
+ 0.1
3231
+ laplace
3232
+ gaussian
3233
+ Number of Hidden Neurons
3234
+ Number of Sub-Flows
3235
+ Number of Epochs
3236
+ L2-Penalty
3237
+ Priors1.0 -
3238
+ 1.0
3239
+ 1.0
3240
+ 1.0
3241
+ 1.0
3242
+ 0.5
3243
+ 0.5
3244
+ 0.5
3245
+ 0.0
3246
+ 0.0
3247
+ 0.0
3248
+ 0.0
3249
+ 0.0
3250
+ 2
3251
+ 5
3252
+ 10
3253
+ 20
3254
+ +
3255
+ 4
3256
+ 7
3257
+ 10
3258
+ 500
3259
+ 750
3260
+ 10002000
3261
+ 0.0
3262
+ 0.00010.001
3263
+ 0.1
3264
+ laplace
3265
+ gaussian
3266
+ Number of Hidden Neurons
3267
+ Number of Sub-Flows
3268
+ Number of Epochs
3269
+ L2-Penalty
3270
+ Priors1.0
3271
+ 1.0
3272
+ 1.0
3273
+ 1.0
3274
+ 1.0
3275
+ 0.5
3276
+ 0.5 -
3277
+ 0.5
3278
+ 0.5
3279
+ 0.5
3280
+ 0.0
3281
+ 0.0
3282
+ 0.0
3283
+ 0.0
3284
+ 0.0
3285
+ 2
3286
+ 10
3287
+ 20
3288
+ 1
3289
+ 4
3290
+ 7
3291
+ 10
3292
+ 500
3293
+ 750
3294
+ 10002000
3295
+ 0.0
3296
+ 0.00010.001
3297
+ 0.1
3298
+ laplace
3299
+ gaussian
3300
+ Number of Hidden Neurons
3301
+ Number of Sub-Flows
3302
+ Number of Epochs
3303
+ L2-Penalty
3304
+ Priors1.0
3305
+ 1.0
3306
+ 1.0
3307
+ 1.0
3308
+ 1.0
3309
+ +
3310
+ 1
3311
+ 0.5
3312
+ 0.5
3313
+ 0.5
3314
+ 0.5
3315
+ 0.5
3316
+ 0.0
3317
+ 0.0
3318
+ 0.0
3319
+ 0.0
3320
+ 0.0
3321
+ 2
3322
+ 10
3323
+ 20
3324
+ 1
3325
+ 4
3326
+ 7
3327
+ 10
3328
+ 500
3329
+ 750
3330
+ 10002000
3331
+ 0.0
3332
+ 0.00010.001
3333
+ 0.1
3334
+ laplace
3335
+ gaussian
3336
+ Number of Hidden Neurons
3337
+ Number of Sub-Flows
3338
+ Number of Epochs
3339
+ L2-Penalty
3340
+ PriorsCAREFL-M (0.8)
3341
+ CAREFL-M (1.0)
3342
+ X
3343
+ CAREFL-M (0.8)
3344
+ CAREFL-M (1.0)
3345
+ CAREFL-H (0.8)
3346
+ CAREFL-H (1.0)
3347
+ 6.
3348
+ CAREFL-H (0.8)
3349
+ CAREFL-H (1.0)
KdFOT4oBgHgl3EQfzDRI/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
L9E3T4oBgHgl3EQfwAue/content/2301.04699v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07e15d033fd42a7919c8451e20cbcbf7a03c2471cd3b38a0f7bb5e971751b778
3
+ size 6420850