jackkuo commited on
Commit
569dfda
·
verified ·
1 Parent(s): b8b1374

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. -NFQT4oBgHgl3EQf6jZ7/content/2301.13439v1.pdf +3 -0
  2. -NFQT4oBgHgl3EQf6jZ7/vector_store/index.faiss +3 -0
  3. -NFQT4oBgHgl3EQf6jZ7/vector_store/index.pkl +3 -0
  4. -tFQT4oBgHgl3EQfKTWv/vector_store/index.faiss +3 -0
  5. .gitattributes +60 -0
  6. 1dE4T4oBgHgl3EQfzQ2-/content/tmp_files/2301.05273v1.pdf.txt +1509 -0
  7. 1dE4T4oBgHgl3EQfzQ2-/content/tmp_files/load_file.txt +0 -0
  8. 29FLT4oBgHgl3EQfrS9R/content/2301.12143v1.pdf +3 -0
  9. 29FLT4oBgHgl3EQfrS9R/vector_store/index.pkl +3 -0
  10. 39FQT4oBgHgl3EQf3zah/content/tmp_files/2301.13429v1.pdf.txt +940 -0
  11. 39FQT4oBgHgl3EQf3zah/content/tmp_files/load_file.txt +0 -0
  12. 3tFAT4oBgHgl3EQflR3K/vector_store/index.faiss +3 -0
  13. 4dFKT4oBgHgl3EQfRy0L/content/tmp_files/2301.11772v1.pdf.txt +1563 -0
  14. 4dFKT4oBgHgl3EQfRy0L/content/tmp_files/load_file.txt +0 -0
  15. 4tAzT4oBgHgl3EQfuv3m/content/tmp_files/2301.01697v1.pdf.txt +0 -0
  16. 4tAzT4oBgHgl3EQfuv3m/content/tmp_files/load_file.txt +0 -0
  17. 4tE2T4oBgHgl3EQfkAch/content/tmp_files/2301.03973v1.pdf.txt +1195 -0
  18. 4tE2T4oBgHgl3EQfkAch/content/tmp_files/load_file.txt +485 -0
  19. 5NE1T4oBgHgl3EQfmgQS/content/tmp_files/2301.03297v1.pdf.txt +1527 -0
  20. 5NE1T4oBgHgl3EQfmgQS/content/tmp_files/load_file.txt +0 -0
  21. 5tE1T4oBgHgl3EQfmgTC/content/2301.03299v1.pdf +3 -0
  22. 5tE1T4oBgHgl3EQfmgTC/vector_store/index.faiss +3 -0
  23. 5tE1T4oBgHgl3EQfmgTC/vector_store/index.pkl +3 -0
  24. 6dFKT4oBgHgl3EQfTi2q/vector_store/index.faiss +3 -0
  25. 9NE3T4oBgHgl3EQfqwrj/content/tmp_files/2301.04655v1.pdf.txt +1022 -0
  26. 9NE3T4oBgHgl3EQfqwrj/content/tmp_files/load_file.txt +0 -0
  27. 9dFJT4oBgHgl3EQfoixI/content/tmp_files/2301.11596v1.pdf.txt +1090 -0
  28. 9dFJT4oBgHgl3EQfoixI/content/tmp_files/load_file.txt +0 -0
  29. A9AyT4oBgHgl3EQfd_iI/content/2301.00313v1.pdf +3 -0
  30. A9AyT4oBgHgl3EQfd_iI/vector_store/index.pkl +3 -0
  31. ANAzT4oBgHgl3EQfF_sv/content/tmp_files/2301.01019v1.pdf.txt +1634 -0
  32. ANAzT4oBgHgl3EQfF_sv/content/tmp_files/load_file.txt +0 -0
  33. C9E4T4oBgHgl3EQfeg2i/content/2301.05100v1.pdf +3 -0
  34. DNAzT4oBgHgl3EQfiP3j/vector_store/index.faiss +3 -0
  35. DNFKT4oBgHgl3EQfYy5L/vector_store/index.faiss +3 -0
  36. DdAzT4oBgHgl3EQfT_w7/vector_store/index.pkl +3 -0
  37. DdE4T4oBgHgl3EQfew2P/content/2301.05102v1.pdf +3 -0
  38. DdE4T4oBgHgl3EQfew2P/vector_store/index.faiss +3 -0
  39. DdE4T4oBgHgl3EQfew2P/vector_store/index.pkl +3 -0
  40. FNFQT4oBgHgl3EQfRTbc/content/tmp_files/2301.13286v1.pdf.txt +2283 -0
  41. FNFQT4oBgHgl3EQfRTbc/content/tmp_files/load_file.txt +0 -0
  42. GNE2T4oBgHgl3EQfTQeG/content/2301.03801v1.pdf +3 -0
  43. GNE2T4oBgHgl3EQfTQeG/vector_store/index.faiss +3 -0
  44. GNE2T4oBgHgl3EQfTQeG/vector_store/index.pkl +3 -0
  45. ItAyT4oBgHgl3EQfTPfP/content/tmp_files/2301.00103v1.pdf.txt +0 -0
  46. ItAyT4oBgHgl3EQfTPfP/content/tmp_files/load_file.txt +0 -0
  47. ItE3T4oBgHgl3EQfugv_/content/2301.04686v1.pdf +3 -0
  48. ItE3T4oBgHgl3EQfugv_/vector_store/index.pkl +3 -0
  49. J9E1T4oBgHgl3EQfsQWj/content/2301.03364v1.pdf +3 -0
  50. J9E1T4oBgHgl3EQfsQWj/vector_store/index.faiss +3 -0
-NFQT4oBgHgl3EQf6jZ7/content/2301.13439v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d98bc0a1ca3ebfbac0d52a3fdc7e82d9bd751e3b95485df82c8d40297dc111d2
3
+ size 174000
-NFQT4oBgHgl3EQf6jZ7/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb3237d14daff9f98ed6c17a0b429be8b842d7db7751eb532c8f982731119e2
3
+ size 1048621
-NFQT4oBgHgl3EQf6jZ7/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95d8fdc058f0501405cff71aa6f1ac1d81e33137cd3bfd21d176f1e716b38271
3
+ size 40383
-tFQT4oBgHgl3EQfKTWv/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5cee3af8a49ea173477b3b102232ac676d4150a01d1bc226ab070982c3895048
3
+ size 3014701
.gitattributes CHANGED
@@ -9819,3 +9819,63 @@ DNAzT4oBgHgl3EQfiP3j/content/2301.01498v1.pdf filter=lfs diff=lfs merge=lfs -tex
9819
  qNA0T4oBgHgl3EQfKf_Y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9820
  X9E1T4oBgHgl3EQfwAUv/content/2301.03405v1.pdf filter=lfs diff=lfs merge=lfs -text
9821
  dtAzT4oBgHgl3EQfZ_z3/content/2301.01363v1.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9819
  qNA0T4oBgHgl3EQfKf_Y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9820
  X9E1T4oBgHgl3EQfwAUv/content/2301.03405v1.pdf filter=lfs diff=lfs merge=lfs -text
9821
  dtAzT4oBgHgl3EQfZ_z3/content/2301.01363v1.pdf filter=lfs diff=lfs merge=lfs -text
9822
+ qNA0T4oBgHgl3EQfKf_Y/content/2301.02106v1.pdf filter=lfs diff=lfs merge=lfs -text
9823
+ c9FRT4oBgHgl3EQfTTda/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9824
+ M9AyT4oBgHgl3EQf6_oJ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9825
+ rNE4T4oBgHgl3EQfwA2r/content/2301.05247v1.pdf filter=lfs diff=lfs merge=lfs -text
9826
+ DNFKT4oBgHgl3EQfYy5L/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9827
+ aNE0T4oBgHgl3EQfnQFP/content/2301.02509v1.pdf filter=lfs diff=lfs merge=lfs -text
9828
+ atE0T4oBgHgl3EQf4gIr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9829
+ RtE5T4oBgHgl3EQfZw_r/content/2301.05584v1.pdf filter=lfs diff=lfs merge=lfs -text
9830
+ -tFQT4oBgHgl3EQfKTWv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9831
+ itE3T4oBgHgl3EQf4wuZ/content/2301.04775v1.pdf filter=lfs diff=lfs merge=lfs -text
9832
+ wtFST4oBgHgl3EQfRjgB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9833
+ DNAzT4oBgHgl3EQfiP3j/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9834
+ OtFJT4oBgHgl3EQfICxT/content/2301.11454v1.pdf filter=lfs diff=lfs merge=lfs -text
9835
+ ldFIT4oBgHgl3EQfsCtp/content/2301.11334v1.pdf filter=lfs diff=lfs merge=lfs -text
9836
+ J9E1T4oBgHgl3EQfsQWj/content/2301.03364v1.pdf filter=lfs diff=lfs merge=lfs -text
9837
+ aNE4T4oBgHgl3EQfOwxI/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9838
+ GNE2T4oBgHgl3EQfTQeG/content/2301.03801v1.pdf filter=lfs diff=lfs merge=lfs -text
9839
+ o9E4T4oBgHgl3EQfvA3U/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9840
+ edFPT4oBgHgl3EQfzjXc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9841
+ j9E0T4oBgHgl3EQfYgAz/content/2301.02307v1.pdf filter=lfs diff=lfs merge=lfs -text
9842
+ DdE4T4oBgHgl3EQfew2P/content/2301.05102v1.pdf filter=lfs diff=lfs merge=lfs -text
9843
+ edFPT4oBgHgl3EQfzjXc/content/2301.13176v1.pdf filter=lfs diff=lfs merge=lfs -text
9844
+ X9E3T4oBgHgl3EQfGAla/content/2301.04310v1.pdf filter=lfs diff=lfs merge=lfs -text
9845
+ Q9A0T4oBgHgl3EQfDf--/content/2301.02005v1.pdf filter=lfs diff=lfs merge=lfs -text
9846
+ ydFRT4oBgHgl3EQfizeW/content/2301.13588v1.pdf filter=lfs diff=lfs merge=lfs -text
9847
+ GNE2T4oBgHgl3EQfTQeG/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9848
+ ydFRT4oBgHgl3EQfizeW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9849
+ zdFQT4oBgHgl3EQfCTXT/content/2301.13230v1.pdf filter=lfs diff=lfs merge=lfs -text
9850
+ 5tE1T4oBgHgl3EQfmgTC/content/2301.03299v1.pdf filter=lfs diff=lfs merge=lfs -text
9851
+ RtE5T4oBgHgl3EQfZw_r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9852
+ YdAyT4oBgHgl3EQfiPiV/content/2301.00392v1.pdf filter=lfs diff=lfs merge=lfs -text
9853
+ cdE0T4oBgHgl3EQfWQDo/content/2301.02277v1.pdf filter=lfs diff=lfs merge=lfs -text
9854
+ 6dFKT4oBgHgl3EQfTi2q/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9855
+ ltFPT4oBgHgl3EQfHzTL/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9856
+ itE3T4oBgHgl3EQfIwnu/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9857
+ J9E1T4oBgHgl3EQfsQWj/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9858
+ itE3T4oBgHgl3EQfIwnu/content/2301.04338v1.pdf filter=lfs diff=lfs merge=lfs -text
9859
+ 5tE1T4oBgHgl3EQfmgTC/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9860
+ DdE4T4oBgHgl3EQfew2P/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9861
+ dtAzT4oBgHgl3EQfn_1r/content/2301.01589v1.pdf filter=lfs diff=lfs merge=lfs -text
9862
+ Q9A0T4oBgHgl3EQfDf--/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9863
+ qdE4T4oBgHgl3EQfvw0Q/content/2301.05244v1.pdf filter=lfs diff=lfs merge=lfs -text
9864
+ P9FJT4oBgHgl3EQf2S1v/content/2301.11655v1.pdf filter=lfs diff=lfs merge=lfs -text
9865
+ gtE2T4oBgHgl3EQfcAfW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9866
+ JNE4T4oBgHgl3EQfhg0d/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9867
+ s9FJT4oBgHgl3EQfcyxN/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9868
+ X9E1T4oBgHgl3EQfwAUv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9869
+ UtE3T4oBgHgl3EQf0Au1/content/2301.04734v1.pdf filter=lfs diff=lfs merge=lfs -text
9870
+ -NFQT4oBgHgl3EQf6jZ7/content/2301.13439v1.pdf filter=lfs diff=lfs merge=lfs -text
9871
+ fdE2T4oBgHgl3EQfGwaV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9872
+ C9E4T4oBgHgl3EQfeg2i/content/2301.05100v1.pdf filter=lfs diff=lfs merge=lfs -text
9873
+ rNE4T4oBgHgl3EQfwA2r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9874
+ ItE3T4oBgHgl3EQfugv_/content/2301.04686v1.pdf filter=lfs diff=lfs merge=lfs -text
9875
+ R9E2T4oBgHgl3EQfBwbf/content/2301.03607v1.pdf filter=lfs diff=lfs merge=lfs -text
9876
+ RNFKT4oBgHgl3EQfiS5U/content/2301.11841v1.pdf filter=lfs diff=lfs merge=lfs -text
9877
+ -NFQT4oBgHgl3EQf6jZ7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9878
+ qdAyT4oBgHgl3EQfl_jn/content/2301.00464v1.pdf filter=lfs diff=lfs merge=lfs -text
9879
+ A9AyT4oBgHgl3EQfd_iI/content/2301.00313v1.pdf filter=lfs diff=lfs merge=lfs -text
9880
+ 3tFAT4oBgHgl3EQflR3K/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
9881
+ 29FLT4oBgHgl3EQfrS9R/content/2301.12143v1.pdf filter=lfs diff=lfs merge=lfs -text
1dE4T4oBgHgl3EQfzQ2-/content/tmp_files/2301.05273v1.pdf.txt ADDED
@@ -0,0 +1,1509 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Improving the speed of variational quantum algorithms
2
+ for quantum error correction
3
+ Fabio Zoratti
4
+ Scuola Normale Superiore, I-56126 Pisa, Italy
5
+ Giacomo De Palma
6
+ Department of Mathematics, University of Bologna, 40126 Bologna, Italy
7
+ Vittorio Giovannetti
8
+ NEST, Scuola Normale Superiore and Istituto Nanoscienze-CNR, I-56126 Pisa, Italy
9
+ We consider the problem of devising a suitable quantum error correction procedure for a generic
10
+ quantum noise acting on a quantum circuit. In general, there is no analytic universal procedure
11
+ to obtain the encoding and correction unitary gates, and the problem is even harder if the noise
12
+ is unknown and has to be reconstructed. The existing procedures rely on variational quantum
13
+ algorithms and are very difficult to train since the size of the gradient of the cost function decays
14
+ exponentially with the number of qubits. We address this problem using a cost function based on
15
+ the quantum Wasserstein distance of order 1. Our results show that such cost function significantly
16
+ increases both the probability of a successful training and the fidelity of the recovered state.
17
+ I.
18
+ INTRODUCTION
19
+ Performing reliable computations on imperfect physical
20
+ hardware is something that has become usual nowadays,
21
+ given the current state of classical computers, which can
22
+ produce perfect results without any software-side miti-
23
+ gation of the imperfections of the physical media where
24
+ the computation happens. Error correction is based on
25
+ the fact that these machines perform automatically, on
26
+ the hardware side, procedures that allow errors to hap-
27
+ pen and to be fixed without any intervention of the end
28
+ user. This kind of setting is even more crucial in a quan-
29
+ tum scenario where the current noisy intermediate-scale
30
+ quantum computers (NISQ) have a much larger error
31
+ rate than their classical counterparts [1]. Performing re-
32
+ liable computations with a trustworthy error correction
33
+ procedure has direct implications not only in quantum
34
+ computation [2, 3], but also in quantum key distribution
35
+ and cryptography [4–8].
36
+ In the typical Quantum Error Correction (QEC)
37
+ scheme, the quantum state that has to be protected is
38
+ stored in a subspace of a larger Hilbert space, using an
39
+ encoding procedure. Stabilizer codes [9], which are within
40
+ the best analytical results in this field, are not universal
41
+ because they are tailored for a generic noise acting on a
42
+ small but unknown subset of qubits. Several attempts
43
+ have already been made to create a numerical optimiza-
44
+ tion procedure to find an error correction code for specific
45
+ noise models [10–13], but these studies are not universal
46
+ because they rely heavily on the type of noise on the
47
+ specific quantum circuit and this is a problem because
48
+ real quantum devices are not characterized by a single
49
+ kind of quantum noise. Some attempts have been made
50
+ to characterize the noise of the current and near-term
51
+ devices [14, 15], but these methods will become very diffi-
52
+ cult to implement soon because classical computers are
53
+ not able to simulate efficiently quantum circuits when
54
+ the number of qubit increases. Near-term devices with
55
+ approximately 50 qubits may already be intractable to
56
+ simulate for supercomputers [16].
57
+ If we define a figure of merit of the quality of the state
58
+ after the action of the noise and its corresponding cor-
59
+ rection, the obvious choice for the kind of maximization
60
+ algorithm is a Variational Quantum Algorithm [17]. These
61
+ are hybrid algorithms that couple a quantum computer
62
+ with a classical one. In this kind of algorithms, usually, a
63
+ parametric quantum circuit is applied to some reference
64
+ state, some measurements are performed on the system,
65
+ and the outcomes are given to the classical computer to
66
+ perform a minimization procedure of a given cost function.
67
+ Some interesting examples of this class of algorithms are
68
+ the variational quantum eigensolver [18] and the Quan-
69
+ tum Approximate Optimization Algorithm (QAOA) [19].
70
+ The optimization procedure in a variational quantum al-
71
+ gorithm can be seen as the training phase in machine
72
+ learning, for example, to train a neural network.
73
+ Some variational quantum algorithms applied to quan-
74
+ tum error correction are already known in literature [20].
75
+ However, VQAs usually suffer from the phenomenon of
76
+ barren plateaus [21, 22], namely the gradient of the cost
77
+ function decays exponentially with respect to the number
78
+ of qubits of the system, leading to an untrainable model.
79
+ Some general results have already been found about this
80
+ topic [22], which justifies the presence of barren plateaus
81
+ when the cost function is a global function of the quantum
82
+ circuit, namely the measurement of a highly non-local op-
83
+ erator. For this reason, we compared the performance of
84
+ an algorithm inspired by [20] using two different cost func-
85
+ tions: the fidelity and an approximation of the quantum
86
+ Wasserstein distance.
87
+ The quantum Wasserstein distance is a generalization of
88
+ the classical Wasserstein distance between probability dis-
89
+ tributions [23–27]. Several quantum generalizations of the
90
+ Wasserstein distance have been proposed [28–62]. This
91
+ arXiv:2301.05273v1 [quant-ph] 12 Jan 2023
92
+
93
+ 2
94
+ work is based on the quantum Wasserstein distance of or-
95
+ der 1 (or quantum W1 distance) proposed in Refs. [63, 64],
96
+ which is not unitarily invariant and recovers the Hamming
97
+ distance [65] for the states of the computational basis. We
98
+ expect this new distance to improve the barren plateau
99
+ phenomenon and we can give an intuitive reason. If we
100
+ use a unitarily invariant distance as the trace distance or
101
+ the distances derived from the fidelity, all the states of the
102
+ computational basis are equally orthogonal and thus have
103
+ all maximum distance one with respect to the other. The
104
+ quantum W1 distance instead measures how many qubits
105
+ are different between the two states and this allows the
106
+ gradient to be less flat in the regions that are not already
107
+ very close to a local minimum. Indeed, the quantum W1
108
+ distance has successfully been employed as a cost function
109
+ of quantum Generative Adversarial Networks [64, 66–70]
110
+ The manuscript is organized as follows: in Sec. II we
111
+ present some basic notions on conventional QEC proce-
112
+ dures which allow us to set the notation and the theo-
113
+ retical background; in Sec. III we introduce our VQA
114
+ discussing the different choices of cost functions that can
115
+ be used in order to guide it; in Sec. IV we present our nu-
116
+ merical results where we compare the performances of the
117
+ VQA implemented with different types of cost functions.
118
+ Conclusions are given in Sec. V.
119
+ II.
120
+ PRELIMINARIES ON QEC
121
+ Let Q be a quantum register we wish to protect (at least
122
+ in part) from the action of some external noise source.
123
+ In a typical QEC scenario [71] this problem is addressed
124
+ through the following three-step procedure:
125
+ i) Before the action of the noise, a unitary encoding gate
126
+ ˆVQA is used to distribute the information originally
127
+ contained in Q on the larger system QA. Here A
128
+ is an auxiliary quantum register that is assumed to
129
+ be initialized in a fiduciary quantum state, and that
130
+ is affected by the same noise that tampers with Q;
131
+ ii) After the action of the noise a measurement on QA
132
+ is performed to reveal the nature of the latter and,
133
+ based on the associated outcome, a unitary recovery
134
+ operation is applied to the system. Equivalently this
135
+ step can be described by introducing yet an extra
136
+ quantum register B (also initialized on a fiduciary
137
+ state but not affected by the noise) that is coupled
138
+ with QA through a recovering unitary transformation
139
+ ˆWQAB which effectively mimics the measurement
140
+ and the recovery operation;
141
+ iii) The inverse of the gate ˆVQA is finally used on QA
142
+ to refocus the recovered information in Q.
143
+ Denoting with |ψ⟩Q the input state of Q, the corre-
144
+ sponding output state of QA that emerges from the pro-
145
+ cess at the end of the step iii) can be expressed as the
146
+ density matrix
147
+ ˆρ(V,W )
148
+ QA
149
+ (ψ) := trB
150
+
151
+ V†
152
+ QA ◦ WQAB ◦ ΦQA
153
+ (1)
154
+ ◦VQA
155
+
156
+ |ψ⟩Q⟨ψ| ⊗ |Ø⟩A⟨Ø| ⊗ |Ø⟩B⟨Ø|
157
+ ��
158
+ := V†
159
+ QA ◦ Φ(R)
160
+ QA ◦ ΦQA ◦ VQA
161
+
162
+ |ψ⟩Q⟨ψ| ⊗ |Ø⟩A⟨Ø|
163
+
164
+ where |Ø⟩X represents the fiduciary state of the X register,
165
+ trB{· · · } is the partial trace over B, and given a unitary
166
+ ˆUX on X we adopted the symbol UX(· · · ) := ˆUX · · · ˆU †
167
+ X
168
+ to denote its action as super-operator.
169
+ In the above
170
+ expressions ΦQA is the LCPT quantum channel [71]
171
+ describing the noise on Q and A, while Φ(R)
172
+ QA(· · · ) :=
173
+ trB{WQAB(· · ·⊗|Ø⟩B⟨Ø|)} is the LCPT (recovery) quan-
174
+ tum channel on QA originating from the interaction with
175
+ B, that attempts to undo the action of ΦQA.
176
+ An ideal QEC procedure capable of completely removing
177
+ the noise from the system will make sure that ˆρ(V,W )
178
+ QA
179
+ (ψ)
180
+ corresponds to |ψ⟩Q|Ø⟩A, irrespectively from the specific
181
+ choice of |ψ⟩Q. A bona-fide figure of merit to character-
182
+ ize the effectiveness of a generic QEC scheme is hence
183
+ provided by the average input-output fidelity
184
+ F(V, W) :=
185
+
186
+ dµψ Q⟨ψ|A⟨Ø|ˆρ(V,W )
187
+ QA
188
+ (ψ)|ψ⟩Q|Ø⟩A , (2)
189
+ where dµψ is the uniform measure on the set of the in-
190
+ put states of Q originated from the Haar measure on
191
+ the associated unitary group [72] or from an exact or
192
+ approximate unitary 2-design S [71, 73] that simulates
193
+ the latter1. Notice that by expressing |ψ⟩Q = ˆUQ|Ø⟩Q,
194
+ Eq. (2) can equivalently be casted in the more compact
195
+ form
196
+ F(V, W) = QA⟨Ø|ˆρ(V,W )
197
+ QA
198
+ |Ø⟩QA ,
199
+ (3)
200
+ with |Ø⟩QA := |Ø⟩Q ⊗ |Ø⟩A and where the state
201
+ ˆρ(V,W )
202
+ QA
203
+ := 1
204
+ |S|
205
+
206
+ ˆUQ∈S
207
+ U†
208
+ Q ◦ V†
209
+ QA ◦ Φ(R)
210
+ QA ◦ ΦQA
211
+ ◦ VQA ◦ UQ
212
+
213
+ |Ø⟩QA⟨Ø|
214
+
215
+ ,
216
+ (4)
217
+ now includes the average over all possible inputs. An
218
+ ideal QEC procedure will enable one to get F(V, W) = 1.
219
+ A natural benchmark for lowest admissible F(V, W) is
220
+ represented instead by the value one would get if one
221
+ decides not to perform corrections on the register that we
222
+ 1 We remind that a unitary 2-design is a probability distribution
223
+ over the set of unitary operators which can duplicate properties of
224
+ the probability distribution over the Haar measure for polynomials
225
+ of degree 2 or less. When Q is a single qubit, a 2-design can
226
+ be realized by an uniform sampling over a set S composed by
227
+ only 6 elements ˆ1, ˆσ1, e±iπ/4ˆσ1, e±iπ/4ˆσ2 that maps its logical
228
+ state |0⟩Q into the vectors |0⟩Q , |1⟩Q , (|0⟩Q ±i |1⟩Q)/
229
+
230
+ 2, (|0⟩Q ∓
231
+ |1⟩Q)/
232
+
233
+ 2.
234
+
235
+ 3
236
+ Yα1
237
+ Yα7
238
+ Xα2
239
+ Xα8
240
+ Xα9
241
+ Yα5
242
+ Xα6
243
+ Xα10
244
+ Xα3
245
+ Xα4
246
+ Q
247
+ A
248
+ Yα11 Xα12
249
+ Yα13 Xα14 Xα15 Xα16
250
+ Yα17 Xα18
251
+ Y /2
252
+ π
253
+ Xθ′ ′
254
+ k
255
+ Xθ′ ′ ′
256
+ k
257
+ Zθ′ k
258
+ =
259
+ Xθk
260
+ Y /2
261
+ π
262
+ B
263
+ Q
264
+ A
265
+ Yβ1
266
+ Yβ11
267
+ Yβ21
268
+ Yβ7
269
+ Yβ17
270
+ Yβ27
271
+ Yβ31
272
+ Yβ35
273
+ Yβ33
274
+ Yβ37
275
+ Xβ2
276
+ Xβ12
277
+ Xβ22
278
+ Xβ3
279
+ Xβ13
280
+ Xβ23
281
+ Xβ5
282
+ Xβ15
283
+ Xβ25
284
+ Xβ8
285
+ Xβ18
286
+ Xβ28
287
+ Xβ32
288
+ Xβ36
289
+ Xβ34
290
+ Xβ38
291
+ Zβ4
292
+ Zβ14
293
+ Zβ24
294
+ Zβ6
295
+ Zβ16
296
+ Zβ26
297
+ Xβ10
298
+ Xβ20
299
+ Xβ30
300
+ Zβ9
301
+ Zβ19
302
+ Zβ29
303
+ =
304
+ Zθk
305
+ Y /2
306
+ π
307
+ Zθ′ ′
308
+ k
309
+ Zθ′ ′ ′
310
+ k
311
+ Zθ′ k
312
+ Y /2
313
+ π
314
+ Figure 1.
315
+ Parametric gates ˆVQA(⃗α) (green element) and
316
+ ˆ
317
+ WQAB(⃗β) (red element) used for case of quantum registers Q,
318
+ A, and B with k = 1, n − k = 2, and r = 2 qubits respectively.
319
+ Indicating with ˆσ1, ˆσ2, and ˆσ3 the Pauli operators, the Xθ, Yθ,
320
+ and Zθ elements of the figure represent single qubit rotations
321
+ e−iθˆσ1, e−iθˆσ2, and e−iθˆσ3 with the angles θ determined by
322
+ the components of the vectors ⃗α, ⃗β, respectively. Vertical
323
+ lines indicate instead quantum control operations which are
324
+ activated when the control qubits (indicated by the full or
325
+ empty circles) are in the logical state |1⟩ (full circle) or in |0⟩
326
+ (empty circle). As shown on the inset, each one of those gates
327
+ depend parametrically upon elements of the control vectors ⃗α
328
+ and ⃗β through single qubit operations.
329
+ compute by setting ˆVQA and ˆWQAB equal to the identity
330
+ operators i.e.2
331
+ F 0 := QA⟨Ø|ˆρ(11,11)
332
+ QA
333
+ |Ø⟩QA .
334
+ (5)
335
+ III.
336
+ VARIATIONAL QUANTUM ALGORITHM
337
+ While enormous progress has been made in the
338
+ study of QEC procedures, identifying efficient choices for
339
+ the operations that lead to (non trivial) high values of
340
+ F(V, W) for a specific noise model, is still a challenging
341
+ open problem. A possible solution in this case is to employ
342
+ variational quantum algorithms to run numerical searches.
343
+ Our approach follows a training strategy inspired by the
344
+ work of Johnson et al. [20]. Assuming hence Q, A, and
345
+ B to be formed by collections of independent qubits (k
346
+ for Q, n − k for A, and r for B), we introduce a manifold
347
+ 2 Equation (5) accounts for the noise effects both on Q and A. A
348
+ more conservative estimation of F 0 can be obtained by focusing
349
+ directly on the noise on Q alone, i.e. tracing out the A component
350
+ of ˆρ(11,11)
351
+ QA
352
+ and studying its fidelity with |Ø⟩Q, i.e. F (strong)
353
+ 0
354
+ :=
355
+ Q⟨Ø|ˆρ(11,11)
356
+ Q
357
+ |Ø⟩Q ≥ F 0, with ˆρ(11,11)
358
+ Q
359
+ := trA ˆρ(11,11)
360
+ QA
361
+ . Notice that
362
+ for the noise model of Sec. III C the two are directly connected
363
+ via the identity F 0 = F (strong)
364
+ 0
365
+ − n−1
366
+ n p(1 − |⟨0|ˆσ|0⟩|2).
367
+ ï
368
+ ï
369
+ ï
370
+ ï
371
+ ï
372
+ ï
373
+ |0ï
374
+ |0ï
375
+ |0ï
376
+ |0ï
377
+ |0ï
378
+ |0ï
379
+ |0ï
380
+ |0ï
381
+ |0ï
382
+ Q
383
+ A
384
+ B
385
+ UQ(j)
386
+ ï
387
+ ï
388
+ ï
389
+ M
390
+ M
391
+ M
392
+ M
393
+ C(
394
+ ÷³ ,
395
+ ÷
396
+ ´ )
397
+ M
398
+ M
399
+ U†
400
+ Q(j)
401
+ WQAB(
402
+ ÷
403
+ ´ )
404
+ VQA(
405
+ ÷³ )
406
+ V†
407
+ QA(
408
+ ÷³ )
409
+ j
410
+ ÷³ ,
411
+ ÷
412
+ ´
413
+ n aˆK
414
+ ic
415
+ r t
416
+ ovth
417
+ er
418
+ e w
419
+ qu
420
+ we
421
+ = 1
422
+ F
423
+ cor
424
+ NOISE
425
+ Figure 2. (Color online) Sketch of the variational quantum
426
+ algorithm: Q, A and B are quantum registers formed respec-
427
+ tively by k, n−k and r qubits. The initial information we wish
428
+ to protect is written in Q by the unitary gate ˆUQ(j) extracted
429
+ from a 2-design set S; A and B are two auxiliary elements
430
+ (containing respectively n − k and r qubits) that are used to
431
+ implement the QEC procedure described by the parametric
432
+ gates ˆVQA(⃗α), ˆ
433
+ WQAB(⃗β), and ˆV †
434
+ QA(⃗α) of Fig. 1. The patterned
435
+ element in the central part of the scheme represents the noise
436
+ on Q and A (no noise is assumed to be active on B). Lastly,
437
+ the D-shaped measurements at the end of the circuit represent
438
+ local measurements on QA whose outcomes over the entire
439
+ collection of the possible inputs generated by the entire set S,
440
+ are processed by a classical computer which, evaluating the
441
+ cost function C(⃗α, ⃗β) defined in Section III B, decides how to
442
+ update the values of the parameters ⃗α and ⃗β. Thick grey lines
443
+ in the figure represent classical control lines.
444
+ of transformations ˆVQA(⃗α), ˆWQAB(⃗β) parametrized by
445
+ classical controls vectors ⃗α, ⃗β (see Fig. 1), and construct
446
+ the quantum circuit of Fig. 2. The method then proceeds
447
+ along the following stages:
448
+ 1. Having selected the values of ⃗α and ⃗β, the regis-
449
+ ter Q is prepared into a collection of known quan-
450
+ tum states {|ψ(1)⟩Q, · · · , |ψ(m)⟩Q} operating on the
451
+ vector |Ø⟩Q = |0⟩⊗k through the action of the con-
452
+ trol gates ˆUQ(1), · · · , ˆUQ(m) (first cyan element of
453
+ the figure) which define the 2-design S entering
454
+ in Eq. (4). Each of such inputs is hence evolved
455
+ via a circuit (pale-orange area of the figure) that
456
+ emulates both the effect of the noise (patterned
457
+ square of the figure, see Section III C and Fig. 4),
458
+ and the transformations ˆVQA(⃗α), ˆWQAB(⃗β), and
459
+ ˆV †
460
+ QA(⃗α) that are meant to implement the steps ii)
461
+ and iii) of the QEC procedure (green and red ele-
462
+ ments of the figure). Notice that in the ideal case
463
+ (i.e. if ˆVQA(⃗α) and ˆWQAB(⃗β) manage to completely
464
+ suppress the noise) then in correspondence with
465
+
466
+ 4
467
+ the input |ψ(j)⟩Q the registers QA should emerge
468
+ in the state |ψ(j)⟩Q ⊗ |Ø⟩A := |ψ(j)⟩Q ⊗ |0⟩⊗n−k,
469
+ which will be hence mapped into the final configu-
470
+ ration |Ø⟩QA := |0⟩⊗n by the inverse ˆU †
471
+ Q(j) of the
472
+ state preparation gate (second cyan element of the
473
+ figure).
474
+ 2. For each choice of the index j ∈ {1, · · · , m} a mea-
475
+ surement on the system is performed at the end of
476
+ the transformations described in the stage 1 and the
477
+ resulting m collected outcomes used to compute a
478
+ cost function C(⃗α, ⃗β) which evaluates the effective-
479
+ ness of the adopted QEC strategy in leading to large
480
+ values of the average input-output fidelity. The spe-
481
+ cific choice of the cost function is very important
482
+ and is discussed in Section III A.
483
+ 3. A classical computer decides, given the results of
484
+ the measurement, how to change the value of the
485
+ parameters ⃗α and ⃗β to be used in the subsequent
486
+ run in order to minimize the cost function C(⃗α, ⃗β).
487
+ This is discussed in detail in Section III B.
488
+ A.
489
+ Cost function
490
+ The natural choice for the cost function at the stage 2
491
+ of our algorithm is provided by the expectation value of
492
+ the self-adjoint operator
493
+ ˆH(fid)
494
+ QA := ˆ1QA − |Ø⟩QA ⟨Ø| ,
495
+ (6)
496
+ computed on the mean state of system QA which emerges
497
+ at the output of the quantum circuit of Fig. 2, i.e. the
498
+ quantity
499
+ C(fid)(⃗α, ⃗β) := tr{ˆρ(V (⃗α),W (⃗β))
500
+ QA
501
+ ˆH(fid)
502
+ QA } ,
503
+ (7)
504
+ where ˆρ(V (⃗α),W (⃗β))
505
+ QA
506
+ is the density matrix (4) evaluated
507
+ for ˆVQA = ˆVQA(⃗α) and ˆWQAB = ˆWQAB(⃗β). This choice
508
+ has two main advantages. First of all, the expectation
509
+ value C(fid)(⃗α, ⃗β) can be evaluated by performing (sim-
510
+ ple) local measurement on the qubits of Q and A (in-
511
+ deed it can be computed by simply checking whether or
512
+ not each one of them is in the logical state |0⟩). Most
513
+ importantly, since by explicit evaluation one has that
514
+ C(fid)(⃗α, ⃗β) = 1 − F(V (⃗α), W(⃗β)), it is clear that by us-
515
+ ing (7) the algorithm will be forced to look for values
516
+ of ⃗α, ⃗β that yield higher average input-output fidelities.
517
+ Despite all this, the use of C(fid)(⃗α, ⃗β) as a cost function,
518
+ has a major drawback associated with the fact that the
519
+ spectrum of the Hamiltonian ˆH(fid)
520
+ QA exhibits maximum
521
+ degeneracy with respect to space orthogonal to the target
522
+ state |Ø⟩QA (see Fig. 3). Due to this fact a numerical
523
+ search based on a training procedure that simply targets
524
+ the minimization of C(fid)(⃗α, ⃗β), has non trivial chances
525
+ to get stuck somewhere in the large flat plateau associated
526
+ with the eigenvalue 1 of ˆH(fid)
527
+ QA without finding any good
528
+ direction. A possible way to avoid
529
+ this problem is to introduce new cost-functions Hamilto-
530
+ nians which, while maintaining the target vector |Ø⟩QA
531
+ as unique ground state and still being easy to compute,
532
+ manage to remove the huge degeneracy of the excited
533
+ part of the spectra of ˆH(fid)
534
+ QA . Our choice is based on the
535
+ quantum Wasserstein distance of order 1 (W1) introduced
536
+ in Ref. [63] which, even though it lacks some interesting
537
+ properties that the fidelity has, is less likely to be affected
538
+ by the barren plateaus phenomena [22]. A good estima-
539
+ tion of the W1 distance that separates ˆρ(V (⃗α),W (⃗β))
540
+ QA
541
+ from
542
+ the target state, is provided by the following quantity
543
+ C(wass)(⃗α, ⃗β) := tr{ˆρ(V (⃗α),W (⃗β))
544
+ QA
545
+ ˆH(wass)
546
+ QA
547
+ } ,
548
+ (8)
549
+ ˆH(wass)
550
+ QA
551
+ :=
552
+ n
553
+
554
+ j=1
555
+ j ˆΠ(j)
556
+ QA ,
557
+ (9)
558
+ where ˆΠ(j)
559
+ QA represents the projector onto the sub-space of the register QA
560
+ in which we have j qubits in |1⟩ and the remaining ones in
561
+ |0⟩. Observe that ˆH(wass)
562
+ QA
563
+ is nothing but the sum of the
564
+ number operators acting on the individual qubits of the
565
+ register QA, (i.e. ˆH(wass)
566
+ QA
567
+ = �n
568
+ ℓ=1 ˆπℓ with ˆπℓ the projector
569
+ on the logical state |1⟩ of the ℓ-th qubit): accordingly,
570
+ as C(fid)(⃗α, ⃗β), C(wass)(⃗α, ⃗β) can be computed from local
571
+ measurements. What C(wass)(⃗α, ⃗β) does is to count the
572
+ total number of logical ones present in the system. To
573
+ understand why using (8) could in principle lead to a
574
+ more efficient numerical search than the one obtained by
575
+ using (7), notice that Eq. (6) can be equivalently written
576
+ as ˆH(fid)
577
+ QA =
578
+ n
579
+
580
+ j=1
581
+ ˆΠ(j)
582
+ QA. A comparison with (9) reveals hence
583
+ that indeed while both ˆH(fid)
584
+ QA and ˆH(wass)
585
+ QA
586
+ admit |Ø⟩QA
587
+ as unique ground state, the Wasserstein Hamiltonian
588
+ removes large part of the degeneracy of the high energy
589
+ spectrum of the fidelity Hamiltonian. Accordingly it is
590
+ reasonable to expect that a numerical search that uses
591
+ ˆH(wass)
592
+ QA
593
+ , has less chances to get trapped into regions of
594
+ constant energy (barren plateau) than a search based on
595
+ ˆH(fid)
596
+ QA ,3.
597
+ 3 It goes without mentioning that alternative choices for the cost
598
+ function Hamiltonians are also available. For instance one can
599
+ use operators that also remove the residual degeneracies that
600
+ affect ˆH(wass)
601
+ QA
602
+ – e.g. using the operator ˆH(full)
603
+ QA
604
+ = �n
605
+ ℓ=1 wℓˆπℓ
606
+ with wℓ positive weights selected so that different allocations of |1⟩
607
+ states inside the eigenspaces of ˆH(wass)
608
+ QA
609
+ get an assigned ordering.
610
+ Our numerical analysis however seems to indicate that these
611
+ refinements do not contribute significantly to improving the numerical
612
+ search of the algorithm.
613
+
614
+ 5
615
+ eigenvalues of
616
+ ̂H(fid)
617
+ QA
618
+ eigenvalues of
619
+ ̂H(wass)
620
+ QA
621
+ N = 3
622
+ N = 3
623
+ N = 2
624
+ N = 1
625
+ N = 2
626
+ N = 0
627
+ 1
628
+ 2
629
+ 3
630
+ 0
631
+ N = 1
632
+ N = 2
633
+ N = 3
634
+ N = 0
635
+ 0
636
+ 1
637
+ Figure 3. Pictorial rendering of the spectra of the Hamiltonians
638
+ ˆH(fid)
639
+ QA
640
+ (top panel) and ˆH(wass)
641
+ QA
642
+ (lower panel). While ˆH(fid)
643
+ QA
644
+ is characterized by a unique, flat plateau that includes all
645
+ the excited states, ˆH(wass)
646
+ QA
647
+ partially removes the associated
648
+ degeneracy assigning higher energy to subspaces that have
649
+ higher number of qubits in the logical state |1⟩.
650
+ B.
651
+ Descent algorithm
652
+ The algorithm that we used for this work is a gradient
653
+ descent algorithm with momentum [74]. To overcome the
654
+ numerical difficulties of using finite differences to estimate
655
+ the gradients of the cost function C(⃗α, ⃗β), we exploit a
656
+ variation of the parameter-shift rule introduced in [75]
657
+ which reduces the problem to compute linear combina-
658
+ tions of the function itself evaluated in different points
659
+ that are not infinitesimally close. Specifically we observe
660
+ that, irrespective of the choice of the operator ˆHQA,
661
+ the functional dependence of C(⃗α, ⃗β) upon the j-th com-
662
+ ponent of the vector ⃗β is of the form
663
+ C(⃗α, ⃗β) = f(βj) :=
664
+
665
+ k
666
+ tr
667
+ �ˆΩ(k)
668
+ 1 eiβj ˆσ ˆΩ(k)
669
+ 2 e−iβj ˆσ�
670
+ ,
671
+ (10)
672
+ with ˆΩ(k)
673
+ 1,2 being multi-qubit operators which do not de-
674
+ pend upon βj, and with e−iβj ˆσ a single qubit rotation
675
+ generated by an element ˆσ of the Pauli set. Therefore its
676
+ gradient can be written as
677
+ ∂C(⃗α, ⃗β)
678
+ ∂βj
679
+ = i
680
+
681
+ k
682
+ tr
683
+ �ˆΩ(k)
684
+ 1 eiβj ˆσ[ˆσ, ˆΩ(k)
685
+ 2 ]e−iβj ˆσ�
686
+ = f(βj + π
687
+ 4 ) − f(βj − π
688
+ 4 ) ,
689
+ (11)
690
+ where in the last passage we used the identity
691
+ i[ˆσ, ˆΩ(k)
692
+ 2 ] = ei π
693
+ 4 ˆσ ˆ
694
+ Ω2
695
+ (k)e−i π
696
+ 4 ˆσ − e−i π
697
+ 4 ˆσ ˆ
698
+ Ω2
699
+ (k)ei π
700
+ 4 ˆσ.
701
+ (12)
702
+ The gradient with respect to the vector ⃗α can be computed
703
+ similarly. In this case however we observe that, due to
704
+ the fact that ˆρ(V (⃗α),W (⃗β))
705
+ QA
706
+ (ψ) depends upon the parame-
707
+ ters ⃗α via ˆVQA(⃗α) and through its adjoint ˆV †
708
+ QA(⃗α), the
709
+ dependence of C(⃗α, ⃗β) upon the j-th component of ⃗α is
710
+ slightly more complex. Indeed in this case we have
711
+ C(⃗α, ⃗β) = g(αj, αj) ,
712
+ (13)
713
+ where g(α(1)
714
+ j , α(2)
715
+ j ) is the function
716
+ g(α(1)
717
+ j , α(2)
718
+ j ) :=
719
+
720
+ k
721
+ tr
722
+ �ˆΩ(k)
723
+ 1 eiα(1)
724
+ j
725
+ ˆσ ˆΩ(k)
726
+ 2 e−iα(1)
727
+ j
728
+ ˆσ
729
+ (14)
730
+ ׈Ω(k)
731
+ 3 eiα(2)
732
+ j
733
+ ˆσ ˆΩ(k)
734
+ 4 e−iα(2)
735
+ j
736
+ ˆσ�
737
+ ,
738
+ with ˆΩ(k)
739
+ 1,2,3,4 representing multi-qubit operators which
740
+ depend neither upon α(1)
741
+ j
742
+ nor α(2)
743
+ j . It is important
744
+ to stress that g(α(1)
745
+ j , α(2)
746
+ j ) can be computed using the
747
+ same circuit of Fig. 2, by simply replacing the phases αj
748
+ of ˆVQA(⃗α) and ˆV †
749
+ QA(⃗α) with α(1)
750
+ j
751
+ and α(2)
752
+ j
753
+ respectively.
754
+ Notice finally that exploiting the identity Eq. (12) we can
755
+ write
756
+ ∂C(⃗α, ⃗β)
757
+ ∂αj
758
+ =
759
+ ∂g(α(1)
760
+ j , αj)
761
+ ∂α(1)
762
+ j
763
+ �����
764
+ α(1)
765
+ j
766
+ =αj
767
+ +
768
+ ∂g(αj, α(2)
769
+ j )
770
+ ∂α(2)
771
+ j
772
+ �����
773
+ α(2)
774
+ j
775
+ =αj
776
+ (15)
777
+ = g(αj + π
778
+ 4 , αj) − g(αj − π
779
+ 4 , αj)
780
+ + g(αj, αj + π
781
+ 4 ) − g(αj, αj − π
782
+ 4 ) ,
783
+ which shows that computing the gradient of C(⃗α, ⃗β) with
784
+ respect to αj simply accounts to evaluate the circuit
785
+ that express g(α(1)
786
+ j , α(2)
787
+ j ) for four distinct values of the
788
+ parameters.
789
+ C.
790
+ Noise model
791
+ The scheme presented so far can in principle be applied
792
+ to arbitrary classes of noises. In our research however
793
+ we focused on a specific model that has been extensively
794
+ studied in the literature producing explicit examples of
795
+ efficient QEC solutions which can be used as a theoretical
796
+ benchmark for our variational search. Specifically we
797
+ assume Q and A to be respectively a single qubit register
798
+ (k = 1) and a two qubit register (n = 3), globally affected
799
+ by a given species of single-qubit noise [76, 77]. These
800
+ transformations can be represented in terms of a LCPT
801
+ map of the form
802
+ ΦQA(· · · ) =
803
+ n
804
+
805
+ ℓ=0
806
+ ˆK(ℓ)
807
+ QA · · · ˆK(ℓ)†
808
+ QA ,
809
+ (16)
810
+
811
+ 6
812
+ Q
813
+ NOISE
814
+ io
815
+ d l
816
+ est
817
+ , t
818
+ noise,
819
+ ep
820
+ tsmth
821
+ A
822
+ =
823
+ 3
824
+ K(3)
825
+ QA
826
+ Figure 4. Circuital implementation of the noise element of
827
+ Fig. 2: here ˆK(ℓ)
828
+ QA are weighted unitaries of Eq. (17).
829
+ Xπ/2
830
+ Xπ/4
831
+ Q
832
+ A
833
+ Xπ/2
834
+ B
835
+ Q
836
+ A
837
+ Xπ/2
838
+ Xπ/4
839
+ Xπ/4
840
+ Zπ/2
841
+ Xπ/4
842
+ Xπ/4
843
+ Xπ/4
844
+ Xπ/4
845
+ Xπ/2
846
+ Xπ/2
847
+ Xπ/2
848
+ Zπ/2
849
+ Zπ/2
850
+ Figure 5. Circuital implementations of the ideal transforma-
851
+ tions ˆVQA(⃗α) (left) and ˆ
852
+ WQAB(⃗β) (right) which allow for exact
853
+ noise suppression of a single-qubit bit-flip noise model [i.e. (16)
854
+ with ˆσ(ℓ) = ˆσ(ℓ)
855
+ 1 ] using a quantum register B with r = 2 qubits.
856
+ Here H represents Hadamard gates, while the control-elements
857
+ are C-NOT gates.
858
+ with Kraus operators [71]
859
+ ˆK(0)
860
+ QA :=
861
+
862
+ 1 − p ˆ1QA ,
863
+ ˆK(ℓ)
864
+ QA :=
865
+ � p
866
+ n ˆσ(ℓ) ,
867
+ (17)
868
+ where for ℓ ∈ {1, · · · , n}, ˆσ(ℓ) is the Pauli operator acting
869
+ on the ℓ-th qubit of QA which defines the noise species
870
+ we have selected.
871
+ For instance in the case we choose
872
+ to describe phase-flip noise then ˆσ(ℓ) = ˆσ(ℓ)
873
+ 3 , while for
874
+ describing bit-flip we have ˆσ(ℓ) = ˆσ(ℓ)
875
+ 1 . Explicit exam-
876
+ ples of ˆVQA, ˆWQAB which allow for exact suppression
877
+ of the noise (F(V, W) = 1) are shown in Fig. 5. No-
878
+ tice that by construction the circuit parametrization of
879
+ ˆVQA(⃗α), ˆWQAB(⃗β) given in Fig. 2 include such gates as
880
+ special solution: accordingly if properly guided by an
881
+ efficient cost function, our numerical VQA search has a
882
+ chance to find the solution of Fig. 5.
883
+ IV.
884
+ RESULTS
885
+ In this section we study the impact of the cost func-
886
+ tion on the efficiency of the optimization algorithm of
887
+ Sec. III. Assuming the single-qubit noise model detailed
888
+ in Sec. III C and taking B to be a r = 2 qubit register,
889
+ we run two distinct numerical searches: the first obtained
890
+ 7
891
+ 0
892
+ 500
893
+ 1,000
894
+ 1,500
895
+ 2,000
896
+ 0
897
+ 10
898
+ 20
899
+ 30
900
+ 40
901
+ Iterations
902
+ 1 � F
903
+ W1
904
+ 0.82
905
+ 0.83
906
+ 0.84
907
+ 0.85
908
+ 0.86
909
+ 0.87
910
+ 0
911
+ 50
912
+ 100
913
+ 150
914
+ Average fidelity
915
+ 1 � F
916
+ W1
917
+ Figure 7.
918
+ Comparison of the input-output average fi-
919
+ delity (3) attainable by running our optimization algorithm us-
920
+ ing the cost function C(fid)(~↵, ~�) (blue data) and C(wass)(~↵, ~�)
921
+ (orange data). Here the error model is a single-qubit bit-flip
922
+ noise (ˆ� = ˆ�1 in (16)) with p = 0.8. The no error correction
923
+ threshold (5) of this scheme is F 0 ⇡ 0.822 – orange peak in
924
+ the fidelity plot, up to numerical precision. Only the runs
925
+ that produced a fidelity of at least F 0 have been included.
926
+ For the C(fid)(~↵, ~�) this is 0.2%, while for C(wass)(~↵, ~�) this
927
+ corresponds to 29.6%.
928
+ to begin with it succeeds in overcoming the threshold F 0
929
+ in one third of the simulations (specifically 40.6% for the
930
+ phase-flip noise model and 29.6% for the bit-flip noise
931
+ model). Furthermore the algorithm reach convergency
932
+ with a number of iterations which are typically smaller
933
+ than those required by C(fid)(~↵, ~�).
934
+ V.
935
+ CONCLUSIONS
936
+ TO BE REWRITTEN To summarize, we have
937
+ shown a variational quantum algorithm that allows finding
938
+ the most suitable error correction procedure for a specific
939
+ noise on quantum hardware. We compared the perfor-
940
+ mance of two di↵erent versions of this algorithm using two
941
+ di↵erent cost functions, the fidelity and an approximation
942
+ of the Wasserstein distance of order one. We compared
943
+ the di↵erence in speed and the ability to obtain a useful
944
+ solution between the two algorithms, finding really di↵er-
945
+ ent trends between the two optimization procedures. The
946
+ optimization process based on the fidelity su↵ers greatly
947
+ from the phenomenon of the barren plateaus, leading to
948
+ very slow convergence or no convergence at all, while the
949
+ algorithm based on the W1 approximation allows us to
950
+ find the configurations that correct the errors, at least in
951
+ the examples that we explored. The results obtained are
952
+ still not enough to use this method as a silver bullet to
953
+ handle this problem, but show a clear improvement and
954
+ allow us to explore further improvements of these meth-
955
+ ods, like using di↵erent algorithms for the minimization
956
+ process, like stochastic gradient descent or higher-order
957
+ algorithms like Newton or pseudo-Newton algorithms.
958
+ Given that the gradient can be expressed only with the
959
+ cost function evaluated in a small number of circuits that
960
+ differ only in the parameter choice, we can compute
961
+ the gradient of the cost function on the same hardware
962
+ that will be used for the correction procedure. Moreover,
963
+ simulating this circuit may be di�cult because of the
964
+ exponential scaling of the dimension of the Hilbert space
965
+ of a set of qubits, but this problem does not apply when
966
+ all the circuit is built on hardware, gaining a quantum
967
+ advantage.
968
+ Moreover, given that the gradient of the cost function
969
+ can be expressed in terms of the same cost function, the
970
+ same procedure can be iterated to compute the exact Hes-
971
+ sian of the cost function and then apply a second-order
972
+ method like the Newton method as a descent algorithm.
973
+ However, this has not been done because the circuits that
974
+ we marked as useful have a relatively big number of pa-
975
+ rameters, and computing the Hessian scales quadratically
976
+ with this number, leading to intractable computations. A
977
+ second-order pseudo-Newton method may improve conver-
978
+ gence speed once the algorithm gets near convergence
979
+ and this is a good idea for future developments.
980
+ Acknowledgments
981
+ FZ and VG acknowledge financial support by MIUR
982
+ (Ministero dell’ Istruzione, dell’ Universit`a della Ricerca)
983
+ by PRIN 2017 Taming complexity via Quantum Strate-
984
+ gies: a Hybrid Integrated Photonic approach (QUSHIP)
985
+ Id.
986
+ 2017SRN-BRK, and via project PRO3 Quantum
987
+ Pathfinder. GDP is a member of the “Gruppo Nazionale
988
+ per la Fisica Matematica (GNFM)” of the “Istituto
989
+ Nazionale di Alta Matematica “Francesco Severi” (IN-
990
+ (M)
991
+ Number of simulations
992
+ Number of simulations
993
+ C(fid)
994
+ C(wass)
995
+ C(fid)
996
+ C(wass)
997
+ F0
998
+ Figure 6.
999
+ Comparison of the input-output average fi-
1000
+ delity (3) attainable by running our optimization algorithm us-
1001
+ ing the cost function C(fid)(⃗α, ⃗β) (blue data) and C(wass)(⃗α, ⃗β)
1002
+ (orange data). Here the error model is a single-qubit bit-flip
1003
+ noise (ˆσ = ˆσ1 in (16)) with p = 0.8. The no error correction
1004
+ threshold (5) of this scheme is F 0 ≈ 0.822 – orange peak in
1005
+ the fidelity plot, up to numerical precision. Only the runs
1006
+ that produced a fidelity of at least F 0 have been included.
1007
+ For the C(fid)(⃗α, ⃗β) this is 0.2%, while for C(wass)(⃗α, ⃗β) this
1008
+ corresponds to 29.6%.
1009
+ by identifying C(⃗α, ⃗β) with C(fid)(⃗α, ⃗β) and the second
1010
+ choosing instead C(wass)(⃗α, ⃗β). Results are reported in
1011
+ Figs. 6 and 7 for two different choices of the noise mod-
1012
+ els (16), i.e. phase-flip and bit-flip. For both we compare
1013
+ the input-output average fidelity (3) at the end of the
1014
+ procedure obtained with the two different cost functions,
1015
+ and the number of iterations M needed for convergence.
1016
+ Regarding this last quantity we set a maximum value
1017
+ Mmax equal to 2000 before convergence and we chose this
1018
+
1019
+ 7
1020
+ limit mainly on practical grounds, such as the maximum time
1021
+ for the simulation, enforcing that a single run does not
1022
+ require more than a few hours of computational time:
1023
+ in case the algorithm fails to reach convergence we
1024
+ simply stop the numerical search (this is the reason for
1025
+ the peak at the end of the upper orange plot in Fig. 7).
1026
+ The plots report only the simulations that manage to
1027
+ achieve an average fidelity that is greater than or equal to the
1028
+ no-correction threshold bound F 0.
1029
+ The first thing to observe is that for both noise mod-
1030
+ els, C(fid)(⃗α, ⃗β) has problems reaching the do-nothing
1031
+ threshold F 0: the probability of success being 2.6% for
1032
+ the phase-flip case of Fig. 7 and only 0.2% for the bit-flip
1033
+ case of Fig. 6 (for both noise models the total number of
1034
+ simulations analyzed was 500). Observe also that in this
1035
+ last case the algorithm never yields average input-output
1036
+ fidelity values strictly larger than F 0 and that, even in
1037
+ those cases, it requires a number M of iterations which
1038
+ saturates the maximum allowed value Mmax (blue peak in the
1039
+ upper plot of Fig. 7). C(was)(⃗α, ⃗β) performs definitely bet-
1040
+ ter: to begin with it succeeds in overcoming the threshold
1041
+ F 0 in one third of the simulations (specifically 40.6% for
1042
+ the phase-flip noise model and 29.6% for the bit-flip noise
1043
+ model). Furthermore the algorithm reaches convergence
1044
+ with a number of iterations which are typically smaller
1045
+ than those required by C(fid)(⃗α, ⃗β).
1046
+ To better highlight the differences between the two cost
1047
+ functions, we proceeded with further simulations, whose
1048
+ results are summarized in Fig. 8. The idea here is to run a
1049
+ two-step optimization process composed by two sequences
1050
+ of runs: in the first run we start the optimization proce-
1051
+ dure from a random point in the parameter space (⃗α, ⃗β)
1052
+ with one of the two cost functions (say C(fid)(⃗α, ⃗β)), up
1053
+ to convergence; after that we start a second optimization
1054
+ run using the other cost function (say C(wass)(⃗α, ⃗β)) but
1055
+ assuming as initial condition for the parameters the final
1056
+ point reached by the first run. The plots report the dif-
1057
+ ference in fidelity between the second and the first run:
1058
+ when we start using the C(wass)(⃗α, ⃗β) in the first run, the
1059
+ fidelity cannot further improve the result that is already
1060
+ found, and this is represented by the fact that the best
1061
+ improvement is of the order of 10−5; on the contrary if
1062
+ we started employing C(fid)(⃗α, ⃗β) in the first run, the use
1063
+ of C(wass)(⃗α, ⃗β) in the second run typically yields sub-
1064
+ stantial improvements of the performance4. Moreover, we
1065
+ sampled some single descent processes and plotted the
1066
+ cost in function of the iteration. When we move from
1067
+ y w
1068
+ cte
1069
+ vec
1070
+ 0
1071
+ 500
1072
+ 1,000
1073
+ 1,500
1074
+ 2,000
1075
+ 0
1076
+ 20
1077
+ 40
1078
+ 60
1079
+ Iterations
1080
+ 1 � F
1081
+ W1
1082
+ 0.85
1083
+ 0.9
1084
+ 0.95
1085
+ 1
1086
+ 0
1087
+ 50
1088
+ 100
1089
+ 150
1090
+ Average fidelity
1091
+ 1 � F
1092
+ W1
1093
+ (M)
1094
+ Number of simulations
1095
+ Number of simulations
1096
+ C(fid)
1097
+ C(wass)
1098
+ C(fid)
1099
+ C(wass)
1100
+ F0
1101
+ Figure 7.
1102
+ Comparison of the input-output average fi-
1103
+ delity (3) attainable by running our optimization algorithm us-
1104
+ ing the cost function C(fid)(⃗α, ⃗β) (blue data) and C(wass)(⃗α, ⃗β)
1105
+ (orange data). Here the error model is a single-qubit phase-flip
1106
+ noise (ˆσ = ˆσ3 in (16)) with p = 0.8. The no error correction
1107
+ threshold (5) of this scheme is F 0 ≈ 0.822 – orange peak in
1108
+ the fidelity plot, up to numerical precision. Only the runs
1109
+ that produced a fidelity of at least F 0 have been included.
1110
+ For the C(fid)(⃗α, ⃗β) this is 2.6%, while for C(wass)(⃗α, ⃗β) this
1111
+ corresponds to 40.6%.
1112
+ fidelity to W1, the descent part after the change of cost
1113
+ function is qualitatively indistinguishable from starting
1114
+ from a random point.
1115
+ 4 It has to be said that in few cases the figure of merit is worse
1116
+ after the second optimization – see the negative bar in right panel
1117
+ of Fig. 8. This is due to the fact that when using C(wass)(⃗α, ⃗β)
1118
+ we are not maximizing the fidelity but minimizing a function
1119
+ whose stationary point corresponds to the maximum of the latter:
1120
+ accordingly the final point of convergence for C(wass)(⃗α, ⃗β) can
1121
+ be slightly off mark in terms of fidelity. This is not a problem
1122
+ because these two functions do not have a constant ratio, and we
1123
+ checked that the inequalities between them are still satisfied.
1124
+
1125
+ 8
1126
+ 0
1127
+ 1
1128
+ 2
1129
+ 3
1130
+ 4
1131
+ 5
1132
+ ·10−5
1133
+ 0
1134
+ 100
1135
+ 200
1136
+ 300
1137
+ 400
1138
+ 500
1139
+ Average fidelity
1140
+ Number of simulations
1141
+ 0
1142
+ 0.2
1143
+ 0.4
1144
+ 0.6
1145
+ 0.8
1146
+ 0
1147
+ 10
1148
+ 20
1149
+ 30
1150
+ 40
1151
+ 50
1152
+ Average fidelity
1153
+ Number of simulations
1154
+ Figure 8. Improvement of simulations when changing the cost function in a two run optimization process that uses different cost
1155
+ functions to drive the descent algorithm. In the left plot, we started the descent on a random initial point, ran the optimization
1156
+ using C(wass)(⃗α, ⃗β) as cost function until convergence and then we started the descent algorithm again but using C(fid)(⃗α, ⃗β)
1157
+ as cost function, starting from the final point of the previous descent. In the right part, the roles of the two cost functions
1158
+ are inverted (we start using C(fid)(⃗α, ⃗β) and then we use C(wass)(⃗α, ⃗β)). The histograms represent the difference in average
1159
+ input-output fidelity (2) after the change of cost function, namely the difference between the fidelity achieved after the second
1160
+ descent and the fidelity after the first descent (positive values correspond to improved performances). Please notice the scale
1161
+ difference on the x-axis between the left and right plot.
1162
+ V.
1163
+ CONCLUSIONS
1164
+ To summarize, we have shown a variational quantum al-
1165
+ gorithm that allows finding the most suitable error correc-
1166
+ tion procedure for a specific noise on quantum hardware.
1167
+ We compared the performance of two different versions
1168
+ of this algorithm using two different cost functions, the
1169
+ fidelity and an approximation of the quantum Wasser-
1170
+ stein distance of order one. We compared the difference
1171
+ in speed and the ability to obtain a useful solution be-
1172
+ tween the two algorithms, finding really different trends
1173
+ between the two optimization procedures. The optimiza-
1174
+ tion process based on the fidelity suffers greatly from the
1175
+ phenomenon of the barren plateaus, leading to very slow
1176
+ convergence or no convergence at all, while the algorithm
1177
+ based on the quantum W1 distance allows us to find the
1178
+ configurations that correct the errors in the examples
1179
+ that we explored. The obtained results show a clear im-
1180
+ provement and allow us to explore further improvements
1181
+ of these methods, as using different algorithms for the
1182
+ minimization process, e.g. stochastic gradient descent or
1183
+ higher-order algorithms like Newton or pseudo-Newton
1184
+ algorithms.
1185
+ Given that the gradient can be expressed only with
1186
+ the cost function evaluated in a small number of circuits
1187
+ that differ only in the parameter choice, the gradient of
1188
+ the cost function can be computed on the same hardware
1189
+ that will be used for the correction procedure. Moreover,
1190
+ simulating this circuit may be difficult because of the
1191
+ exponential scaling of the dimension of the Hilbert space
1192
+ of a set of qubits, but this problem does not apply when
1193
+ all the circuit is built on hardware, gaining a quantum
1194
+ advantage. For the same reason, the same procedure
1195
+ can be iterated to compute the exact Hessian of the cost
1196
+ function and then apply a second-order method like the
1197
+ Newton method as a descent algorithm. However, this
1198
+ has not been done because the circuits that we marked
1199
+ as useful have a relatively big number of parameters,
1200
+ and computing the Hessian scales quadratically with this
1201
+ number, leading to intractable computations.
1202
+ Acknowledgments
1203
+ FZ and VG acknowledge financial support by MIUR
1204
+ (Ministero dell’ Istruzione, dell’ Universit`a della Ricerca)
1205
+ by PRIN 2017 Taming complexity via Quantum Strate-
1206
+ gies: a Hybrid Integrated Photonic approach (QUSHIP)
1207
+ Id.
1208
+ 2017SRN-BRK, and via project PRO3 Quantum
1209
+ Pathfinder. GDP is a member of the “Gruppo Nazionale
1210
+ per la Fisica Matematica (GNFM)” of the “Istituto
1211
+ Nazionale di Alta Matematica “Francesco Severi” (IN-
1212
+ dAM)”.
1213
+ VI.
1214
+ BIBLIOGRAPHY
1215
+ [1] J. W. Z. Lau, K. H. Lim, H. Shrotriya, and L. C. Kwek,
1216
+ Nisq computing: where are we and where do we go?,
1217
+ AAPPS Bulletin 32, 27 (2022).
1218
+
1219
+ 9
1220
+ [2] J. Preskill, Quantum computing and the entanglement
1221
+ frontier (2012).
1222
+ [3] J. Preskill, Quantum computing in the NISQ era and
1223
+ beyond, Quantum 2, 79 (2018).
1224
+ [4] N. Gisin, G. Ribordy, W. Tittel, and H. Zbinden, Quan-
1225
+ tum cryptography, Reviews of Modern Physics 74, 145
1226
+ (2002).
1227
+ [5] H.-K. Lo, M. Curty, and K. Tamaki, Secure quantum key
1228
+ distribution, Nature Photonics 8, 595 (2014).
1229
+ [6] K. Banaszek, Optimal receiver for quantum cryptogra-
1230
+ phy with two coherent states, Physics Letters A 253, 12
1231
+ (1999).
1232
+ [7] S. Pirandola, U. L. Andersen, L. Banchi, M. Berta,
1233
+ D. Bunandar, R. Colbeck, D. Englund, T. Gehring,
1234
+ C. Lupo, C. Ottaviani, J. L. Pereira, M. Razavi, J. S.
1235
+ Shaari, M. Tomamichel, V. C. Usenko, G. Vallone, P. Vil-
1236
+ loresi, and P. Wallden, Advances in quantum cryptogra-
1237
+ phy, Adv. Opt. Photon. 12, 1012 (2020).
1238
+ [8] F. Cavaliere, E. Prati, L. Poti, I. Muhammad, and
1239
+ T. Catuogno, Secure quantum communication technolo-
1240
+ gies and systems: From labs to markets, Quantum Reports
1241
+ 2, 80 (2020).
1242
+ [9] E. Knill, R. Laflamme, R. Martinez, and C. Negrevergne,
1243
+ Implementation of the five qubit error correction bench-
1244
+ mark, arXiv preprint quant-ph/0101034 (2001).
1245
+ [10] A. S. Fletcher, P. W. Shor, and M. Z. Win, Channel-
1246
+ adapted quantum error correction for the amplitude damp-
1247
+ ing channel, IEEE Transactions on Information Theory
1248
+ 54, 5705 (2008).
1249
+ [11] R. L. Kosut, A. Shabani, and D. A. Lidar, Robust quan-
1250
+ tum error correction via convex optimization, Phys. Rev.
1251
+ Lett. 100, 020502 (2008).
1252
+ [12] S. Taghavi, R. L. Kosut, and D. A. Lidar, Channel-
1253
+ optimized quantum error correction, IEEE Transactions
1254
+ on Information Theory 56, 1461 (2010).
1255
+ [13] M. Chiani and L. Valentini, Short Codes for Quantum
1256
+ Channels With One Prevalent Pauli Error Type, IEEE
1257
+ Journal on Selected Areas in Information Theory 1, 480
1258
+ (2020).
1259
+ [14] J. Koch, T. M. Yu, J. Gambetta, A. A. Houck, D. I.
1260
+ Schuster, J. Majer, A. Blais, M. H. Devoret, S. M. Girvin,
1261
+ and R. J. Schoelkopf, Charge-insensitive qubit design
1262
+ derived from the cooper pair box, Physical Review A 76,
1263
+ 042319 (2007).
1264
+ [15] M. J. Peterer, S. J. Bader, X. Jin, F. Yan, A. Kamal, T. J.
1265
+ Gudmundsen, P. J. Leek, T. P. Orlando, W. D. Oliver,
1266
+ and S. Gustavsson, Coherence and decay of higher energy
1267
+ levels of a superconducting transmon qubit, Phys. Rev.
1268
+ Lett. 114, 010501 (2015).
1269
+ [16] S. Boixo, S. V. Isakov, V. N. Smelyanskiy, R. Babbush,
1270
+ N. Ding, Z. Jiang, M. J. Bremner, J. M. Martinis, and
1271
+ H. Neven, Characterizing quantum supremacy in near-
1272
+ term devices, Nature Physics 10.1038/s41567-018-0124-x
1273
+ (2018).
1274
+ [17] M. Cerezo, A. Arrasmith, R. Babbush, S. C. Benjamin,
1275
+ S. Endo, K. Fujii, J. R. McClean, K. Mitarai, X. Yuan,
1276
+ L. Cincio, and et al., Variational quantum algorithms,
1277
+ Nature Reviews Physics 3, 625–644 (2021).
1278
+ [18] J. Tilly, H. Chen, S. Cao, D. Picozzi, K. Setia, Y. Li,
1279
+ E. Grant, L. Wossnig, I. Rungger, G. H. Booth, and
1280
+ J. Tennyson, The variational quantum eigensolver: a
1281
+ review of methods and best practices (2021).
1282
+ [19] S. Hadfield, Z. Wang, B. O’Gorman, E. Rieffel, D. Ven-
1283
+ turelli, and R. Biswas, From the quantum approximate
1284
+ optimization algorithm to a quantum alternating operator
1285
+ ansatz, Algorithms 12, 34 (2019).
1286
+ [20] P.
1287
+ D.
1288
+ Johnson,
1289
+ J.
1290
+ Romero,
1291
+ J.
1292
+ Olson,
1293
+ Y.
1294
+ Cao,
1295
+ and A. Aspuru-Guzik, Qvector:
1296
+ an algorithm for
1297
+ device-tailored
1298
+ quantum
1299
+ error
1300
+ correction
1301
+ (2017),
1302
+ arXiv:1711.02249 [quant-ph].
1303
+ [21] J. R. McClean, S. Boixo, V. N. Smelyanskiy, R. Bab-
1304
+ bush, and H. Neven, Barren plateaus in quantum neural
1305
+ network training landscapes, Nature Communications
1306
+ 10.1038/s41467-018-07090-4 (2018).
1307
+ [22] M. Cerezo, A. Sone, T. Volkoff, L. Cincio, and P. J.
1308
+ Coles, Cost function dependent barren plateaus in shallow
1309
+ parametrized quantum circuits, Nature Communications
1310
+ 10.1038/s41467-021-21728-w (2021).
1311
+ [23] G. Monge, M´emoire sur la th´eorie des d´eblais et des
1312
+ remblais (Me´emoires de l’Acade´emie royale des sciences
1313
+ de Paris vol 1781, 1781) p. 625–704.
1314
+ [24] L. V. Kantorovich, On the translocation of masses, Jour-
1315
+ nal of Mathematical Sciences 133, 1381 (2006).
1316
+ [25] L. Ambrosio, Gradient flows in metric spaces and in the
1317
+ spaces of probability measures, and applications to fokker-
1318
+ planck equations with respect to log-concave measures,
1319
+ Bollettino dell’Unione Matematica Italiana 1, 223 (2008).
1320
+ [26] G. Peyr´e and M. Cuturi, Computational optimal trans-
1321
+ port: With applications to data science, Foundations and
1322
+ Trends® in Machine Learning 11, 355 (2019).
1323
+ [27] A. M. Vershik, Long history of the monge-kantorovich
1324
+ transportation problem, The Mathematical Intelligencer
1325
+ 35, 1 (2013).
1326
+ [28] E. A. Carlen and J. Maas, An analog of the 2-Wasserstein
1327
+ metric in non-commutative probability under which the
1328
+ Fermionic Fokker–Planck equation is gradient flow for
1329
+ the entropy, Communications in Mathematical Physics
1330
+ 331, 887 (2014).
1331
+ [29] E. A. Carlen and J. Maas, Gradient flow and entropy
1332
+ inequalities for quantum Markov semigroups with detailed
1333
+ balance, Journal of Functional Analysis 273, 1810 (2017).
1334
+ [30] E. A. Carlen and J. Maas, Non-commutative calculus,
1335
+ optimal transport and functional inequalities in dissipative
1336
+ quantum systems, Journal of Statistical Physics 178, 319
1337
+ (2020).
1338
+ [31] C. Rouz´e and N. Datta, Concentration of quantum states
1339
+ from quantum functional and transportation cost inequal-
1340
+ ities, Journal of Mathematical Physics 60, 012202 (2019).
1341
+ [32] N. Datta and C. Rouz´e, Relating relative entropy, opti-
1342
+ mal transport and Fisher information: A quantum HWI
1343
+ inequality, Annales Henri Poincar´e 21, 2115 (2020).
1344
+ [33] T. Van Vu and Y. Hasegawa, Geometrical Bounds of the
1345
+ Irreversibility in Markovian Systems, Phys. Rev. Lett.
1346
+ 126, 010601 (2021).
1347
+ [34] M. Wirth, A dual formula for the noncommutative trans-
1348
+ port distance, Journal of Statistical Physics 187, 1 (2022).
1349
+ [35] L. Gao, M. Junge, and N. LaRacuente, Fisher informa-
1350
+ tion and logarithmic sobolev inequality for matrix-valued
1351
+ functions, Annales Henri Poincar´e 21, 3409 (2020).
1352
+ [36] Y. Chen, T. T. Georgiou, L. Ning, and A. Tannenbaum,
1353
+ Matricial Wasserstein-1 distance, IEEE control systems
1354
+ letters 1, 14 (2017).
1355
+ [37] E. K. Ryu, Y. Chen, W. Li, and S. Osher, Vector and
1356
+ matrix optimal mass transport: theory, algorithm, and
1357
+ applications, SIAM Journal on Scientific Computing 40,
1358
+ A3675 (2018).
1359
+ [38] Y. Chen, T. T. Georgiou, and A. Tannenbaum, Matrix
1360
+ optimal mass transport: a quantum mechanical approach,
1361
+
1362
+ 10
1363
+ IEEE Transactions on Automatic Control 63, 2612 (2018).
1364
+ [39] Y. Chen, T. T. Georgiou, and A. Tannenbaum, Wasser-
1365
+ stein geometry of quantum states and optimal transport
1366
+ of matrix-valued measures, in Emerging Applications of
1367
+ Control and Systems Theory (Springer, 2018) pp. 139–150.
1368
+ [40] J. Agredo, A Wasserstein-type distance to measure devi-
1369
+ ation from equilibrium of quantum Markov semigroups,
1370
+ Open Systems & Information Dynamics 20, 1350009
1371
+ (2013).
1372
+ [41] J. Agredo, On exponential convergence of generic quan-
1373
+ tum Markov semigroups in a Wasserstein-type distance,
1374
+ International Journal of Pure and Applied Mathematics
1375
+ 107, 909 (2016).
1376
+ [42] K. Ikeda, Foundation of quantum optimal transport and
1377
+ applications, Quantum Information Processing 19, 25
1378
+ (2020).
1379
+ [43] F. Golse, C. Mouhot, and T. Paul, On the mean field and
1380
+ classical limits of quantum mechanics, Communications
1381
+ in Mathematical Physics 343, 165 (2016).
1382
+ [44] E. Caglioti, F. Golse, and T. Paul, Towards Opti-
1383
+ mal Transport for Quantum Densities, arXiv:2101.03256
1384
+ 10.48550/ARXIV.2101.03256 (2021).
1385
+ [45] F. Golse, The quantum N-body problem in the mean-field
1386
+ and semiclassical regime, Philosophical Transactions of
1387
+ the Royal Society A: Mathematical, Physical and Engi-
1388
+ neering Sciences 376, 20170229 (2018).
1389
+ [46] F. Golse and T. Paul, The Schr¨odinger equation in the
1390
+ mean-field and semiclassical regime, Archive for Rational
1391
+ Mechanics and Analysis 223, 57 (2017).
1392
+ [47] F. Golse and T. Paul, Wave packets and the quadratic
1393
+ Monge–Kantorovich distance in quantum mechanics,
1394
+ Comptes Rendus Mathematique 356, 177 (2018).
1395
+ [48] E. Caglioti, F. Golse, and T. Paul, Quantum optimal
1396
+ transport is cheaper, Journal of Statistical Physics 181,
1397
+ 149 (2020).
1398
+ [49] S. Friedland, M. Eckstein, S. Cole, and K. ˙Zyczkowski,
1399
+ Quantum Monge-Kantorovich Problem and Transport
1400
+ Distance between Density Matrices, Phys. Rev. Lett. 129,
1401
+ 110402 (2022).
1402
+ [50] S. Cole, M. Eckstein, S. Friedland, and K. ˙Zyczkowski,
1403
+ Quantum
1404
+ Optimal
1405
+ Transport,
1406
+ arXiv:2105.06922
1407
+ 10.48550/ARXIV.2105.06922 (2021).
1408
+ [51] R. Duvenhage, Optimal quantum channels, Phys. Rev. A
1409
+ 104, 032604 (2021).
1410
+ [52] R. Bistro´n, M. Eckstein, and K. ˙Zyczkowski, Monotonicity
1411
+ of the quantum 2-Wasserstein distance, arXiv:2204.07405
1412
+ 10.48550/ARXIV.2204.07405 (2022).
1413
+ [53] T. Van Vu and K. Saito, Thermodynamic Unification of
1414
+ Optimal Transport: Thermodynamic Uncertainty Rela-
1415
+ tion, Minimum Dissipation, and Thermodynamic Speed
1416
+ Limits, arXiv preprint arXiv:2206.02684 (2022).
1417
+ [54] R. Duvenhage, Quadratic Wasserstein metrics for von
1418
+ Neumann algebras via transport plans, arXiv:2012.03564
1419
+ 10.48550/ARXIV.2012.03564 (2020).
1420
+ [55] R.
1421
+ Duvenhage,
1422
+ Wasserstein
1423
+ distance
1424
+ between
1425
+ non-
1426
+ commutative
1427
+ dynamical
1428
+ systems,
1429
+ arXiv:2112.12532
1430
+ 10.48550/ARXIV.2112.12532 (2021).
1431
+ [56] R. Duvenhage, S. Skosana, and M. Snyman, Extend-
1432
+ ing quantum detailed balance through optimal transport,
1433
+ arXiv preprint arXiv:2206.15287 (2022).
1434
+ [57] G. De Palma and D. Trevisan, Quantum optimal transport
1435
+ with quantum channels, Annales Henri Poincar´e 22, 3199
1436
+ (2021).
1437
+ [58] R. Duvenhage and M. Snyman, Balance between quantum
1438
+ Markov semigroups, Annales Henri Poincar´e 19, 1747
1439
+ (2018).
1440
+ [59] J. Agredo and F. Fagnola, On quantum versions of the
1441
+ classical Wasserstein distance, Stochastics 89, 910 (2017).
1442
+ [60] K. ˙Zyczkowski and W. Slomczynski, The Monge distance
1443
+ between quantum states, Journal of Physics A: Mathe-
1444
+ matical and General 31, 9095 (1998).
1445
+ [61] K. ˙Zyczkowski and W. Slomczynski, The Monge metric
1446
+ on the sphere and geometry of quantum states, Journal
1447
+ of Physics A: Mathematical and General 34, 6689 (2001).
1448
+ [62] I. Bengtsson and K. ˙Zyczkowski, Geometry of Quantum
1449
+ States: An Introduction to Quantum Entanglement (Cam-
1450
+ bridge University Press, 2017).
1451
+ [63] G. De Palma, M. Marvian, D. Trevisan, and S. Lloyd,
1452
+ The quantum wasserstein distance of order 1, IEEE Trans-
1453
+ actions on Information Theory 67, 6627 (2021).
1454
+ [64] B. T. Kiani, G. D. Palma, M. Marvian, Z.-W. Liu, and
1455
+ S. Lloyd, Learning quantum data with the quantum earth
1456
+ mover’s distance, Quantum Science and Technology 7,
1457
+ 045002 (2022).
1458
+ [65] R. W. Hamming, Error detecting and error correcting
1459
+ codes, The Bell System Technical Journal 29, 147 (1950).
1460
+ [66] L.
1461
+ Kim,
1462
+ S.
1463
+ Lloyd,
1464
+ and
1465
+ M.
1466
+ Marvian,
1467
+ Hamilto-
1468
+ nian
1469
+ Quantum
1470
+ Generative
1471
+ Adversarial
1472
+ Networks
1473
+ 10.48550/ARXIV.2211.02584 (2022).
1474
+ [67] D. Herr, B. Obert, and M. Rosenkranz, Anomaly detection
1475
+ with variational quantum generative adversarial networks,
1476
+ Quantum Science and Technology 6, 045004 (2021).
1477
+ [68] E. R. Anschuetz and B. T. Kiani, Beyond Barren Plateaus:
1478
+ Quantum Variational Algorithms Are Swamped With
1479
+ Traps, arXiv:2205.05786 10.48550/ARXIV.2205.05786
1480
+ (2022).
1481
+ [69] B.
1482
+ Coyle,
1483
+ Machine learning applications for noisy
1484
+ intermediate-scale quantum computers, Ph.D. thesis, Uni-
1485
+ versity of Edinburgh (2022).
1486
+ [70] S. Chakrabarti, H. Yiming, T. Li, S. Feizi, and X. Wu,
1487
+ Quantum wasserstein generative adversarial networks,
1488
+ in Advances in Neural Information Processing Systems
1489
+ (2019) pp. 6781–6792.
1490
+ [71] M. A. Nielsen and I. L. Chuang, Quantum Computation
1491
+ and Quantum Information (Cambridge University Press,
1492
+ 2000).
1493
+ [72] E. B. Vinberg, Linear representations of groups (Boston:
1494
+ Birkhauser Verlag, 1989).
1495
+ [73] C. Dankert, R. Cleve, J. Emerson, and E. Livine, Ex-
1496
+ act and approximate unitary 2-designs and their applica-
1497
+ tion to fidelity estimation, Physical Review A 80, 012304
1498
+ (2009).
1499
+ [74] J. Nocedal and S. J. Wright, Numerical Optimization, 2nd
1500
+ ed. (Springer, New York, NY, USA, 2006).
1501
+ [75] M. Schuld, V. Bergholm, C. Gogolin, J. Izaac, and N. Kil-
1502
+ loran, Evaluating analytic gradients on quantum hard-
1503
+ ware, Physical Review A 99, 032331 (2019).
1504
+ [76] D. Gottesman, An introduction to quantum error correc-
1505
+ tion and fault-tolerant quantum computation (2009).
1506
+ [77] E. Knill, R. Laflamme, R. Martinez, and C. Negrevergne,
1507
+ Benchmarking quantum computers: The five-qubit error
1508
+ correcting code, Physical Review Letters 86, 5811 (2001).
1509
+
1dE4T4oBgHgl3EQfzQ2-/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
29FLT4oBgHgl3EQfrS9R/content/2301.12143v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29c7c51f162fcf7eb05465d4e96fc26b38c2cc158035824c619f7747d9ce58c6
3
+ size 817441
29FLT4oBgHgl3EQfrS9R/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7791215c4175f8c697b4c6a1909f1207301cc596d0d78f50d72c6ee0917c0a7c
3
+ size 509456
39FQT4oBgHgl3EQf3zah/content/tmp_files/2301.13429v1.pdf.txt ADDED
@@ -0,0 +1,940 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.13429v1 [math.AP] 31 Jan 2023
2
+ ON STRICHARTZ ESTIMATE FOR MANY BODY SCHR¨ODINGER
3
+ EQUATION IN THE WAVEGUIDE SETTING
4
+ ZEHUA ZHAO
5
+ Contents
6
+ 1.
7
+ introduction
8
+ 1
9
+ 2.
10
+ Preliminaries
11
+ 4
12
+ 3.
13
+ The proof of Theorem 1.1
14
+ 6
15
+ 4.
16
+ The proof of Theorem 1.6
17
+ 8
18
+ 5.
19
+ Further remarks
20
+ 9
21
+ References
22
+ 9
23
+ Abstract. In this short paper, we prove Strichartz estimates for N-body Schr¨odinger
24
+ equations in the waveguide manifold setting (i.e. on semiperiodic spaces Rm ×Tn
25
+ where m ≥ 3), provided that interaction potentials are small enough (depending
26
+ on the number of the particles and the universal constants, not on the initial
27
+ data). The proof combines both the ideas of Tzvetkov-Visciglia [29] and Hong
28
+ [17]. As an immediate application, the scattering asymptotics for this model is
29
+ also obtained. This result extends Hong [17] to the waveguide case.
30
+ Keywords: Strichartz estimate, many body Schr¨odinger equations, scattering, waveg-
31
+ uide manifolds
32
+ Mathematics Subject Classification (2020) Primary: 35Q55; Secondary: 35R01,
33
+ 37K06, 37L50.
34
+ 1. introduction
35
+ 1.1. Background and Motivations. Let d = m + n, m ≥ 3, n ≥ 1 and N ≥ 1. We
36
+ consider the many body Schr¨odinger equations in the waveguide setting as follows,
37
+ (1.1)
38
+ (i∂t + HN)u(x1, ...xN) = 0,
39
+ u(0, x1, ...xN) = u0(x1, ...xN) ∈ L2
40
+ x1,...xN,
41
+ where HN = ∆x − VN = �N
42
+ α=1 ∆xα − �
43
+ 1≤α<β≤N V (xα − xβ),
44
+ and α-th particle xα ∈ Rm × Tn for any α ∈ [1, ..., N].
45
+ From physical explanations, N ≥ 1 indicates the number of particles in a quantum
46
+ system (which is often very large) and the interacting potentials of form V (xα −
47
+ xβ) indicates the interactions of any two particles, which depends on their relative
48
+ distance. Moreover, the product spaces of form Rm × Tn is known as semi-periodic
49
+ space or waveguide manifold.
50
+ d = m + n is the whole dimension while m is the
51
+ dimension for the Euclidean component and n is the dimension for the tori component.
52
+ When N = 1, initial value problem(1.1) is exactly the standard nonlinear Schr¨odinger
53
+ equation (NLS) with a potential, which has been well studied (in the Euclidean case,
54
+ i.e. replacing Rm×Tn by Rd). It is also known as ‘the one-body case’ and the research
55
+ on the decay properties has a long history (see the Introduction of [17], the survey
56
+ [25] and the references therein). In this paper, we mainly concern the general case
57
+ (N ≥ 1 can be arbitrarily large), i.e. the many body Schr¨odinger case, which will
58
+ involve some new difficulties than the single body case such as the issue of interacting
59
+ potentials.
60
+ 1
61
+
62
+ 2
63
+ ZEHUA ZHAO
64
+ The purpose of this paper is to investigate time decay properties of solutions to
65
+ the N-body Schr¨odinger equation (1.1) in the waveguide setting. In particular, we
66
+ discuss the Strichartz-type estimate and the scattering behavior for (1.1). We note
67
+ that the Euclidean case of (1.1) has been studied in [17]. (See also [7] for a recently
68
+ result which deals with the two body case via the scheme of [20].)
69
+ We intend to
70
+ generalize [17] to the waveguide case. That is one main motivation of this paper.
71
+ Another motivation is the recent developments for the topic: ‘Long time behavior for
72
+ NLS on waveguides’ so the author is interested in combining both of ‘waveguides’ and
73
+ ‘many body Schr¨odinger equations’ together, i.e. studying the estimates and the long
74
+ time behavior for many body Schr¨odinger equations on waveguides. We will briefly
75
+ mention the background for ‘NLS on waveguides’ in the next paragraph.
76
+ Waveguide manifolds of form Rm ×Tn are of particular interest in nonlinear optics
77
+ of telecommunications. Generally, well-posedness theory and long time behavior of
78
+ NLS are hot topics in the area of dispersive equations and have been studied widely
79
+ in recent decades. Naturally, the Euclidean case is first treated and the theory, at
80
+ least in the defocusing setting, has been well established.
81
+ We refer to [9, 10, 21]
82
+ for some typical Euclidean results. Moreover, we refer to [5, 4, 14, 15, 16, 18, 19,
83
+ 22, 31, 32, 34, 35, 36] with regard to the torus and waveguide settings. (See also
84
+ [24, 27, 33] for other dispersive equations on waveguides.) One may roughly think
85
+ of the waveguide case as the “intermediate point” between the Euclidean case and
86
+ the torus case since the waveguide manifold is a product of Euclidean spaces and the
87
+ tori. The techniques used in Euclidean and torus settings are frequently combined
88
+ and applied to the waveguides problems. At last, we refer to [2, 11, 28] for some
89
+ classical textbooks on the study of NLS.
90
+ Since the current paper concerns the estimates and the PDE-level of (1.1) rather
91
+ than the mathematical physics level, we will not mention too much for the background
92
+ of the many body problems/equations from physical perspectives. We refer to the
93
+ Introductions of [3, 6, 7, 8, 12, 26] and the references therein for more information.
94
+ To the authors’ best knowledge, the current paper is the first result towards un-
95
+ derstanding long time dynamics for the many body Schr¨odinger equations within the
96
+ context of waveguides.
97
+ As last, we note that, as in [17], we need to assume some smallness for the potential
98
+ V and this smallness does not depend on the initial data (only depends on the particle
99
+ number N and the universal constant).
100
+ 1.2. The statement of main results. Now we are ready to state the two main
101
+ results of this paper. We start with the Strichartz estimate as follows since the other
102
+ one is an application of it.
103
+ Theorem 1.1 (Strichartz estimate). Let m ≥ 3, n ≥ 1 and 1 < p < 2. There exists
104
+ a small number ǫ such that if ∥V ∥
105
+ L
106
+ d
107
+ 2 ,∞
108
+ y
109
+ L2z
110
+
111
+ ǫ
112
+ N 2 , then
113
+ (1.2)
114
+ ∥1[0,+∞)e−itHN u0∥V p
115
+ ∆x ≲ ∥u0∥L2x.
116
+ Remark 1.2. Here V p
117
+ ∆x-norm (known as variation spaces) is introduced by Koch-
118
+ Tataru [23] (see Section 2 for discussions). See also [15, 16, 18, 19] for more information
119
+ and some other applications.
120
+ In viewing of the properties of V p
121
+ ∆x-type spaces, Theorem 1.1 directly implies
122
+ Corollary 1.3. Let m ≥ 3 and n ≥ 1. There exists a small number ǫ such that if
123
+ ∥V ∥
124
+ L
125
+ d
126
+ 2 ,∞
127
+ y
128
+ L2z
129
+
130
+ ǫ
131
+ N 2 , then for any m-dimensional admissible pair (q, r) and 1 ≤ α ≤ N,
132
+ we have
133
+ (1.3)
134
+ ∥e−itHNu0∥Lq
135
+ tLryαL2zαL2
136
+ ˆxα ≲ ∥u0∥L2x,
137
+
138
+ MANY BODY SCHR ¨ODINGER EQUATION ON WAVEGUIDE MANIFOLDS
139
+ 3
140
+ where ˆxα is the N − 1 spatial variables except the α-th variable xα, i.e.,
141
+ (1.4)
142
+ ˆxα = (x1, ...xα−1, xα+1..., xN) ∈ Rd(N−1),
143
+ and xα is the α-th variable with Euclidean component yα and tori component zα, i.e.,
144
+ (1.5)
145
+ xα = (yα, zα) ∈ Rm × Tn.
146
+ Moreover, for any mN-dimensional admissible pair (q, r), we have
147
+ (1.6)
148
+ ∥e−itHNu0∥Lq
149
+ tLryL2z ≲ ∥u0∥L2x,
150
+ where y is for the whole Euclidean component (mN-dimensional) and z is for the
151
+ whole tori component nN-dimensional.
152
+ Remark 1.4. See Theorem 1.1 and Theorem 1.2 in [17] for the Euclidean case. We will
153
+ give the proof for Corollary 1.3 after the proof of Theorem 1.1 in the end of Section
154
+ 3.
155
+ Remark 1.5. As shown above, the formulation of the Strichartz estimates for (1.1)
156
+ combines both the ideas of [29] and [17]. As in [29] (see also [30]), we fix the tori
157
+ component by using L2-norm. (In other words, one decomposes the function along
158
+ the tori direction and derive the Strichartz estimate using the dispersion from the Eu-
159
+ clidean direction.) As in [17], we fix other particles by only considering the dispersion
160
+ of one certain particle. Thus, we consider the dispersion of the Euclidean component
161
+ of one particle; fixing other particles and the tori component of this particle by using
162
+ L2-norm.
163
+ As a direct application of Theorem 1.1, we show the scattering behavior for an
164
+ N-body Schr¨odinger operator with rough small interactions in the following sense,
165
+ Theorem 1.6 (Scattering). Let m ≥ 3, n ≥ 1 and 1 < p < 2. let ǫ be a small
166
+ constant given in Theorem 1.1. If ∥V ∥
167
+ L
168
+ m
169
+ 2 ,∞
170
+ y
171
+ L2
172
+ z ≤
173
+ ǫ
174
+ N 2 , then for each u0 ∈ L2
175
+ x, there
176
+ exist scattering states u± such that
177
+ (1.7)
178
+ lim
179
+ t→±∞
180
+ ��e−itHN u0 − eit∆xu±
181
+ ��
182
+ L2x = 0.
183
+ Remark 1.7. We note that for the tori case of (1.1), the scattering behavior is not
184
+ expected due to the lack of dispersion, though a Strichartz estimate can still be
185
+ possibly obtained with suitable modifications. We leave it for interested readers.
186
+ Remark 1.8. For the above results, the dimension of the tori component n ≥ 1 does
187
+ not matter. (When n = 0, it is exactly the Euclidean case [17]). However, if one
188
+ considers the long time dynamics of a nonlinear problem on waveguide manifolds,
189
+ the dimension of the tori component often matters a lot. In general, the difficulty of
190
+ the critical NLS problem on waveguide manifolds increases if the whole dimension is
191
+ increased or if the Euclidean component is decreased. See the Introductions in [18, 19]
192
+ for more information.
193
+ Remark 1.9. To be more general, the tori component Tn in (1.1) can be generalized
194
+ to a compact Riemannian manifold M such that Theorem 1.1, Corollary 1.3 and
195
+ Theorem 1.6 still hold.
196
+ Next, we briefly introduce the main strategy of the proofs for Theorem 1.1, Corol-
197
+ lary 1.3 and Theorem 1.6. In fact, the proofs for Corollary 1.3 and Theorem 1.6 are
198
+ standard and less complicated. Corollary 1.3 follows from Theorem 1.1 (see Section
199
+ 3) according to the transfer principle of the function space V p
200
+ ∆x. Theorem 1.6 also
201
+ follows from Theorem 1.1 (see Section 4), together with some other basic estimates
202
+ like in [17]. Thus we will focus on the proof of Theorem 1.1 as follows.
203
+ The proof of Theorem 1.1 (Strichartz estimate) is based on the properties of func-
204
+ tion space V p
205
+ ∆x and a perturbation method (see Section 3). The main idea is: one
206
+
207
+ 4
208
+ ZEHUA ZHAO
209
+ establishes nonlinear estimate for one arbitrary interacting potential (treating it as
210
+ a perturbation) and then sum them up. The key estimate is Proposition 3.1 which
211
+ deals with one arbitrary interacting potential by regarding it as a forcing term. With
212
+ the help of it, one can handle all of the interacting potentials by treating them as
213
+ perturbations. Eventually, according to the smallness assumption, one can use per-
214
+ turbation method to show the Strichartz estimate as desired. Compared with the
215
+ single potential case (N = 1), the interacting potentials (involves rotations) cause dif-
216
+ ficulties thus the ‘rotation flexible’ function space V p
217
+ ∆x is needed; compared with the
218
+ Euclidean analogue ([17]), the new difference is the appearance of the tori component.
219
+ 1.3. Structure of this paper. The rest of the article is organized as follows. In
220
+ Section 2, we discuss function spaces and some estimates for this model; in Section
221
+ 3, we give the proof for Theorem 1.1 (Strichartz estimate); in Section 4, we give the
222
+ proof for Theorem 1.6 (scattering asymptotics); in Section 5, we give a few further
223
+ remarks on this research line.
224
+ 1.4. Notations. We write A ≲ B to say that there is a constant C such that A ≤ CB.
225
+ We use A ≃ B when A ≲ B ≲ A. Particularly, we write A ≲u B to express that
226
+ A ≤ C(u)B for some constant C(u) depending on u. We use C for universal constants
227
+ and N for the number of particles.
228
+ We say that the pair (p, q) is d-(Strichartz) admissible if
229
+ (1.8)
230
+ 2
231
+ p + d
232
+ q = d
233
+ 2,
234
+ 2 ≤ p, q ≤ ∞
235
+ (p, q, d) ̸= (2, ∞, 2).
236
+ Throughout this paper, we regularly refer to the spacetime norms
237
+ (1.9)
238
+ ∥u∥Lp
239
+ t Lq
240
+ z(It×Rm×Tn) =
241
+ ��
242
+ It
243
+ ��
244
+ Rm×Tn |u(t, z)|qdz
245
+ � p
246
+ q
247
+ dt
248
+ � 1
249
+ p
250
+ .
251
+ Similarly we can define the composition of three Lp-type norms like Lp
252
+ tLq
253
+ xL2
254
+ y. As in
255
+ Theorems 1.1, 1.6 and Corollary 1.3, we use Lr,s for the Lorentz norm (see [1]). One
256
+ can define the composition of norms in a similar way.
257
+ As stated in the above Theorems, in general, we refer to x for the whole spatial
258
+ variable; y for the whole Euclidean spatial variable; z for the whole tori spatial vari-
259
+ able; xα for the α-th spatial variable; yα for the α-th Euclidean spatial variable; zα
260
+ for the α-th tori spatial variable for convenience.
261
+ Similar to the Euclidean case, function spaces such as V p
262
+ ∆ are also tightly involved.
263
+ we will discuss them in Section 2. (See also [17].)
264
+ To deal with the interacting potentials, we define the rotation operator Rαβ by
265
+ (1.10)
266
+ Rαβ(f(x1, ...xα−1, xα − xβ
267
+
268
+ 2
269
+ , xα+1...xβ−1, xα + xβ
270
+
271
+ 2
272
+ , xβ+1, ...xN)) = f(x1, ...xN).
273
+ Acknowledgment. The author was supported by the NSF grant of China (No.
274
+ 12101046, 12271032), Chinese overseas high-level young talents program (2022) and
275
+ the Beijing Institute of Technology Research Fund Program for Young Scholars. The
276
+ author has learned many body Schr¨odinger model and related background during his
277
+ postdoc career at University of Maryland (2019-2021). Thus he highly appreciates
278
+ Prof. M. Grillakis, Prof. M. Machedon and their group (Dr. J. Chong and Dr. X.
279
+ Huang) for related discussions, especially the paper of Hong [17].
280
+ 2. Preliminaries
281
+ In this section, we discuss function spaces and some estimates for the model (1.1).
282
+ See Section 2 to Section 4 in [17] for the Euclidean analogue.
283
+
284
+ MANY BODY SCHR ¨ODINGER EQUATION ON WAVEGUIDE MANIFOLDS
285
+ 5
286
+ First, similar to the Euclidean case, one can easily show: if the potential V is
287
+ small enough, then the Strichartz estimate for operator eit(∆x−V ) also holds for the
288
+ waveguide case as follows.
289
+ Lemma 2.1. Let m ≥ 3, n ≥ 1, and let c0 be the implicit constant given in Proposi-
290
+ tion 2.2. If ∥V ∥
291
+ L
292
+ d
293
+ 2 ,∞
294
+ y
295
+ L2z
296
+ <
297
+ 1
298
+ c0 , then
299
+ (2.1)
300
+ ∥eit(∆x−V )u0∥Lq
301
+ tLr
302
+ yL2
303
+ z(R×Rm×Tn) ≤
304
+ c0
305
+ 1 − c0∥V ∥
306
+ L
307
+ d
308
+ 2 ,∞
309
+ y
310
+ L2z
311
+ ∥u0∥L2y,z(Rm×Tn),
312
+ for all m-admissible pair (q, r).
313
+ As in the Euclidean case, to finish the proof of Lemma 2.1, recall the Strichartz
314
+ estimates in the waveguide setting as follows (see Proposition 2.1 in [29], in fact, this
315
+ result is more general since it concerns the compact Riemannian manifold case).
316
+ Proposition 2.2. For every n ≥ 1 and for every compact Riemannian manifold
317
+ M k
318
+ y , one considers functions f(x, y), F(x, y) on Rn ×M k
319
+ y , then the following estimate
320
+ holds:
321
+ (2.2) ∥eit∆x,yf∥Lp
322
+ t Lq
323
+ xL2y+
324
+ ��
325
+ � t
326
+ 0
327
+ ei(t−s)∆x,yF(s, x, y)ds
328
+ ��
329
+ Lp
330
+ t Lq
331
+ xL2
332
+ y ≲ ∥f∥L2x,y+∥F∥L ˜
333
+ p
334
+ t L˜
335
+ q
336
+ xL2y,
337
+ where (p, q) and (˜p, ˜q) are Strichartz admissible pairs.
338
+ Proof of Lemma 2.1. Since the potential is small, the proof is purely perturbative.
339
+ One can just use waveguide Strichartz estimate Proposition 2.2 to treat the potential
340
+ as a perturbation term provided the potential is small (the Duhamel’s formula and
341
+ the H¨older inequality are also used). Thus we omit the proof. See Theorem 2.1 in
342
+ [17] for the Euclidean analogue.
343
+
344
+ Next, we discuss Strichartz estimates with frozen spatial variables. (See Propo-
345
+ sition 1 in [17] and Theorem 3.1 in [3] for the Euclidean analogue.) The difference
346
+ is that: now we fix both of the tori component of a certain particle and the other
347
+ particles by using L2-norm. In other words, the ‘frozen spatial variables’ are the tori
348
+ component and the other particles. Standard dispersive estimate and an important
349
+ lemma in [20] which ‘lifts’ dispersive estimates to Strichartz estimates are used.
350
+ Proposition 2.3. Let m ≥ 3 and n ≥ 1. Then for any m-dimensional admissible
351
+ pair (q, r), (˜q, ˜r) and 1 ≤ α ≤ N, we have
352
+ (2.3)
353
+ ∥eit∆xu0∥Lq
354
+ tLr,2
355
+ yα L2zαL2
356
+ ˆxα ≲ ∥u0∥L2x,
357
+ (2.4)
358
+
359
+
360
+ R
361
+ e−is∆xF(s)ds∥L2x ≲ ∥F∥
362
+
363
+ q′
364
+ t L˜r′ ,2
365
+ yα L2zαL2
366
+ ˆxα
367
+ ,
368
+ and
369
+ (2.5)
370
+
371
+ � t
372
+ 0
373
+ e−i(t−s)∆xF(s)ds∥Lq
374
+ t Lr,2
375
+ yα L2zαL2
376
+ ˆxα ≲ ∥F∥
377
+
378
+ q′
379
+ t L˜r′ ,2
380
+ yα L2zαL2
381
+ ˆxα
382
+ ,
383
+ where
384
+ (2.6)
385
+ ˆxα = (x1, ...xα−1, xα+1..., xN) ∈ Rd(N−1),
386
+ and xα is the α-th variable with Euclidean component yα and tori component zα, i.e.,
387
+ (2.7)
388
+ xα = (yα, zα) ∈ Rm × Tn.
389
+ Proof. We consider a complex-valued function f(x) : RdN
390
+ x
391
+ → C in Lr,2
392
+ yα L2
393
+ zαL2
394
+ ˆxα with
395
+ the function-valued function f(yα; zα, ˆxα) in Lr,2
396
+ yα . We note that r ≥ 2. Using unitarity
397
+ property,
398
+ (2.8)
399
+ ∥eit∆xu0∥LryαL2zαL2
400
+ ˆxα = ∥eit∆yαu0∥LryαL2zαL2
401
+ ˆxα = ∥eit∆yαu0∥L2zαL2
402
+ ˆxαLryα.
403
+
404
+ 6
405
+ ZEHUA ZHAO
406
+ Then, by the standard dispersive estimate (for yα-direction which is m-dimensional)
407
+ (2.9)
408
+ ∥eit∆yαu0∥Lryα ≲
409
+ 1
410
+ |t|m( 1
411
+ 2 − 1
412
+ r ) ∥f∥Lr′
413
+ yα,
414
+ we obtain (the Minkowski allows one change the order of norms)
415
+ (2.10) ∥eit∆xu0∥LryαL2zαL2
416
+ ˆxα ≲
417
+ 1
418
+ |t|m( 1
419
+ 2 − 1
420
+ r ) ∥u0∥L2
421
+ zαL2
422
+ ˆxαLr′
423
+ yα ≲
424
+ 1
425
+ |t|m( 1
426
+ 2 − 1
427
+ r ) ∥u0∥Lr′
428
+ yαL2
429
+ zαL2
430
+ ˆxα.
431
+ The proposition follows from Theorem 10.1 in [20].
432
+
433
+ Now we briefly discuss the function spaces and corresponding estimates. They will
434
+ be essentially used in the following two sections. As mentioned in the end of Section
435
+ 3.3 in [17], Strichartz estimates with frozen spatial variables are still not sufficient to
436
+ complete the proof of Theorem 1.1 (Strichartz estimate) because of the interacting
437
+ potentials. That is why a space-time norm that plays the role of the rotated space-
438
+ time norm is needed. This part is almost the same as Section 4.1 in [17] with natural
439
+ modifications. We also refer to [15, 16, 23] for more details.
440
+ We note that the definitions and properties in Subsection 4.1. of [17] are general
441
+ enough which can be applied for our model in the waveguide setting naturally. They
442
+ construct function spaces with nice properties for a separable Hilbert space H and
443
+ self-adjoint operator S. In this paper, we can just choose H to be L2
444
+ x and S to be ∆x in
445
+ the waveguide setting, where x = (x1, ..., xN) and xα ∈ Rm × Tn for α ∈ {1, ..., N} as
446
+ in (1.1). Then the definitions and associated properties for our case will hold as well.
447
+ Thus we refer to Subsection 4.1. of [17] for the function spaces and corresponding
448
+ estimates/properties. For instance, we will use the following property of V p
449
+ ∆-space.
450
+ (It follows from the definition. See Proposition 2 in [17].)
451
+ (2.11)
452
+ ∥1[0,∞)]eit∆xu0∥V p
453
+ ∆x = ∥u0∥L2
454
+ x.
455
+ Moreover, the duality, the inclusion properties and the transference principle of V p
456
+ ∆-
457
+ space are also often used. (See Subsection 4.1. of [17])
458
+ 3. The proof of Theorem 1.1
459
+ In this section, we discuss the proof of Theorem 1.1 (Strichartz estimate). Corollary
460
+ 1.3 will also be obtained using the properties of function space V (xα−xβ). Like in [17],
461
+ we handle the potential terms by treating them as perturbations. The key estimate
462
+ is as follows,
463
+ Proposition 3.1. Let m ≥ 3, n ≥ 1 and 1 < p < 2. Consider u in the waveguide
464
+ setting as in Theorem 1.1. Then, we have
465
+ (3.1)
466
+ ��1[0,+∞)
467
+ � t
468
+ 0
469
+ ei(t−s)∆x(V (xα − xβ)u(s))ds
470
+ ��
471
+ V p
472
+ ∆ ≤ C∥V ∥
473
+ L
474
+ m
475
+ 2 ,∞
476
+ y
477
+ L2z∥u∥V p
478
+ ∆,
479
+ where C is for the universal constant.
480
+ Remark 3.2. Proposition 3.1 indicates that one can regard the potential terms as
481
+ perturbations. As we can see from the proof below, it suffices to consider one arbitrary
482
+ interacting potential V (xα − xβ) since the V p
483
+ ∆-norm is rotation-flexible.
484
+ Remark 3.3. See Proposition 4 in [17] for the Euclidean analogue. The main new
485
+ difference for the waveguide case is the appearance of the tori component.
486
+ Proof. For notational convenience, we denote
487
+ (3.2)
488
+ w = 1[0,∞)
489
+ � t
490
+ 0
491
+ ei(t−s)∆x(F(s))ds,
492
+ where F = V (xα −xβ)u(s) is treated as the forcing term (or say a perturbative term).
493
+
494
+ MANY BODY SCHR ¨ODINGER EQUATION ON WAVEGUIDE MANIFOLDS
495
+ 7
496
+ We will estimate w by the duality argument. Since we only expect w ∈ V p
497
+ −, not
498
+ w ∈ V p, we consider ˜w(t) = w(−t).
499
+ Similar to Proposition 4 of [17], using duality, it suffices to show that
500
+ (3.3)
501
+ J
502
+
503
+ j=1
504
+ ⟨a(tj−1), ˜w(j) − ˜w(tj−1)⟩L2x ≲ ∥V ∥
505
+ L
506
+ m
507
+ 2 ,∞
508
+ y
509
+ L2z∥u∥V p
510
+
511
+ for any fine partition of unity t = {tj}J
512
+ j=0 and any U p
513
+
514
+ -atom a(t) = �K
515
+ k=1 1(sk−1,sk)φk−1.
516
+ (We note that the U p
517
+
518
+ -space is the dual of the V p
519
+ ∆-space.)
520
+ Doing some standard simplifications as in Proposition 4 of [17] (expanding atoms
521
+ a in terms of φk), one can get a simpler sum
522
+ (3.4)
523
+ K
524
+
525
+ k=1
526
+ ⟨φk−1, ˜w(sk) − ˜w(sk−1)⟩L2x.
527
+ We further write it as
528
+ K
529
+
530
+ k=1
531
+ ⟨φk−1, ˜w(sk) − ˜w(sk−1)⟩L2x
532
+ (3.5)
533
+ = −
534
+ K
535
+
536
+ k=1
537
+ � −sk−1
538
+ −sk
539
+ ⟨φk−1, e−is∆x(F(s))⟩L2xds
540
+ (3.6)
541
+ = −
542
+ K
543
+
544
+ k=1
545
+ � −sk−1
546
+ −sk
547
+ ⟨eis∆xRφk−1, R(F(s))⟩L2xds
548
+ (3.7)
549
+ = −
550
+ K
551
+
552
+ k=1
553
+
554
+ R
555
+ ⟨eis∆xRφk−1, 1[−sk,−sk−1]R(F(s))⟩L2
556
+ xds,
557
+ (3.8)
558
+ where R denotes any rotation operator.
559
+ (It is just Rαβ for interacting potential
560
+ V (xα − xβ).) We want to control it by ∥V ∥
561
+ L
562
+ m
563
+ 2 ,∞
564
+ y
565
+ L2
566
+ z∥u∥V p
567
+ ∆.
568
+ Then, applying the H¨older inequality and the Strichartz estimate Proposition 2.3,
569
+ we estimate it by
570
+ K
571
+
572
+ k=1
573
+ ⟨φk−1, ˜w(sk) − ˜w(sk−1)⟩L2x
574
+ (3.9)
575
+
576
+ K
577
+
578
+ k=1
579
+ ∥eit∆Rφk−1∥
580
+ L2
581
+ tL
582
+ 2m
583
+ m−2 ,2
584
+
585
+ L2zαL2
586
+ ˆxα
587
+ ∥1[−sk,−sk−1]R(F(s))∥
588
+ L2
589
+ tL
590
+ 2m
591
+ m+2 ,2
592
+
593
+ L2zαL2
594
+ ˆxα
595
+ (3.10)
596
+
597
+ K
598
+
599
+ k=1
600
+ ∥φk−1∥L2
601
+ x∥V ∥
602
+ L
603
+ m
604
+ 2 ,∞
605
+ y
606
+ L2z∥1[−sk,−sk−1]R(u)∥
607
+ L2
608
+ tL
609
+ 2m
610
+ m−2 ,2
611
+
612
+ L2zαL2
613
+ ˆxα
614
+ (3.11)
615
+
616
+ K
617
+
618
+ k=1
619
+ ∥φk−1∥L2
620
+ x∥V ∥
621
+ L
622
+ m
623
+ 2 ,∞
624
+ y
625
+ L2z∥1[−sk,−sk−1](u)∥V p
626
+ ∆x
627
+ (3.12)
628
+ ≲ ∥V ∥
629
+ L
630
+ m
631
+ 2 ,∞
632
+ y
633
+ L2z
634
+ ��∥φk−1∥L2
635
+ x
636
+ ��
637
+ lp′ ·
638
+ ��∥1[−sk,−sk−1](u)∥V p
639
+ ∆x
640
+ ��
641
+ lp
642
+ (3.13)
643
+ ≲ ∥V ∥
644
+ L
645
+ m
646
+ 2 ,∞
647
+ y
648
+ L2z
649
+ ��∥1[−sk,−sk−1](u)∥V p
650
+ ∆x
651
+ ��
652
+ lp.
653
+ (3.14)
654
+ We note that we have used the inclusion property of discrete Lp spaces (i.e. lp-spaces).
655
+ (1 < p < 2 implies p
656
+ ′ > 2.)
657
+ To close the argument, now it remains to show that
658
+ (3.15)
659
+ ��∥1[−sk,−sk−1](u)∥V p
660
+ ∆x
661
+ ��
662
+ lp =
663
+ � K
664
+
665
+ k=1
666
+ ∥1[−sk,−sk−1)u∥p
667
+ V p
668
+ ∆x
669
+ � 1
670
+ p ≤ ∥u∥V p
671
+ ∆x.
672
+
673
+ 8
674
+ ZEHUA ZHAO
675
+ This estimate follows exactly as the Euclidean case (using the definition of V p
676
+ ∆x).
677
+ There is no difference in the waveguide setting. Thus the proof of Proposition 3.1 is
678
+ complete.
679
+
680
+ With the help of Proposition 3.1, we give the proof of Theorem 1.1 as follows. We
681
+ can now treat the potential terms as perturbations.
682
+ Proof. Applying Proposition 3.1 to the Duhamel formula for u = e−itHN u0, we have,
683
+ (3.16)
684
+ ∥1[0,+∞)u(t)∥V p
685
+ ∆ ≤ ∥u0∥L2x + N(N − 1)
686
+ 2
687
+ C∥V ∥
688
+ L
689
+ d
690
+ 2 ,∞
691
+ y
692
+ L2
693
+ z
694
+ ∥u∥V p
695
+ ∆.
696
+ Theorem 1.1 now follows from the smallness assumption of potential V . ( N(N−1)
697
+ 2
698
+ is
699
+ the number of interacting potentials.)
700
+
701
+ Corollary 1.3 follows from Theorem 1.1 in viewing of the following lemma:
702
+ Lemma 3.4 (Transference principle). Let d ≥ 1, 1 < p < 2, q ≥ 2 and X be a
703
+ Banach space. If a function u : R → X satisfies the bound
704
+ (3.17)
705
+ ∥e∆xu0∥Lq
706
+ tX ≲ ∥u0∥L2x,
707
+ then
708
+ (3.18)
709
+ ∥u∥Lq
710
+ tX ≲ ∥u∥V p
711
+ ∆x.
712
+ Remark 3.5. We note that the Bourgain spaces Xs,b (also known as Fourier restriction
713
+ space) enjoy the similar transfer principle (see [28] for more info.). As summarized in
714
+ [17], the Strichartz estimates in the V p
715
+ ∆x sharpen the bounds in Xs,b by 0+ in that
716
+ Strichartz estimates in the Xs,b space do not cover the endpoint Strichartz estimates,
717
+ while those in the V p
718
+ ∆x-space do.
719
+ See Proposition 3 in [17] for the proof. As a direct consequence, it shows that
720
+ the V p
721
+ ∆x-norm dominates the two Strichartz-type space-time norms in Corollary 1.3.
722
+ Thus, Corollary 1.3 follows from Theorem 1.1.
723
+ 4. The proof of Theorem 1.6
724
+ Now we are ready to discuss the proof of Theorem 1.6, i.e.
725
+ the scattering for
726
+ (1.1). Since we have established proper Strichartz-type estimate, the proof will follow
727
+ similarly as in [17]. For the sake of completeness, we include it as follows.
728
+ Without loss of generality, we only consider for the positive time. It suffices to
729
+ show that
730
+ (4.1)
731
+ u+ =
732
+ lim
733
+ t→+∞ e−it∆xe−itHN u0
734
+ exists in L2
735
+ x as t → ∞. Indeed, by the Duhamel formula
736
+ ∥e−it2∆e−it2HN u0 − e−it1∆e−it1HN u0∥L2x
737
+ (4.2)
738
+
739
+
740
+ 1≤α<β≤N
741
+ ��
742
+ � t2
743
+ t1
744
+ e−is∆x((V (xα − xβ))e−isHN u0)ds
745
+ ��
746
+ L2x.
747
+ (4.3)
748
+
749
+ MANY BODY SCHR ¨ODINGER EQUATION ON WAVEGUIDE MANIFOLDS
750
+ 9
751
+ It suffices to consider one single potential. According to Theorem 1.1 and Corollary
752
+ 1.3, we have that
753
+ ��
754
+ � t2
755
+ t1
756
+ e−is∆x((V (xα − xβ))e−isHN u0)ds
757
+ ��
758
+ L2x
759
+ (4.4)
760
+ =
761
+ ��Rαβ
762
+ � t2
763
+ t1
764
+ e−is∆x((V (xα − xβ))e−isHN u0)ds
765
+ ��
766
+ L2x
767
+ (4.5)
768
+ =
769
+ ��
770
+ � t2
771
+ t1
772
+ e−is∆x(V (
773
+
774
+ 2xα)(Rαβe−isHN u0))ds
775
+ ��
776
+ L2x
777
+ (4.6)
778
+ ≤ c0
779
+ ��V (
780
+
781
+ 2xα)(Rαβe−isHN u0)
782
+ ��
783
+ L2
784
+ t∈[t1,t2]L
785
+ 2d
786
+ d+2 ,2
787
+
788
+ L2zαL2
789
+ ˆxα
790
+ (4.7)
791
+ ≤ c0
792
+ 2 ∥V ∥
793
+ L
794
+ d
795
+ 2 ,∞
796
+ z
797
+ L2z
798
+ ��(Rαβe−isHN u0)
799
+ ��
800
+ L2
801
+ t∈[t1,t2]L
802
+ 2d
803
+ d−2 ,2
804
+
805
+ L2zαL2
806
+ ˆxα
807
+ → 0
808
+ (4.8)
809
+ as t1, t2 → ∞. Then we can see that the limit exists.
810
+ 5. Further remarks
811
+ In this section, we make a few more remarks for many body model (1.1) and
812
+ Theorems 1.1, 1.6 as follows.
813
+ 1. The main results in this paper and [17] are based on perturbative scheme which
814
+ are tightly dependent on the smallness assumption of the potentials. One may consider
815
+ removing the smallness assumption to prove Strichartz estimates like Theorem 1.1 or
816
+ Corollary 1.3. It may be hard to consider the general case thus the two body case
817
+ may be a good model to start with. (See [7] for the Euclidean case.)
818
+ 2.
819
+ It is also interesting to consider many body equation with a nonlinearity
820
+ F(t, x1, ..., xN) and study the long time behavior.
821
+ There are few general theories
822
+ and results regarding this topic, especially the scattering-type results. Also, it may
823
+ be hard to consider the general case thus the two body case may still be a good
824
+ model to start with. (The Λ-equation in the Hartree–Fock–Bogoliubov equations is
825
+ an example for the two body case, though it is in a coupled system which makes it
826
+ more complicated. See [6, 7].)
827
+ We also note that via the standard T -T ∗ argument and the Christ-Kiselev lemma,
828
+ one can obtain the inhomogeneous Strichartz analogue of Corollary 1.3 (excluding
829
+ the double endpoint case). (See [28].) With the help of it, one may obtain the local
830
+ well-posedness for (1.1) with a subcritical nonlinearity in the energy space. We leave
831
+ it ifor interested readers.
832
+ 3. One may also consider the tori analogue of (1.1) (replacing Rm × Tn by Td))
833
+ and obtain some estimates. The reason we consider the waveguide case is that we
834
+ intend to study the scattering behavior, which is not expected for the tori case.
835
+ 4. The results in the current paper is only about the estimates and the PDE-level
836
+ of many body Schr¨odinger equations. One may consider the many body Schr¨odinger
837
+ equations in the tori setting or waveguide setting from the perspectives of mathemat-
838
+ ical physics. (See [8, 12, 13] for examples.)
839
+ References
840
+ 1. J¨oran Bergh and J¨orgen L¨ofstr¨om, Interpolation spaces:
841
+ an introduction, vol. 223, Springer
842
+ Science & Business Media, 2012.
843
+ 2. T. Cazenave, Semilinear Schr¨odinger equations, Courant Lecture Notes in Mathematics, vol. 10,
844
+ New York University, Courant Institute of Mathematical Sciences, New York; American Math-
845
+ ematical Society, Providence, RI, 2003. MR 2002047
846
+ 3. Thomas Chen, Younghun Hong, and Nataˇsa Pavlovi´c, Global well-posedness of the nls system
847
+ for infinitely many fermions, Archive for rational mechanics and analysis 224 (2017), 91–123.
848
+
849
+ 10
850
+ ZEHUA ZHAO
851
+ 4. X. Cheng, Z. Guo, and Z. Zhao, On scattering for the defocusing quintic nonlinear Schr¨odinger
852
+ equation on the two-dimensional cylinder, SIAM J. Math. Anal. 52 (2020), no. 5, 4185–4237.
853
+ MR 4147586
854
+ 5. X. Cheng, Z. Zhao, and J. Zheng, Well-posedness for energy-critical nonlinear Schr¨odinger
855
+ equation on waveguide manifold, J. Math. Anal. Appl. 494 (2021), no. 2, Paper No. 124654, 14.
856
+ MR 4158753
857
+ 6. Jacky Chong, Xin Dong, Manossos Grillakis, Matei Machedon, and Zehua Zhao, Global uniform
858
+ in N estimates for solutions of a system of Hartree–Fock–Bogoliubov type in the case β < 1,
859
+ arXiv preprint arXiv:2203.05447 (2022).
860
+ 7. Jacky Chong, Manoussos Grillakis, Matei Machedon, and Zehua Zhao, Global estimates for the
861
+ hartree–fock–bogoliubov equations, Communications in Partial Differential Equations 46 (2021),
862
+ no. 10, 2015–2055.
863
+ 8. Jacky J Chong and Zehua Zhao, Dynamical Hartree–Fock–Bogoliubov approximation of inter-
864
+ acting bosons, Annales Henri Poincar´e, Springer, 2019, pp. 1–59.
865
+ 9. J. Colliander, M. Keel, G. Staffilani, H. Takaoka, and T. Tao, Global well-posedness and scatter-
866
+ ing for the energy-critical nonlinear Schr¨odinger equation in R3, Ann. of Math. (2) 167 (2008),
867
+ no. 3, 767–865. MR 2415387
868
+ 10. B. Dodson, Global well-posedness and scattering for the defocusing, L2-critical nonlinear
869
+ Schr¨odinger equation when d ≥ 3, J. Amer. Math. Soc. 25 (2012), no. 2, 429–463. MR 2869023
870
+ 11. Benjamin Dodson, Defocusing nonlinear schr¨odinger equations, vol. 217, Cambridge University
871
+ Press, 2019.
872
+ 12. M Grillakis and M Machedon, Pair excitations and the mean field approximation of interacting
873
+ bosons, I, Communications in Mathematical Physics 324 (2013), 601–636.
874
+ 13. M Grillakis and M36052901371 Machedon, Pair excitations and the mean field approximation
875
+ of interacting bosons, II, Communications in Partial Differential Equations 42 (2017), no. 1,
876
+ 24–67.
877
+ 14. Z. Hani and B. Pausader, On scattering for the quintic defocusing nonlinear Schr¨odinger equa-
878
+ tion on R × T2, Comm. Pure Appl. Math. 67 (2014), no. 9, 1466–1542. MR 3245101
879
+ 15. S. Herr, D. Tataru, and N. Tzvetkov, Global well-posedness of the energy-critical nonlinear
880
+ Schr¨odinger equation with small initial data in H1(T3), Duke Math. J. 159 (2011), no. 2, 329–
881
+ 349. MR 2824485
882
+ 16.
883
+ , Strichartz estimates for partially periodic solutions to Schr¨odinger equations in 4d and
884
+ applications, J. Reine Angew. Math. 690 (2014), 65–78. MR 3200335
885
+ 17. Younghun Hong, Strichartz estimates for n-body schr¨odinger operators with small potential in-
886
+ teractions, Discrete and Continuous Dynamical Systems 37 (2017), no. 10, 5355.
887
+ 18. A. D. Ionescu and B. Pausader, The energy-critical defocusing NLS on T3, Duke Math. J. 161
888
+ (2012), no. 8, 1581–1612. MR 2931275
889
+ 19.
890
+ , Global well-posedness of the energy-critical defocusing NLS on R × T3, Comm. Math.
891
+ Phys. 312 (2012), no. 3, 781–831. MR 2925134
892
+ 20. Markus Keel and Terence Tao, Endpoint strichartz estimates, American Journal of Mathematics
893
+ 120 (1998), no. 5, 955–980.
894
+ 21. C. E. Kenig and F. Merle, Global well-posedness, scattering and blow-up for the energy-critical,
895
+ focusing, non-linear Schr¨odinger equation in the radial case, Invent. Math. 166 (2006), no. 3,
896
+ 645–675. MR 2257393
897
+ 22. R. Killip and M. Vi¸san, Scale invariant Strichartz estimates on tori and applications, Math.
898
+ Res. Lett. 23 (2016), no. 2, 445–472. MR 3512894
899
+ 23. Herbert Koch and Daniel Tataru, A priori bounds for the 1d cubic nls in negative sobolev spaces,
900
+ International Mathematics Research Notices 2007 (2007), no. 9, rnm053–rnm053.
901
+ 24. Yongming Luo, Xueying Yu, Haitian Yue, and Zehua Zhao, On well-posedness results for the
902
+ cubic-quintic nls on T3, arXiv preprint (2023).
903
+ 25. Wilhelm Schlag, Dispersive estimates for schr¨odinger operators: a survey, Mathematical aspects
904
+ of nonlinear dispersive equations 163 (2005), 255–285.
905
+ 26. Israel M Sigal and Avy Soffer, The n-particle scattering problem: asymptotic completeness for
906
+ short-range systems, Annals of mathematics (1987), 35–108.
907
+ 27. Yannick Sire, Xueying Yu, Haitian Yue, and Zehua Zhao, On scattering for generalized nls on
908
+ waveguide manifolds, arXiv preprint arXiv:2207.00485 (2022).
909
+ 28. T. Tao, Nonlinear dispersive equations, CBMS Regional Conference Series in Mathematics, vol.
910
+ 106, Published for the Conference Board of the Mathematical Sciences, Washington, DC; by the
911
+ American Mathematical Society, Providence, RI, 2006, Local and global analysis. MR 2233925
912
+ 29. N. Tzvetkov and N. Visciglia, Small data scattering for the nonlinear Schr¨odinger equation on
913
+ product spaces, Comm. Partial Differential Equations 37 (2012), no. 1, 125–135. MR 2864809
914
+ 30.
915
+ , Well-posedness and scattering for nonlinear Schr¨odinger equations on Rd × T in the
916
+ energy space, Rev. Mat. Iberoam. 32 (2016), no. 4, 1163–1188. MR 3593518
917
+
918
+ MANY BODY SCHR ¨ODINGER EQUATION ON WAVEGUIDE MANIFOLDS
919
+ 11
920
+ 31. Kailong Yang and Zehua Zhao, On scattering asymptotics for the 2D cubic resonant system,
921
+ Journal of Differential Equations 345 (2023), 447–484.
922
+ 32. X. Yu, H. Yue, and Z. Zhao, Global Well-posedness for the focusing cubic NLS on the product
923
+ space R × T3, SIAM J. Math. Anal. 53 (2021), no. 2, 2243–2274. MR 4244536
924
+ 33. Xueying Yu, Haitian Yue, and Zehua Zhao, Global well-posedness and scattering for fourth-order
925
+ schr¨odinger equations on waveguide manifolds, arXiv preprint arXiv:2111.09651 (2021).
926
+ 34. Z. Zhao, Global well-posedness and scattering for the defocusing cubic Schr¨odinger equation on
927
+ waveguide R2 × T2, J. Hyperbolic Differ. Equ. 16 (2019), no. 1, 73–129. MR 3954678
928
+ 35.
929
+ , On scattering for the defocusing nonlinear Schr¨odinger equation on waveguide Rm × T
930
+ (when m = 2, 3), J. Differential Equations 275 (2021), 598–637. MR 4191335
931
+ 36. Z. Zhao and J. Zheng, Long time dynamics for defocusing cubic nonlinear Schr¨odinger equa-
932
+ tions on three dimensional product space, SIAM J. Math. Anal. 53 (2021), no. 3, 3644–3660.
933
+ MR 4277925
934
+ Zehua Zhao
935
+ Department of Mathematics and Statistics, Beijing Institute of Technology, Beijing,
936
+ China.
937
+ MIIT Key Laboratory of Mathematical Theory and Computation in Information Secu-
938
+ rity, Beijing, China.
939
+ Email address: [email protected]
940
+
39FQT4oBgHgl3EQf3zah/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
3tFAT4oBgHgl3EQflR3K/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d70f58558577d14341c505bad491fd7e53f9c3e22eb96fd97b857c90e4e0934a
3
+ size 3997741
4dFKT4oBgHgl3EQfRy0L/content/tmp_files/2301.11772v1.pdf.txt ADDED
@@ -0,0 +1,1563 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 3-space orthogonal to uµ
2
+ 3-space orthogonal to uµ
3
+ Observer’s worldline, uµ = dxµ
4
+
5
+
6
+
7
+ x1
8
+ x2
9
+ x3
10
+ Before the wave passes
11
+ After the wave passes
12
+
13
+
14
+ arXiv:2301.11772v1 [gr-qc] 27 Jan 2023
15
+ Electromagnetic memory in arbitrary curved space-times
16
+ Susmita Jana1, ∗ and S. Shankaranarayanan1, †
17
+ 1Department of Physics, Indian Institute of Technology Bombay, Mumbai 400076, India
18
+ Abstract
19
+ The gravitational memory effect and its electromagnetic (EM) analog are potential probes in
20
+ the strong gravity regime. In the literature, this effect is derived for static observers at asymptotic
21
+ infinity. While this is a physically consistent approach, it restricts the space-time geometries for
22
+ which one can obtain the EM memory effect. To circumvent this, we evaluate the EM memory
23
+ effect for comoving observers (defined by the 4-velocity uµ) in arbitrary curved space-times. Using
24
+ the covariant approach, we split Maxwell’s equations into two parts — projected parallel to the 4-
25
+ velocity uµ and into the 3-space orthogonal to uµ. Further splitting the equations into 1+1+2-form,
26
+ we obtain master equation for the EM memory in an arbitrary curved space-time. We provide a
27
+ geometrical understanding of the contributions to the memory effect. We then obtain EM memory
28
+ for specific space-time geometries and discuss the salient features.
29
30
31
+ 1
32
+
33
+ I.
34
+ INTRODUCTION
35
+ LIGO-VIRGO-KAGRA has detected close to 100 gravitational wave (GW) sources. GW
36
+ signals emanating from a black hole or neutron star binaries have opened many new research
37
+ avenues in astronomy, cosmology, and fundamental physics [1–4]. GWs provide a unique
38
+ way to test gravity’s most extreme, non-linear regime in novel ways. The planned third-
39
+ generation ground-based detector (Cosmic Explorer and the Einstein Telescope) will allow
40
+ us to peer far deeper, and LISA will open a new observational window at low frequencies.
41
+ With more sensitive detectors shortly, the focus has been to understand the physical effects
42
+ of GWs. Gravitational wave memory is one such effect [5–13].
43
+ GW memory effects — physically observable phenomena that modify the state of
44
+ gravitational-wave detectors a little bit from their original undisturbed state — are one
45
+ of the key predictions of general relativity [6, 7, 9, 14]. GW memory effects can be divided
46
+ into two types [12, 13]: null memory that occurs when radiation or massless particles es-
47
+ cape from a system to null infinity, and ordinary memory that occurs when the detector
48
+ recoils relative to its initial center of mass frame. The GW memory is characterized as a
49
+ gravitational wave signal approaching a nonzero finite value. This aspect of the GW signal
50
+ is yet to be observed, although LISA is predicted to observe it [15].
51
+ Recently, it has been realized that the memory effect can be thought of as a vacuum
52
+ transition between two different states related by an asymptotic transformation [16, 17].
53
+ Since such asymptotic transformations also occur for other gauge theories, there has been
54
+ an intense activity to obtain analogous memory effects in other gauge theories [18–22]. Since
55
+ electromagnetic (EM) theory is the simplest of all gauge theories and can be a potential
56
+ probe, electromagnetic memory has received much attention [23–33]. Like in GW memory,
57
+ an EM wave generates a permanent change in the relative velocity of test-charged particles
58
+ attached to a detector in the 2-D surface perpendicular to the direction of propagation of the
59
+ wave while passing through the detector [cf. (Fig. 1)]. In other words, EM waves directly
60
+ displace test particles by giving them a momentum (kick), resulting in a relative velocity
61
+ change. This is different from GW memory as the GW does not displace test particles.
62
+ Instead, GW distorts the space-time geometry itself, which causes a change in separation
63
+ between two test particles.
64
+ Bieri and Garfinkle were the first to propose the memory effect due to electromagnetic
65
+ 2
66
+
67
+ x1
68
+ x2
69
+ x3
70
+ Before the wave passes
71
+ After the wave passes
72
+ FIG. 1. Electromagnetic memory effect that lies in the 2-D surface orthogonal to the direction of
73
+ the coming wave.
74
+ waves [18]. Like in GW memory, they showed that EM waves produce two types of momen-
75
+ tum kicks. In Ref. [19], Winicour showed the absence of memory effect generated by the
76
+ electromagnetic field coming from distant sources for a bound charge distribution and the
77
+ non-existence of memory effect due to the magnetic field.
78
+ In the case of GW memory, gravitational radiation must reach the detector. Likewise, EM
79
+ radiation also has to reach null infinity to generate null kick memory. Hence to calculate
80
+ EM memory, one needs to know the properties of the electric field and radiation at null
81
+ infinity [18]. More specifically, the original approach by Bieri and Garfinkle requires prior
82
+ knowledge about the behavior of the fields in asymptotic limits. It can be extended to
83
+ conformally flat space-times
84
+ [32, 34].
85
+ Also, the analysis does not provide any physical
86
+ understanding of why the EM memory has such a form in flat and conformally flat space-
87
+ times.
88
+ This leads us to the following questions: Can we derive a master equation for EM memory
89
+ in a generic curved space-time? What role does curved geometry play in EM memory? Can
90
+ we have a physical understanding of the various contributions to EM memory? This work
91
+ addresses these three questions using 1 + 3 covariant formalism [35–40].
92
+ 3
93
+
94
+ There are two reasons why covariant formalism is better suited to studying EM memory.
95
+ First, as mentioned earlier, when the EM wave propagates in a given spatial direction,
96
+ the net momentum experienced by the particle lies in the 2-D surface orthogonal to the
97
+ direction of propagation of the EM wave (for a pictorial representation, see Fig. 1). In other
98
+ words, the EM memory affects the test particle lying on the 2-D surface. Hence, it is more
99
+ natural to have a formalism that identifies such a dynamical 2-D surface and evaluates EM
100
+ memory. Second, like in fluid mechanics, we can observe the flow of EM radiation in two
101
+ ways. First, as in Refs. [18, 19], an asymptotic stationary observer monitors changes in
102
+ Electric and Magnetic fields of the incoming EM radiation. Second, a comoving observer
103
+ monitors changes in Electric and Magnetic fields. In fluid mechanics, these are referred to
104
+ as the Lagrangian and Eulerian descriptions of flow, respectively. It is well-known that the
105
+ Eulerian description is better suited for fluids and in cosmology [37, 38, 40].
106
+ In this work, we evaluate the memory effect using the 1+1+2 covariant formalism [37, 41–
107
+ 44]. The 1 + 1 + 2 decomposition of space-time is a natural extension of the 1 + 3 formalism
108
+ in which the three-space is further decomposed to a given spatial direction. This approach
109
+ is also referred to as semi-tetrad formalism [45–49]. The principle advantage is that we can
110
+ evaluate the net momentum (kick) vector on the 2-D surface for arbitrary space-time. Since
111
+ this affects all the test particles on the 2-D surface, we refer to this as memory vector. This
112
+ can also be understood using the fact that the electric and magnetic fields are transverse
113
+ to the direction of propagation of the EM wave. Using the 1 + 1 + 2 covariant formalism,
114
+ we obtain the master equation for the EM memory in arbitrary space-time. We provide
115
+ a geometrical understanding of the various contributions to the memory effect. We then
116
+ obtain the EM memory for specific space-times.
117
+ The rest of this work is organized as follows: In Sec. II, we provide an overview of the
118
+ two — 1+3 and 1+1+2 — covariant formalisms and obtain the key geometrical quantities.
119
+ Then, in Sec. III, we rewrite Maxwell’s equation in 1+3 and 1+1+2 covariant formalisms in
120
+ arbitrary space-time. Next, in Sec. IV, we obtain the master equation for the EM memory
121
+ in arbitrary space-time and discuss the key features. In Sec. V, we then obtain EM memory
122
+ for specific space-times and compare them with the known results in the literature. Finally,
123
+ in Sec. VI, we summarise our results and discuss possible future directions.
124
+ In this work, we use (−, +, +, +) metric signature and set c = 1/(4πǫ0) = 1. A dot
125
+ 4
126
+
127
+ denotes a derivative with respect to the proper time τ. A prime denote derivative w.r.t the
128
+ space-like vector nµ. For easy comparison, we follow the notations of Ref. [40].
129
+ II.
130
+ OVERVIEW OF COVARIANT FORMALISM
131
+ A covariant theory like general relativity does not favor any particular coordinates. How-
132
+ ever, splitting tensors in time and space is typically required for its physical meaning. Thus,
133
+ the splitting achieves this by rewriting Einstein’s equations as a set of constraint and evo-
134
+ lution equations in a three-dimensional framework. This allows for an intuitive evaluation
135
+ of the relevant physical system.
136
+ A choice of coordinates defines a threading of space-time into lines and a slicing into
137
+ hypersurfaces [50]. Thus, the splitting procedure can be carried out in two distinct ways:
138
+ First, by employing the so-called (3 + 1)− formalism or slicing of space-time [51]. Second,
139
+ by employing (1 + 3)− formalism, or threading of space-time [37, 38, 40]. In the (3 + 1)−
140
+ decomposition, the time is a label of space-like slices Σt with space coordinates xi.
141
+ In
142
+ contrast, in the (1+3)− splitting, the time-like world lines have coordinate τ and are labeled
143
+ by xµ. In the (3 + 1)− formulation, the construction only requires space-like hypersurfaces
144
+ and does not demand causality of the time curves. However, in the (1+3)− approach, every
145
+ tensor is split into the parallel and orthogonal directions to a time-like vector (curves).
146
+ Furthermore, it does not provide any condition on the causality of the spatial distances.
147
+ Though the two approaches provide different points of view, it has been shown that they
148
+ are equivalent for space-times with symmetries [50]. We use the covariant 1 + 3 formalism
149
+ in this work to obtain EM memory. As mentioned in the introduction, covariant formalism
150
+ provides a physical understanding of the origin of EM memory in arbitrary space-time.
151
+ A.
152
+ Covariant 1+3 Formalism
153
+ Heckmann, Schucking, and Raychaudhuri developed the covariant approach to General
154
+ relativity in the 1950s [35, 36] and was later used in different gravitational and cosmological
155
+ models [37–40]. To decompose the 4-D space-time in (1 + 3)− formalism, we introduce a
156
+ family of observers with worldlines tangent to a timelike 4-velocity vector uµ satisfy the
157
+ 5
158
+
159
+ 3-space orthogonal to uµ
160
+ 3-space orthogonal to uµ
161
+ Observer’s worldline, uµ = dxµ
162
+
163
+ FIG. 2. Visualisation of 1 + 3 formalism.
164
+ following:
165
+ uµ = dxµ
166
+ dτ ;
167
+ uµuµ = −1 ,
168
+ (1)
169
+ where τ is the proper time measured along the fundamental world line. See Fig. 2. Using
170
+ the 4-velocity (uµ) we can define the following projection tensors [38, 40]:
171
+
172
+ ν = −uµuν;
173
+
174
+ ν Uν
175
+ γ = Uµ
176
+ γ;
177
+
178
+ µ = 1
179
+ (2a)
180
+ hµν = gµν + uµuν;
181
+
182
+ ν hν
183
+ γ = hµ
184
+ γ;
185
+
186
+ µ = 3;
187
+ hµν uν = 0
188
+ (2b)
189
+ uµ, and hence Uµ ν, projects physical quantities parallel to the 4-velocity of the observer
190
+ and hµν projects quantities into the 3-space orthogonal to uµ. The tensor hµν provides the
191
+ metric properties of the instantaneous 3-space as well in the absence of rotation or vorticity.
192
+ In this formalism, the projection of the vector (V ν) orthogonal to uµ is defined as V<µ>.
193
+ Similarly, the trace-less part of a rank-2 tensor (Sαβ) projected into space orthogonal to uµ
194
+ 6
195
+
196
+ is defined as S<µν>. Mathematically, these are given by:
197
+ V<µ> := hµ ν V ν ;    S<µν> := ( hµ α hν β − (1/3) hµν hαβ ) Sαβ
+ (3)
205
+ The projection of the time derivative and orthogonal spatial derivative of any vector (V ν)
206
+ and tensor (Sαβ) are defined as:
207
+ ˙V <µ> := hµ
208
+ αuν∇ν V α;
209
+ Dα Sβγ := hµ
210
+ α hβ
211
+ ν hγ
212
+ ρ ∇µ Sνρ
213
+ (4)
214
+ The covariant derivative of uµ can be split into two parts: 1) directional derivative along
215
+ the tangent to the world line, 2) spatial derivative in the 3-space orthogonal to uν. This
216
+ can further be split into trace, traceless symmetric and anti-symmetric tensor:
217
+ ∇νuµ = Θ
218
+ 3 hµν + σµν + ωµν − ˙uµuν .
219
+ (5)
220
+ In the above equation, σµν is the symmetric expansion tensor that describes the distortion
221
+ in the matter flow, Θ corresponds to the expansion rate of the matter w.r.t the observer,
222
+ ωµν is the anti-symmetric vorticity tensor describing the rotation of the matter w.r.t a non-
223
+ rotating frame. The last term refers to the relativistic acceleration vector (the directional
224
+ derivative) ˙uµ = uν∇νuµ, which corresponds to the degree to which the matter moves under
225
+ forces other than gravity plus inertia. Further, using the vorticity tensor, we can define the
226
+ following quantity called the vorticity vector:
227
+ ων = −(1/2) ǫµναβ ωαβ uµ
+ (6)
230
+ where, ǫµνρσ = (1/√−g) ηµνρσ is the fully antisymmetric tensor, ηµνρσ is the Levi-Civita symbol whose
233
+ values are ±1 and we set η0123 = 1 = −η0123 [52]. The Levi-Civita 3-tensor is defined as:
234
+ ǫµνα ≡ ǫµναβuβ ,
235
+ (7)
236
+ and satisfies the following relations: ǫµνα uα = 0 and ǫµναβ = 2 ( u[µ ǫν]αβ − ǫµν[α uβ] ) . The
240
+ square bracket w.r.t the indices refers to antisymmetrization.
241
+ B.
242
+ 1+1+2 covariant formalism
243
+ The 1 + 3-covariant formalism is well-suited for relativistic cosmology because, at the
244
+ largest observable scales, the universe is homogeneous and isotropic [38]. These symmetries
245
+ 7
246
+
247
+ allow the slicing or threading of the 4-D space-time manifold into a one-parameter family of
248
+ spacelike hypersurfaces corresponding to cosmic time. Interestingly, it is easy to show that
249
+ in the Friedmann-Lemaitre-Robertson-Walker (FLRW) background, all physical quantities
250
+ except for the volume expansion Θ and the energy density vanish.
251
+ Using the Stewart-
252
+ Walker lemma, in this formalism, it was possible to construct gauge invariant quantities
253
+ up to second order in cosmological perturbations [53, 54]. However, the 1 + 3-formalism is
254
+ not suited if the space-time is inhomogeneous, like spherical symmetry or space-times with
255
+ local rotational symmetry (LRS) [41]. In such cases, splitting the 3-space orthogonal to
256
+ the time-like congruence into one spacelike direction and a 2-space is apt [37]. Thus, the
257
+ 1 + 1 + 2 decomposition of space-time is a natural extension of the 1 + 3 formalism in which
258
+ the three-space is further decomposed to a given spatial direction. This approach is called
259
+ semi-tetrad formalism [45–49].
260
+ As mentioned in the Introduction, our interest is to evaluate the net momentum experi-
261
+ enced by a test particle after the electromagnetic wave passes through the space-time point.
262
+ In the covariant 1 + 3 formalism, the test particle is the fundamental time-like observer.
263
+ As depicted in (Fig. 1), when the EM wave propagates in a given spatial direction, the net
264
+ momentum experienced by the particle lies in the 2-D surface orthogonal to the direction
265
+ of propagation of the EM wave. In other words, the net momentum (kick) vector lies in the
266
+ 2-D surface. Thus, the net memory effect of the test particle will lie on the 2-D surface;
267
+ hence, we will refer to this as the memory vector. This can also be understood using the
268
+ fact that the electric and magnetic fields are transverse to the direction of propagation of
269
+ the EM wave. Thus, it is cogent to further split the 3-space to 1 + 2-space.
270
+ More specifically, choosing a generic space-like vector (nµ), we split the 3-space into 1 +
271
+ 2-space [41–44]. The space-like vector (nµ) satisfies the following conditions:
272
+ nµnµ = 1,
273
+ nµuµ = 0 .
274
+ Like in the 1 + 3-formalism, we project the vectors and tensors defined in 3-space along
275
+ the space-like direction (nµ) and into the 2-space that is orthogonal to nµ. Here again, the
276
+ projection tensor (˜hµν) need to be defined:
277
+ ˜hµν = hµν − nµnν;
278
+ ˜hµ
279
+ ν ˜hν
280
+ γ = ˜hµ
281
+ γ;
282
+ ˜hµ
283
+ µ = 2;
284
+ ˜hµν uν = 0;
285
+ ˜hµν nν = 0 .
286
+ (8)
287
+ All the vectors and tensors defined in the 3-space in the 1 + 3-formalism can be split into
288
+ 8
289
+
290
+ 1 + 2 form. For instance, an arbitrary space-like vector V µ (defined in the 3-space) can be
291
+ written as:
292
+ V µ = V nµ + V µ
293
+ (9)
294
+ where, V = V µnµ and V µ = ˜hµ νV ν. Similarly an arbitrary tensor vµν on the 3-space can
295
+ be split as:
296
+ vµν = V
297
+
298
+ nµnν − 1
299
+ 2
300
+ ˜hµν
301
+
302
+ + 2V(µnν) + Vµν ,
303
+ (10)
304
+ where V(µnν) = (Vµnν +nνVµ)/2. Similarly, the relative acceleration of the time-like observer
305
+ and other geometrical quantities defined in 3-space can be written in 1 + 2 space as:
306
+ ˙uµ = A nµ + A µ
307
+ (11)
308
+ ˙nµ = A uµ + αµ
309
+ (12)
310
+ ωµ = Ωnµ + Ωµ
311
+ (13)
312
+ σµν = Σ
313
+
314
+ nµnν − 1
315
+ 2
316
+ ˜hµν
317
+
318
+ + 2Σ(µnν) + Σµν
319
+ (14)
320
+ where ˙nµ := uν∇ν nµ is the relative acceleration of the space-like vector along the time-like
321
+ observer. Here, A µ, αµ, Σµν, Ωµ are orthogonal to nµ as well as uµ. Also, A µ, Ωµ(Σµν) are
322
+ the vectors (tensor) projected on the 2-space. In this formalism, we define the alternating
323
+ Levi-Civita 2-tensor
324
+ ǫµν ≡ ǫµναnα
325
+ (15)
326
+ which is orthogonal to nµ and has components only in the 2-space. Given an arbitrary
327
+ vector V µ in the 2-space, we can construct another vector ǫµνV ν that is orthogonal to V µ
328
+ which is in the 2-space and has the same length.
329
+ The 1+2 splitting of the 3-space leads to a new directional derivative along the space-like
330
+ vector nµ:
331
+ v′
332
+ µν ≡ nαDαvµν
333
+ (16)
334
+ ˜Dαvµν ≡ ˜hα
335
+ β˜hµ
336
+ ρ˜hν
337
+ σDβvρσ .
338
+ (17)
339
+ The derivative in Eq. (16) physically correspond to the variation of the physical quantities
340
+ on the 2-space along the space-like vector nµ. The derivative ( ˜D) in Eq. (17) corresponds
341
+ 9
342
+
343
+ to the variation of the physical quantities that lie in the 2-space. These will contribute to
344
+ the memory vector.
345
+ As we split the covariant derivative of uµ in Eq. (5), similarly we can split the covariant
346
+ derivative of nµ as:
347
+ Dνnµ = ˜Dνnµ + nµn′
348
+ ν = ˜σµν + ˜ωµν + 1
349
+ 2
350
+ ˜Θ˜hµν + nµn′
351
+ ν
352
+ (18)
353
+ where, ˜σµν ≡ ˜D<νnµ>, ˜ωµν ≡ ˜D[νnµ] and ˜Θ = ˜Dµnµ are shear, vorticity and the surface
354
+ expansion-contraction scalar respectively and n
355
+
356
+ µ is the spatial derivative along nµ. Thus,
357
+ ˜Dνnµ describes the kinematic properties or the relative motion of the space-like curves in
358
+ the 2-surface orthogonal to nµ. We can obtain the relation between the kinematic quantities
359
+ derived from the motion of time-like vector uµ and kinematic quantities in 2-space derived
360
+ from the space-like vector nµ. See, for instance, Ref. [44].
361
+ III.
362
+ ELECTROMAGNETIC THEORY IN COVARIANT FORMALISM
363
+ The covariant formalism has been extensively employed in studying the evolution of
364
+ electromagnetic fields in curved space-time [43]. In the covariant formulation, the dynamics
365
+ and kinematics are constricted by the Bianchi and Ricci identities. The (1 + 3)− covariant
366
+ formulation permits the classification of cosmological models, a fluid description of the
367
+ matter field in FLRW universes.
368
+ However, as mentioned earlier, the 1 + 3-formalism is
369
+ not suited if the space-time is inhomogeneous, like spherical symmetry or space-times with
370
+ LRS [41]. In such cases, the 1 + 1 + 2-covariant or semi-tetrad formalism is better suited.
371
+ Since we aim to derive EM memory for arbitrary space-times, we use 1 + 1 + 2-covariant
372
+ formalism. We obtain a generic form of the EM memory effect by evaluating the change
373
+ in the velocity vector ∆uµ that lie in the 2-space. In order to do so, we fix the space-
374
+ like direction to be the direction of the propagation of the wave. In the case of spherically
375
+ symmetric space-time, this naturally translates to the radial direction. One key advantage is
376
+ that the electromagnetic theory in the 1+1+2 formalism helps to understand the evolution
377
+ and dynamics of the EM fields along the space-like direction and in the 2-space normal
378
+ to nµ and uµ. Our approach makes geometrical contributions to the memory effect more
379
+ transparent.
380
+ In the next subsection, we rewrite Maxwell’s equations in 1+3 formalism in an arbitrary
381
+ 10
382
+
383
+ space-time. Later, we formulate the evolution equations of the EM fields in the 2-space and
384
+ two constraint equations of the same along uµ and nµ [44]. The key advantage is that we
385
+ can obtain the memory vector from the projected acceleration vector onto the 2-space.
386
+ A.
387
+ In 1+3 formalism
388
+ The fundamental objects are the Maxwell electromagnetic field tensor F µν. The (1 +
389
+ 3) covariant formalism of Maxwell’s electromagnetic theory provides a way to study the
390
+ interaction of EM fields with different components of general space-time geometry [43].
391
+ With the (1 + 3) decomposition, it is possible to split F µν into the electric and magnetic
392
+ fields. Note that the local coordinates are mathematical parameters that label the points
393
+ of the space-time manifold M; therefore, the electric and magnetic fields may not have a
394
+ direct physical meaning. In order to make measurements, an observer brings in an additional
395
+ structure on M by introducing the orthonormal coframe field. This gives rise to the split of
396
+ Maxwell’s tensor F into the physical electric and magnetic fields.
397
+ Specifically, formalism allows us to split the equations of motion of the fields and currents
398
+ into two parts:
399
+ 1. projected parallel to the 4-velocity uµ of the fundamental observer
400
+ 2. projected into the 3-space orthogonal to uµ.
401
+ To keep the calculations tractable, we perform all the calculations in source-free and lossless
402
+ regions.
403
+ However, the EM memory analysis can be straightforwardly extended to these
404
+ regions. In the source-free regions, Maxwell’s equations are:
405
+ ∇νF µν = 0
406
+ (19)
407
+ ∇[γFµν] = 0;
408
+ or
409
+ ∇νF ∗µν = 0 ,
410
+ (20)
411
+ where F ∗µν is the dual to F µν and is defined as F ∗µν = (1/2)ǫµναβFαβ.
412
+ In the 1 + 3 formalism, by projecting F µν and F ∗µν along the time-like 4-velocity vector,
413
+ we can decompose them into electric and magnetic parts. The electric (Eµ) and magnetic
414
+ (Bµ) 4-vectors are defined as:
415
+ Eµ := F µνuν
416
+ (21)
417
+ Bµ := F ∗µνuν
418
+ (22)
419
+ 11
420
+
421
+ From the above definitions, we infer:
422
+ Eµuµ = 0;
423
+ Bµuµ = 0
424
+ (23)
425
+ which implies Eµ and Bµ have only spatial components. Given this, we can rewrite Fµν and
426
+ F ∗µν as:
427
+ Fµν = uµEν − uνEµ + ǫµναβBαuβ
428
+ (24)
429
+ F ∗αβ = ǫαβµν uµ Eν + ( uαBβ − uβBα ) .
+ (25)
434
+ From the above expressions, we see that the simultaneous transformations Eµ → −Bµ,
435
+ Bµ → Eµ leads to F ∗µν → F µν. This implies that we can obtain the second Maxwell’s
436
+ equation (20) from the first Maxwell’s equation (19) or vice-versa. More specifically, if we
437
+ obtain the time-like part and space-like part of Maxwell’s equations (20), we can write the
438
+ time-like part and space-like part of the other Maxwell’s equations (19) by substituting
439
+ Eµ → −Bµ, Bµ → Eµ.
440
+ In the rest of this subsection, we obtain Maxwell’s equations by projecting along uµ
441
+ (time-like part) and hµν (space-like part) [55]. We first obtain the time-like part of Eq. (20)
442
+ by multiplying it with uµ:
443
+
444
+
445
+ ∇β ˜F αβ �
446
+ = 0
447
+ (26)
448
+ Using the decomposition in Eq. (25), the above expression becomes:
449
+ ∇βBβ − Bβ ˙uβ + (∇βuα) ǫαβµνuµEν = 0
450
+ (27)
451
+ We simplify the above equation using the following steps: First, we combine the first two
452
+ terms in the LHS. From Eq. (26), we have Bβ ˙uβ = −uβ ˙Bβ = −uβuα∇αBβ. Substituting
453
+ in the second term of the above expression, we have δα
454
+ β ∇αBβ + uβuα∇αBβ = hα
455
+ β
456
+
457
+ ∇αBβ�
458
+ .
459
+ Substituting ∇βuα from Eq. (5) and using the definition of vorticity vector in Eq. (6), the
460
+ third term in the LHS of the above expression simplifies to −2ωβEβ. Thus, the time-like
461
+ part of Eq. (20) reduces to:
462
+ DβBβ = 2ωβEβ .
463
+ (28)
464
+ The space-like part of Eq. (20) can be obtained by multiplying it with hµ ν,
465
+
466
+ ρ �
467
+ ∇β ˜F αβ �
468
+ = 0
469
+ (29)
470
+ 12
471
+
472
+ Using a series of steps, the above expression can be rewritten as:
473
+ ˙B<ρ> =
474
+
475
+ σρ
476
+ β + ωρ
477
+ β − 2Θ
478
+ 3 hρ
479
+ β
480
+
481
+ Bβ − ǫρµν ˙uµEν − ǫρµν ∇µEν .
482
+ (30)
483
+ where, ǫµνα is defined in Eq. (7). The above equation provides the dynamical evolution of
484
+ the magnetic field, while Eq. (28) is the constraint equation.
485
+ As mentioned above, performing simultaneous transformation Eµ → −Bµ and Bµ → Eµ
486
+ in Eqs. (28) and (30), we obtain the time-like and space-like parts of the first Maxwell's
487
+ equation (19):
488
+ DβEβ = −2ωνBν
489
+ (31)
490
+ ˙E<ρ> =
491
+
492
+ σρ
493
+ β + ωρ
494
+ β − 2Θ
495
+ 3 hρ
496
+ β
497
+
498
+ Eβ + ǫρµν ˙uµBν + ǫρµν DµBν .
499
+ (32)
500
+ Similarly, the above equation provides the dynamical evolution of the electric field, while
501
+ Eq. (31) is the constraint equation.
502
+ B.
503
+ In 1+1+2 formalism
504
+ We aim to calculate the memory effect of EM fields. As the memory vector resides in
505
+ the 2-surface orthogonal to the direction of propagation of the in-coming wave, we need to
506
+ decompose the 3-space to 1 + 2-space w.r.t a given spatial direction. In this subsection,
507
+ we rewrite Maxwell’s equations (19, 20) using the space-like vector nν and the projection
508
+ tensor (8) in 1 + 1 + 2 formalism.
509
+ To do this, we first express the EM fields and currents in 3-space into 1 + 2 form:
510
+ Eµ = E nµ + E µ
511
+ (33)
512
+ Bµ = Bnµ + Bµ .
513
+ (34)
514
+ where, E ≡ Eµnµ, E µ ≡ ˜hµ νEν, B ≡ Bµnµ, and Bµ ≡ ˜hµ νBν. Following the discussion in
515
+ Sec. (II B), it follows that ǫµνE ν is orthogonal to E µ and, similarly, ǫµνBν is orthogonal to
516
+ Bµ. If electric and magnetic fields are orthogonal to each other in 2 space, then we have
517
+ E µ = ǫµν Bν
+ Bµ = − ǫµν E ν .
519
+ (35)
520
+ These relations will play an important role in Sec. (IV) to derive the memory effect.
521
+ 13
522
+
523
+ The second step is to split the evolution equations (30, 32) in terms of E , E µ, B, Bµ.
524
+ To do that, we project Eq. (32) along spacelike direction nµ and multiply Eq. (32) with
525
+ projection tensor (8). After a long calculation, we obtain the following evolution equations
526
+ for E (along nµ) and E µ (in the orthogonal 2-space):
527
+ ˙E + ΘE = αµEµ − 2˜ωB + ǫµρ ˜DµBρ
528
+ (36)
529
+ ˙E¯µ + Θ
530
+ 2 Eµ = − (αµ + 2ǫµρΩρ) E + (Σµρ + Ωǫµρ) E ρ + ǫµρ
531
+
532
+ A ρ − n′ρ + ˜Dρ�
533
+ B
534
+ − ǫµρ
535
+
536
+ A Bρ + B′ρ −
537
+
538
+ ˜DρBν
539
+
540
+ nν�
541
+ ,
542
+ (37)
543
+ where, ˜ω = ˜ωµν ǫµν, Θ is the expansion factor defined in Eq. (5), A µ is the relative accel-
544
+ eration vector in 2-space defined in Eq. (11), ˜ω is the vorticity defined in Eq. (18). Ωµ, Ω
545
+ is defined in Eq. (13) and Σµν is in Eq. (14). The 2-space component of ˙nµ is αµ which is
546
+ defined in Eq. (12), whereas A = nµ ˙uµ = −uµ ˙nµ mentioned in Eq. (11), (12).
547
+ We want to highlight the following points regarding the above expressions: First, the
548
+ above equations generalize Ampere’s law for arbitrary space-time. For example, in Eq. 36,
549
+ the first term in the LHS corresponds to the time derivative of the electric field along space-
550
+ like direction nµ and the last term in RHS is the curl of the magnetic field in 2-space.
551
+ Similarly, the LHS of Eq. (37) is the time derivative of the electric field in 2-space, and in
552
+ the last term in the RHS is the curl of Bρ. Second, in the flat space-time, the expansion
553
+ factor (Θ), the relative acceleration vector (αµ), and vorticity (˜ω) vanish, and the above
554
+ expression lead to Ampere’s law in flat space-time. Thus, background space-time introduces
555
+ new couplings between the electric and magnetic field components. Lastly, we showed that
556
+ the simultaneous transformation Eµ → −Bµ, Bµ → Eµ leads to F ∗µν → F µν. Substituting
557
+ E → B; E µ → Bµ and B → −E ; Bµ → −E µ in Eqs. (36, 37), we have:
558
+ ˙
559
+ B + ΘB =Bµαµ + 2˜ωE − ǫµρ ˜DµE ρ
560
+ (38)
561
+ ˙
562
+ B¯µ + 1
563
+ 2ΘBµ = − (αµ + 2ǫµρΩρ) B + (Σµρ + Ωǫµρ) Bρ − ǫµρ
564
+
565
+ A ρ + ˜Dρ − n′ρ�
566
+ E
567
+ + ǫµρ
568
+
569
+ A E ρ + ǫµρE ′ρ −
570
+
571
+ ˜DρEν
572
+
573
+ nν�
574
+ (39)
575
+ Note that we obtain the above equations by projecting Eq. (30) along spacelike direction
576
+ nµ and multiplying Eq. (30) with the projection tensor (8). Again, the above equations generalize
577
+ Faraday’s law for arbitrary space-time.
578
+ 14
579
+
580
+ The last step is to split the constraint equations (31, 28) in terms of E , E µ, B, Bµ. Sub-
581
+ stituting (33, 34) and the kinematic quantities (11-14), we get:
582
+ ˜DµEµ + nµE ′
583
+ µ + E ′ + ˜ΘE + 2 (ΩB + ΩµBµ) = 0
584
+ (40)
585
+ ˜DµBµ − n′µBµ + B′ + ˜ΘB − 2 (ΩE + ΩµEµ) = 0
586
+ (41)
587
+ where ˜Θ is the expansion along the space-like vector defined in Eq. (18). The above equations
588
+ are generalizations of Gauss law. Here again, in the flat space-time, the expansion factor
589
+ (˜Θ), the relative acceleration vector (αµ), vorticity (Ω) vanish, and the above expressions
590
+ lead to Gauss law in flat space-time.
591
+ C.
592
+ Energy-momentum tensor of the electromagnetic field
593
+ As we will show in the next section, the electromagnetic stress tensor plays a crucial role
594
+ in understanding the memory effect. This subsection evaluates the electromagnetic stress
595
+ tensor in 1 + 1 + 2 formalism for an arbitrary space-time. The EM action in an arbitrary
596
+ background is:
597
+ S = −1
598
+ 4
599
+
600
+ d4x √−g FµνFρσgµρgνσ .
601
+ (42)
602
+ Varying the above action w.r.t the metric (gµν) leads to the following energy-momentum
603
+ tensor:
604
+ Tµν = 1
605
+ 2gρσFµρFνσ − 1
606
+ 8gµνgρσgαβFραFσβ .
607
+ (43)
608
+ In 1 + 3-formalism, the stress-tensor of matter field (Tµν) can written as:
609
+ Tµν = ρ uµuν + 2 S(µ uν) + Wµν ,
610
+ (44)
611
+ where, the energy-density ρ, the energy flux Sα and stress-tensor W αβ as measured in the
612
+ observer’s worldline are given by [56]:
613
+ ρ = Tµνuµuν,
614
+ Sα = −hα
615
+ µ T µνuν,
616
+ W αβ = hα
617
+ µ T µνhβ
618
+ ν
619
+ (45)
620
+ For the electromagnetic fields in 1 + 3-formalism, ρ, Sµ and Wµν are:
621
+ ρ ≡ (1/2) ( Eα Eα + Bα Bα ) ;    Sµ ≡ ǫµνρ Eν Bρ
+ (46)
+ Wµν ≡ (1/2) ( Eα Eα + Bα Bα ) hµν − EµEν − BµBν
+ (47)
628
+ 15
629
+
630
+ Rewriting ρ in terms of the variables (E , E µ, B, Bµ) in 1 + 1 + 2 formalism, we have:
631
+ ρ = 1
632
+ 2
633
+
634
+ E 2 + B2�
635
+ + 1
636
+ 2 (E µEµ + BµBµ) = ρ(n) + ρ2−space
637
+ (48)
638
+ Thus, ρ(n) corresponds to the energy of the EM field along nµ and ρ2−space corresponds to
639
+ the energy of the EM field in the 2-space. The energy flux Sµ (a vector in 3-space) can be
640
+ rewritten in 1 + 2 space as:
641
+ Sµ = S nµ + Sµ
642
+ (49)
643
+ where S is the Poynting vector of the EM field along the space-like vector nµ and Sµ is
644
+ the energy flux in the 2-space. These are given by:
645
+ S = Sµnµ = ǫµνE µBν
646
+ (50)
647
+ Sµ = −ǫµν (E Bν − BE ν) = − (E E µ + BBµ)
648
+ (51)
649
+ In deriving the last expression, we have used the orthogonality condition between the electric
650
+ and magnetic fields in the 2-space, i. e., Eν = ǫνµBµ. As we will see in the next section, the
651
+ memory vector depends on the part of the electromagnetic energy density ρ and Sµ.
652
+ IV.
653
+ MEMORY EFFECT IN ARBITRARY SPACE-TIME
654
+ Having written Maxwell’s equations in 1 + 1 + 2 formalism for an arbitrary space-time,
655
+ we now evaluate the memory effect. Usually, in the literature, one uses the Lorentz force
656
+ equation to derive EM memory. The equation of motion of a charged body (of mass m and
657
+ charge e) in both gravitational and electromagnetic fields are:
658
+ m duα/dτ − (m/2) gβγ,α uβ uγ = e Fαβ uβ
+ (52)
662
+ However, the above expression does not consider the new couplings between the electric
663
+ and magnetic field components in Eqs. (36) - (39). Hence, we use the complete Maxwell’s
664
+ equations (36) - (41) and explicitly obtain the change in velocity (∆uµ) of the time-like
665
+ observer. More specifically, using Eqs. (37, 39), we first calculate the acceleration vector
666
+ A µ in the 2-space. We can then integrate the expression for the acceleration vector (A µ
667
+ in the 2-space) with respect to time t or null time coordinate u ≡ (t − r) leading to the
668
+ memory vector.
669
+ 16
670
+
671
+ In the rest of this section, we calculate A µ for observers whose tangents are congruent to
672
+ the space-like geodesics. This implies nσDσnρ = n′ρ = 0, i. e., nµ is tangent to a congruence
673
+ of space-like geodesics [44].
674
+ Using this condition and substituting
675
+ ˙E¯µ = ˜hµν ˙E ν, B′ ρ =
676
+ nνDνBρ in Eqs. (37, 39), we get:
677
+ ˜hµν ˙E ν + ǫµρnνDνBρ = − 1
678
+ 2ΘEµ − (αµ + 2ǫµρΩρ) E + (Σµρ + Ωǫµρ) E ρ
679
+ +
680
+
681
+ ǫµρA ρ + ǫµν ˜Dν�
682
+ B − ǫµν
683
+
684
+ ˜Dνnρ�
685
+ Bρ − ǫµρA Bρ
686
+ (53)
687
+
688
+ ˜hµν ˙
689
+ Bν − ǫµρnνDνE ρ�
690
+ = −1
691
+ 2ΘBµ − (αµ + 2ǫµρΩρ) B + (Σµρ + Ωǫµρ) Bρ
692
+
693
+
694
+ ǫµρA ρ + ǫµν ˜Dν�
695
+ E + ǫµν
696
+
697
+ ˜Dνnρ�
698
+ Eρ + ǫµρA E ρ
699
+ (54)
700
+ Multiplying Eq. (53) with B, multiplying Eq. (54) with E and subtracting the resultant
701
+ equations leads to:
702
+ ǫµνA ν = − ǫµν
703
+ 2
704
+ Dν(E 2 + B2)
705
+ (E 2 + B2)
706
+ +
707
+
708
+ Σµν + Ωǫµν − Θ
709
+ 2
710
+ ˜hµν
711
+ � (E Bν − BE ν)
712
+ (E 2 + B2)
713
+ + ǫµν
714
+
715
+ ˜σρν + ˜ωρν +
716
+ ˜Θ
717
+ 2
718
+ ˜hρν
719
+
720
+ (BBρ + E Eρ)
721
+ (E 2 + B2)
722
+ + ǫµρA (E E ρ + BBρ)
723
+ (E 2 + B2)
724
+ +
725
+ B
726
+ (E 2 + B2)
727
+
728
+ ˜hµν ˙E ν + ǫµρnνDνBρ�
729
+
730
+ E
731
+ (E 2 + B2)
732
+
733
+ ˜hµν ˙
734
+ Bν − ǫµρnνDνE ρ�
735
+ (55)
736
+ To have a transparent understanding, we substitute the definitions (48) - (51) in the expres-
737
+ sion above, resulting in:
738
+ ǫµνA ν = − ǫµν
739
+ 2
740
+ Dνρ(n)
741
+ ρ(n)
742
+ − ǫνα
743
+ 2
744
+
745
+ Σµν + Ωǫµν − Θ
746
+ 2
747
+ ˜hµν
748
+ � Sα
749
+ ρ(n)
750
+ − ǫµν
751
+ 2
752
+
753
+ ˜σρν + ˜ωρν +
754
+ ˜Θ
755
+ 2
756
+ ˜hρν
757
+
758
+
759
+ ρ(n)
760
+ − ǫµρS ρ A
761
+ 2ρ(n)
762
+ +
763
+ B
764
+ 2ρ(n)
765
+
766
+ ˜hµν ˙E ν + ǫµρnνDνBρ�
767
+
768
+ E
769
+ 2ρ(n)
770
+
771
+ ˜hµν ˙
772
+ Bν − ǫµρnνDνE ρ�
773
+ . (56)
774
+ This is the master equation for the EM memory in arbitrary space-time regarding which we
775
+ would like to discuss the following points: First, to our understanding, this is the first time the
776
+ EM memory has been obtained for an arbitrary space-time. In the previous calculations [18,
777
+ 19], the authors have restricted to asymptotic flat space-times. Second, the last two terms
778
+ in the RHS of the above expression vanishes in the asymptotic limit. To see this, let us
779
+ consider a spherically symmetric space-time. Let t refer to the time coordinate and r to the
780
+ radial coordinate and the null coordinate is u ≡ t − r. In the asymptotic limit ∂u ∼ ∂t and
781
+ 17
782
+
783
+ ∂u ∼ −∂r. Setting uµ ≡ (1, 0, 0, 0) and nµ ≡ (0, 1, 0, 0), the penultimate term in the RHS
784
+ of the above equation simplifies to:
785
+ ˜hµν ˙E ν + ǫµρnνDνBρ ≃ ˜hµνu0∇0E ν + ǫµρn1∇1Bρ ≃ ˜hµν∂uE ν − ǫµρ∂uBρ
786
+ = f(u) ∂u ( ¯˜hµν E ν − ¯ǫµν Bν )
+ (57)
+ where, ˜hµν = f(u) ¯˜hµν and ǫµν = f(u) ¯ǫµν. The terms with bar represent their time indepen-
790
+ dent parts. The above expression vanishes if E ν and Bν are orthogonal to each other in
791
+ the 2-space. As we mentioned earlier (35), in 2-space, the electric and magnetic fields are
792
+ always orthogonal to each other. Similarly, the last term can also be shown to vanish in the
793
+ asymptotic limit. Thus, the above expression reduces to:
794
+ ǫµνA ν = − ǫµν
795
+ 2
796
+ Dνρ(n)
797
+ ρ(n)
798
+ − ǫνα
799
+ 2
800
+
801
+ Σµν + Ωǫµν − Θ
802
+ 2
803
+ ˜hµν
804
+ � Sα
805
+ ρ(n)
806
+ − ǫµν
807
+ 2
808
+
809
+ ˜σρν + ˜ωρν +
810
+ ˜Θ
811
+ 2
812
+ ˜hρν
813
+
814
+
815
+ ρ(n)
816
+ − ǫµρ
817
+ 2ρ(n)
818
+ S ρ A
819
+ (58)
820
+ Third, the above expression provides a nice geometrical understanding of the various contri-
821
+ butions to memory effect. The first term in the RHS corresponds to the change in the EM
822
+ field energy (ρ(n)) along nµ in the 2-space. This does not contain any contribution from the
823
+ kinematical properties of the space-time. In other words, this term will vanish if the EM
824
+ field energy does not change in the 2-space, like a 2-D flat sheet. However, as we show in the
825
+ next section, this is non-zero in flat space-time expressed in spherical coordinates. The next
826
+ two terms in the RHS are proportional to the energy flux (Sα) in the 2-space. However,
827
+ both these terms have different kinematical information of the space-time and vanish for flat
828
+ space-time. The second term in the RHS carries information about shear (Σµν), vorticity
829
+ scalar (Ω) related to nµ and expansion scalar (Θ) corresponding to time-like observer uµ.
830
+ The third term in the RHS carries information about shear (˜σµν), vorticity tensor (˜ωµν) and
831
+ expansion scalar (˜Θ) corresponding to the space-like vector nµ.
832
+ Fourth, as mentioned earlier, we have not included external currents or charges in our
833
+ analysis.
834
+ Hence, the acceleration vector does not have contribution from the external
835
+ sources.
836
+ Hence, the memory vector we obtain is equivalent to the null-kick derived in
837
+ Refs. [18, 19]. It is also important to note that these authors did not obtain the contribu-
838
+ tions due to the kinematical properties of the space-time. However, as we will see in the
839
+ next section, their contribution can be significant.
840
+ 18
841
+
842
+ Lastly, to obtain the memory vector, we need to integrate the above expression w.r.t the
843
+ proper time of the observer — ∆uµ is the memory vector. It is interesting to note that
844
+ initially if the observer has non-zero velocity only along the time direction, at a later time,
845
+ due to the memory effect, there is a non-zero velocity in the 2-space.
846
+ V.
847
+ APPLICATION TO SPECIFIC SPACE-TIMES
848
+ In the previous section, we obtained a master equation for the EM vector for an arbitrary
849
+ 4-D space-time using 1 + 1 + 2-formalism. As we discussed, the memory vector has three
850
+ distinct contributions. In order to illustrate this fact, we consider specific examples and
851
+ obtain the memory vector. In this section we obtain memory vector for flat, FLRW, pp-
852
+ wave and Kerr space-times.
853
+ A.
854
+ Minkowski space-time
855
+ In order to compare the master equation with the existing results [18], we first consider
856
+ Minkowski space-time in spherical coordinates:
857
+ ds2 = −dt2 + dr2 + r2 γAB
858
+ (59)
859
+ where,
860
+ γAB =
861
+
862
+  1
863
+ 0
864
+ 0 sin2 θ
865
+
866
+
867
+ (60)
868
+ is the metric describing unit 2-sphere. In Minkowski space-time, the 4-velocity of the time-
869
+ like congruence observer is uµ ≡ (1, 0, 0, 0) and the space-like vector is nµ ≡ (0, 1, 0, 0).
870
+ Since ∇µuν = 0 and ∇µnν = 0, the kinematics quantities, defined in Sec. (II A, II B) vanish
871
+ for the Minkowski space-time. Hence only the first term in Eq. (56) will be non-zero, i. e.,
872
+ A ν Flat = −(1/2) Dν ρn / ρn .
+ (61)
879
+ As mentioned earlier, the acceleration vector corresponds to acceleration in the 2-Sphere.
880
+ Hence, it is appropriate to switch to the 2-Sphere index:
881
+ A A = uµ∇µuA = u0∂0uA + 2u0ΓA
882
+ 0 BuB .
883
+ 19
884
+
885
+ Since the 4-velocity uµ is zero in the 2-Sphere, we have A A = u0∂0uA = ∂tuA. In null
886
+ coordinate, this becomes A A = ∂tuA. Substituting the above expression in Eq. (61) and
887
+ integrating in the null coordinate, we have:
888
+ ∆uA ≡ ∫ du A A = −(1/2) ∫ du ( DA ρn ) / ρn .
+ (62)
897
+ The above expression is velocity kick w.r.t the Eulerian observers. To compare this with
898
+ the net momentum (kick) vector as seen by the asymptotic static observers (Lagrangian
899
+ observers), we need to do a coordinate transformation. Specifically, we need to transform
900
+ from coordinate basis
901
+
902
+ ⃗e θ,⃗e φ�
903
+ to orthogonal coordinate basis
904
+
905
+ ˆθ, ˆφ
906
+
907
+ . In terms of
908
+
909
+ ˆθ, ˆφ
910
+
911
+ ,
912
+ we have ∆⃗u ≡ ∆uµ⃗eµ, where, ⃗e θ = ˆθ/r , ⃗e φ = ˆφ/(r sin θ). Thus, the velocity kick w.r.t the
913
+ asymptotic static observers is given by:
914
+ ∆⃗uFlat = (1/r) [ ∆uθ ˆθ + ( ∆uφ / sin θ ) ˆφ ]
+ (63)
922
+ Interestingly, the EM memory vector in Minkowski space-time is inversely proportional to r
923
+ and matches with Ref. [18]. This passes the first test that the master equation (56) indeed
924
+ describes the EM memory vector. In the rest of this section, we obtain the memory vector
925
+ for non-flat geometries and show the robustness of our approach.
926
+ B.
927
+ FLRW space-time
928
+ The conformally flat FLRW metric in spherical coordinates is:
929
+ ds2 = a(η)2 �
930
+ −dη2 + dr2 + r2 γAB
931
+
932
+ (64)
933
+ where, the conformal time (η) is related to the cosmic time (t) by dt = a(η) dη. In 1 + 3
934
+ formalism, the fundamental observer with time-like 4−velocity in FLRW metric is uµ =
935
+ dxµ/dt = dxµ/(a(η)) dη = ( 1, 0, 0, 0 ) /a(η).
936
+ For this choice of observer, the 3−space
937
+ projection tensor (hµν) orthogonal to uµ is:
938
+ hµν =
939
+
940
+  a2(η)
941
+ 0
942
+ 0
943
+ a2(η) r2 γAB
944
+
945
+  .
946
+ (65)
947
+ Since the FLRW line-element is homogeneous and isotropic, only the expansion scalar (Θ)
948
+ is non-zero:
949
+ Θ = 3H (η)
950
+ a(η)
951
+ where
952
+ H = a′(η)
953
+ a(η)
954
+ 20
955
+
956
+ where ′ refers to derivative w.r.t. η. Other kinematic quantities vanish, i. e., σµν = ωµν = 0.
957
+ We now split the 3-space into 1 + 2 by choosing the following space-like vector nµ =
958
+ (0, 1, 0, 0)/a(η). This satisfies the conditions: nµnµ = 1 and uµnµ = 0. Repeating the
959
+ steps discussed in Sec. (II B) for the line-element (64), we get:
960
+ ˜Θ =
961
+ 2
962
+ a(η)
963
+ 1
964
+ r , ˜σµν = ˜ωµν = 0.
965
+ It is important to note that while Θ is a function of η only, ˜Θ depends on both η and r.
966
+ Also, Θ depends on the Hubble parameter H , while ˜Θ is inversely proportional of r. Hence,
967
+ at large distances, ˜Θ decays faster compared to Θ.
968
+ Substituting the above expressions in Eq. (58), we have:
969
+ A ν FLRW = −(1/2) Dν ρn / ρn + [ 1/(4 ρn) ] S ν (Θ − ˜Θ) .
+ (66)
979
+ Like Minkowski space-time, A ν will have components only in the 2-Sphere. Using the
980
+ fact that the fundamental observers have zero velocity in the 2-Sphere and repeating the
981
+ earlier analysis, we have
982
+ A A = u0∂0uA =
983
+ 1
984
+ a(η)
985
+ ∂uA
986
+ ∂η .
987
+ In terms of the null coordinate u(≡ η − r), we have:
988
+ A A =
989
+ 1
990
+ a(u)
991
+ ∂uA
992
+ ∂u .
993
+ Substituting the above expression in Eq. (66), we have:
994
+ ∂uA
995
+ ∂u = − a(u)
996
+ 2
997
+ DAρn
998
+ ρn
999
+ + a(u)
1000
+ 4 ρn
1001
+ S A(Θ − ˜Θ) .
1002
+ (67)
1003
+ Integrating the above expression w.r.t u, leads to the following memory vector:
1004
+ ∆uA
1005
+ FLRW = −1
1006
+ 2
1007
+
1008
+ du a(u)
1009
+ ρn
1010
+ DAρn + 1
1011
+ 4
1012
+
1013
+ du a(u)
1014
+ ρn
1015
+ S A(Θ − ˜Θ)
1016
+ (68)
1017
+ This is the expression for the memory vector in FLRW space-time regarding which we want
1018
+ to highlight the following points: First, unlike Minkowski space-time, here the fundamental
1019
+ observers are Eulerian, and hence, we do not have to transform the above expression to
1020
+ Lagrangian observers. Second, our results differ from the results of Ref. [34]. In Ref. [34],
1021
+ the authors show that the EM memory effect in FLRW differs from the Minkowski only by
1022
+ the conformal factor a(η) or a(u). In other words, their analysis did not account for the geo-
1023
+ metric contribution to the memory effect. As mentioned earlier, the geometric contribution
1024
+ 21
1025
+
1026
+ leads to a non-zero energy flux (S A) contribution. Also note that the ordinary memory
1027
+ derived in Ref. [34] is not present in Eq. (68) as we have assumed any external charge or
1028
+ current to be zero. Third, we find that ρ(n) and the energy flux (S A) contribute oppositely.
1029
+ It will be interesting to see whether the two contributions nullify the EM memory.
1030
+ C.
1031
+ pp-wave space-times
1032
+ In this subsection, we derive the EM memory for a special kind of plane-fronted wave
1033
+ with parallel rays (pp-waves) called plane-wave metric [57]:
1034
+ ds2 = −2dudv − F(u, x, y) du2 + dx2 + dy2
1035
+ (69)
1036
+ where, F(u, x, y) = A(u)(x2 − y2) + 2B(u)xy describes the plane wave and A(u), B(u) are
1037
+ arbitrary functions such that F > 0. Note that u, v are not light-cone coordinates. u is
1038
+ time-like coordinate and v is a null coordinate.
1039
+ We split the above 4-D space-time into 1 + 3 form and later into 1 + 1 + 2-form by
1040
+ considering the following time-like velocity vector (uµ) and space-like vector (nµ):
1041
+ uµ ≡
1042
+
1043
+ F(u, x, y)(−1/2), 0, 0, 0
1044
+
1045
+ ,
1046
+ nµ ≡
1047
+
1048
+ F(u, x, y)(−1/2), − F(u, x, y)(1/2), 0, 0
1049
+
1050
+ .
1051
+ For the above choice of time-like vector, the 3-space projection tensor (hµν) is:
1052
+ hµν =
1053
+
1054
+ 
1055
+ 0
1056
+ 0
1057
+ 0 0
1058
+ 0
1059
+ 1
1060
+ F(u, x, y) 0 0
1061
+ 0
1062
+ 0
1063
+ 1 0
1064
+ 0
1065
+ 0
1066
+ 0 1
1067
+
1068
+ 
1069
+ (70)
1070
+ Substituting these in the definitions in Sec. (II), only non-zero quantity is the expansion
1071
+ scalar (Θ):
1072
+ Θ = −
1073
+ (x2 − y2) A′(u) + 2xy B′(u)
1074
+ 2 (2B(u) xy + A(u) (x2 − y2) )3/2 .
1075
+ (71)
1076
+ The non-zero projection tensor ˜hµν components in the 2-space are ˜hxx = 1, ˜hyy = 1. Thus,
1077
+ the memory vector for the special kind of pp-wave space-times is:
1078
+ A ν
1079
+ PP = − 1
1080
+ 2
1081
+ Dνρn
1082
+ ρn
1083
+ + Θ
1084
+ 4 ρn
1085
+ S ν .
1086
+ (72)
1087
+ 22
1088
+
1089
+ Here, the acceleration of the time-like observer is confined to the x − y plane, i. e.,
1090
+ A A
1091
+ PP = − 1
1092
+ 2
1093
+ DAρn
1094
+ ρn
1095
+ + Θ
1096
+ 4 ρn
1097
+ S A ,
1098
+ (73)
1099
+ where, the index A, B corresponds to (x, y). Evaluating the acceleration vector along x
1100
+ and y, we have:
1101
+ A (PP)
1102
+ x(y) = − 1
1103
+ 2 ρn
1104
+ ∂x(y) (ρn) + Θ
1105
+ 4 ρn
1106
+ Sx(y) .
1107
+ (74)
1108
+ Integrating the above equation w.r.t u, we have:
1109
+ ∆uPP
1110
+ x(y) = −1
1111
+ 2
1112
+
1113
+ du ∂x(y) (ρn)
1114
+ ρn
1115
+ + Θ
1116
+ 4
1117
+
1118
+ duSx(y)
1119
+ ρn
1120
+ .
1121
+ (75)
1122
+ The above expression for the velocity kick is for a generic plane-wave metric. To gain
1123
+ some physical intuition, we consider two specific forms — Penrose limit of the Schwarzschild
1124
+ and FLRW space-times [57]. For Schwarzschild space-time, we have
1125
+ A(u) =
1126
+ 6
1127
+ 25u2;
1128
+ B(u) = 0
1129
+ Substituting these in Eq. (71), we have:
1130
+ ΘPP,Sch =
1131
+ 5
1132
+
1133
+ 6(x2 − y2)
1134
+ .
1135
+ It is interesting to note that although the space-time metric does not differentiate between
1136
+ the two spatial coordinates (x, y), in order for Θ to be real, the above expression demands
1137
+ that x > y. Thus, velocity kick due to EM wave in PP-wave limit of Schwarzschild space-
1138
+ time can only occur if x > y and is given by:
1139
+ ∆uPP Sch
1140
+ x(y)
1141
+ = −1
1142
+ 2
1143
+
1144
+ du ∂x(y) (ρn)
1145
+ ρn
1146
+ +
1147
+ 5
1148
+ 4
1149
+
1150
+ 6(x2 − y2)
1151
+
1152
+ duSx(y)
1153
+ ρn
1154
+ .
1155
+ (76)
1156
+ In the case of Penrose limit of FLRW space-time with power-law scale factor a(t) ∼ th, we
1157
+ have:
1158
+ A(u) = −
1159
+ h
1160
+ (1 + h)u2,
1161
+ B(u) = 0 .
1162
+ Substituting these in Eq. (71), we have:
1163
+ ΘPP,FLRW =
1164
+
1165
+ (1 + h)
1166
+ h(y2 − x2);
1167
+ ∆uPP FLRW
1168
+ x(y)
1169
+ = −1
1170
+ 2
1171
+
1172
+ du ∂x(y) (ρn)
1173
+ ρn
1174
+ +
1175
+
1176
+ (1 + h)
1177
+ 4
1178
+
1179
+ h(y2 − x2)
1180
+
1181
+ duSx(y)
1182
+ ρn
1183
+ .
1184
+ (77)
1185
+ 23
1186
+
1187
+ Here again, we see that in order for Θ to be real, the above expression demands that y > x.
1188
+ Thus, velocity kick due to EM wave in PP-wave limit of FLRW space-time occurs in a
1189
+ different region of the 2-space compared to Schwarzschild. Thus, EM memory has a distinct
1190
+ signature for different space-times and can potentially be used as a probe.
1191
+ D.
1192
+ Kerr space-time
1193
+ In this section, we derive the memory effect in Kerr space-time.
1194
+ In Boyer-Lindquist
1195
+ coordinates (t, r, χ, φ), the Kerr space-time is:
1196
+ ds2 =
1197
+
1198
+ 2mr
1199
+ r2 + a2χ2 − 1
1200
+
1201
+ dt2 +
1202
+
1203
+ r2 + a2χ2
1204
+ r2 − 2mr + a2
1205
+
1206
+ dr2 +
1207
+ �r2 + a2χ2
1208
+ 1 − χ2
1209
+
1210
+ dχ2
1211
+
1212
+ �4mar (1 − χ2)
1213
+ r2 + a2χ2
1214
+
1215
+ dtdϕ +
1216
+
1217
+ 1 − χ2� �
1218
+ r2 + a2 + 2ma2r (1 − χ2)
1219
+ r2 + a2χ2
1220
+
1221
+ dϕ2 .
1222
+ (78)
1223
+ where χ ≡ cos θ. In this case, the time-like observer 4-velocity (uµ) and the space-like vector
1224
+ (nµ) are [58]:
1225
+ uµ =
1226
+ ��
1227
+ r2 − 2mr + a2
1228
+ r2 + a2χ2
1229
+ , 0, 0, 0
1230
+
1231
+ , nµ =
1232
+
1233
+ 0,
1234
+
1235
+ r2 − 2mr + a2
1236
+ r2 + a2χ2
1237
+ , 0, 0
1238
+
1239
+ .
1240
+ We give below the kinematical quantities (discussed in Sec. (II B)) for Kerr space-time in
1241
+ 1 + 1 + 2 formalism obtained in Ref. [58]:
1242
+ Θ = 0;
1243
+ Σµν = 0 ;
1244
+ (79)
1245
+ Ω = −2marχ
1246
+
1247
+ L
1248
+ J
1249
+
1250
+ K 3
1251
+ ;
1252
+ ˜Θ =
1253
+ W
1254
+ J
1255
+
1256
+ K 3L
1257
+ ;
1258
+ (80)
1259
+ ˜ωµν = ˜ωǫµν = 0;
1260
+ A = −mD
1261
+
1262
+ L
1263
+ J
1264
+
1265
+ K 3 ;
1266
+ (81)
1267
+ ˜σµν =
1268
+
1269
+ 
1270
+ 0 0
1271
+ 0
1272
+ 0
1273
+ 0 0
1274
+ 0
1275
+ 0
1276
+ 0 0 −1
1277
+ 2
1278
+ a2(m−r)
1279
+
1280
+ K
1281
+ J
1282
+
1283
+ L
1284
+ 0
1285
+ 0 0
1286
+ 0
1287
+ 1
1288
+ 2
1289
+ a2(m−r)M 2√
1290
+ L K
1291
+ J 2
1292
+
1293
+ 
1294
+ (82)
1295
+ where,
1296
+ M = χ2 − 1;
1297
+ D = −r2 + a2χ2;
1298
+ L = r2 − 2mr + a2
1299
+ (83)
1300
+ J = r2 − 2mr + a2χ2;
1301
+ K = r2 + a2χ2
1302
+ (84)
1303
+ 24
1304
+
1305
+ W = 2r3(r − 2m)2 + a4χ2 �
1306
+ m + r − mχ2 + rχ2�
1307
+ + a2r2 �
1308
+ −3m + r + χ2(3r − 5m)
1309
+
1310
+ (85)
1311
+ Substituting these expressions in Eq. (58), and noting that the memory vector lies in
1312
+ the 2-D surface, we get:
1313
+ A A = − 1
1314
+ 2
1315
+ DAρ(n)
1316
+ ρ(n)
1317
+ − Ω
1318
+ 2
1319
+ ǫAB SB
1320
+ ρ(n)
1321
+ − 1
1322
+ 2
1323
+
1324
+ ˜σAB +
1325
+ ˜Θ
1326
+ 2
1327
+ ˜hAB
1328
+
1329
+ SB
1330
+ ρ(n)
1331
+
1332
+ A
1333
+ 2ρ(n)
1334
+ S A
1335
+ (86)
1336
+ This is the EM memory vector for an Eulerian observer in Kerr space-time. Note that this
1337
+ is a generic result for any value of angular momentum. For a better physical insight, we
1338
+ consider a → 0 limit. Substituting a → 0 in Eqs. (79 - 85), we have
1339
+ M0 = χ2 − 1;
1340
+ D0 = −r2;
1341
+ L0 = r2 − 2mr
1342
+ (87)
1343
+ J0 = r2 − 2mr;
1344
+ K0 = r2;
1345
+ W0 = 2r3(r − 2m)2
1346
+ (88)
1347
+ Ω0 = ˜σ0
1348
+ µν = 0;
1349
+ ˜Θ0 = 2
1350
+
1351
+ (r − 2m)
1352
+ r3
1353
+ ;
1354
+ A =
1355
+ m
1356
+
1357
+ r3(r − 2m)
1358
+ (89)
1359
+ Substituting the above quantities in Eq. (86), we have:
1360
+ A A = − 1
1361
+ 2
1362
+ DAρ(n)
1363
+ ρ(n)
1364
+ − 1
1365
+ 2
1366
+
1367
+ r − 2m
1368
+ r3
1369
+ S A
1370
+ ρ(n)
1371
+
1372
+ 1
1373
+ 2ρ(n)
1374
+ m
1375
+
1376
+ r3(r − 2m)
1377
+ S A .
1378
+ (90)
1379
+ This is the EM memory vector for an Eulerian observer in Schwarzschild space-time, regard-
1380
+ ing which we want to mention the following points: First, in the limit r → ∞, it reduces to the
1381
+ Minkowski space-time expression (61). Second, in the limit r → ∞, the subleading term is
1382
+ proportional to r−1. Third, to derive the memory vector ∆uA, we have to switch to the null
1383
+ time coordinate u = t − r and integrate Eq. (90) with respect to u at the asymptotic limit.
1384
+ Lastly, to evaluate the memory effect experienced by the static asymptotic (Lagrangian)
1385
+ observer, we need to do the transformation from
1386
+
1387
+ ⃗e θ,⃗e φ�
1388
+ to the orthogonal coordinate basis
1389
+
1390
+ ˆθ, ˆφ
1391
+
1392
+ like in Sec. (V A).
1393
+ VI.
1394
+ CONCLUSIONS
1395
+ In this work, we have derived a master equation for electromagnetic memory in an arbi-
1396
+ trary space-time. We used the covariant formalism to obtain the same. More specifically,
1397
+ we used the 1 + 1 + 2 covariant formalism. The 1 + 1 + 2 decomposition of space-time is
1398
+ a natural extension of the 1 + 3 formalism in which the three-space is further decomposed
1399
+ 25
1400
+
1401
+ using a given spatial direction. This choice of covariant formalism is because the net mo-
1402
+ mentum (kick) vector lies on the 2-D surface for arbitrary space-time. Also, the electric and
1403
+ magnetic fields are transverse to the direction of propagation of the passing EM wave.
1404
+ The EM memory (58) has three distinct contributions: First contribution is due to the
1405
+ change in the EM field energy (ρ(n)) along nµ in the 2-space. This is non-zero for Minkowski
1406
+ space-time. The second contribution is proportional to the energy flux (Sα) in the 2-space.
1407
+ This has kinematical information of the space-time and vanishes for the flat space-time.
1408
+ The third contribution is proportional to the acceleration A along the time-like vector uµ.
1409
+ To our understanding, the earlier approaches could not isolate the different contributions
1410
+ to the EM memory as done in this work.
1411
+ We then obtained the EM memory for different space-times. In the case of FLRW space-
1412
+ time, we showed that the earlier analysis did not account for the geometric contribution
1413
+ to the memory effect [34]. Specifically, their analysis did not account for the geometric
1414
+ contribution leading to a non-zero energy flux (S A) contribution. We have also obtained
1415
+ the EM memory for Kerr space-time. We also showed that the EM memory has a distinct
1416
+ signature for different pp wave space-times and can potentially be used as a probe.
1417
+ It would be interesting to extend our analysis for black holes with multiple horizons
1418
+ and those that are not asymptotically flat. These may be particularly relevant for using
1419
+ EM memory as a probe to PBH. Finally, our analysis points to the possibility of using
1420
+ 1 + 1 + 2 covariant formalism to understand gravitational memory. These are currently
1421
+ under investigation.
1422
+ ACKNOWLEDGMENTS
1423
+ The authors thank S. Mahesh Chandran and A. Kushwaha for comments on the earlier
1424
+ version of the manuscript. The work is supported by the SERB-Core Research grant.
1425
+ [1] B. S. Sathyaprakash and B. F. Schutz, “Physics, Astrophysics and Cosmology with
1426
+ Gravitational Waves,” Living Rev. Rel. 12 (2009) 2, arXiv:0903.0338 [gr-qc].
1427
+ [2] K. G. Arun and A. Pai, “Tests of General Relativity and Alternative theories of gravity using
1428
+ 26
1429
+
1430
+ Gravitational Wave observations,” Int. J. Mod. Phys. D 22 (2013) 1341012,
1431
+ arXiv:1302.2198 [gr-qc].
1432
+ [3] L. Barack et al., “Black holes, gravitational waves and fundamental physics: a roadmap,”
1433
+ Class. Quant. Grav. 36 no. 14, (2019) 143001, arXiv:1806.05195 [gr-qc].
1434
+ [4] S. Shankaranarayanan and J. P. Johnson, “Modified theories of gravity: Why, how and
1435
+ what?,” Gen. Rel. Grav. 54 no. 5, (2022) 44, arXiv:2204.06533 [gr-qc].
1436
+ [5] Y. B. Zel’dovich and A. G. Polnarev, “Radiation of gravitational waves by a cluster of
1437
+ superdense stars,” Sov. Astron. 18 (1974) 17.
1438
+ [6] L. Smarr, “Gravitational radiation from distant encounters and from head-on collisions of
1439
+ black holes: The zero-frequency limit,” Phys. Rev. D 15 (Apr, 1977) 2069–2077.
1440
+ https://link.aps.org/doi/10.1103/PhysRevD.15.2069.
1441
+ [7] S. Kovacs Jr and K. Thorne, “The generation of gravitational waves. iv-bremsstrahlung,” The
1442
+ Astrophysical Journal 224 (1978) 62–85.
1443
+ [8] V. Braginsky and L. Grishchuk, “Kinematic Resonance and Memory Effect in Free Mass
1444
+ Gravitational Antennas,” Sov. Phys. JETP 62 (1985) 427–430.
1445
+ [9] V. B. Braginskii and K. S. Thorne, “Gravitational-wave bursts with memory and
1446
+ experimental prospects,” Nature 327 no. 6118, (May, 1987) 123–125.
1447
+ [10] K. S. Thorne, “Gravitational-wave bursts with memory: The christodoulou effect,”
1448
+ Phys. Rev. D 45 (Jan, 1992) 520–524.
1449
+ https://link.aps.org/doi/10.1103/PhysRevD.45.520.
1450
+ [11] A. G. Wiseman and C. M. Will, “Christodoulou’s nonlinear gravitational-wave memory:
1451
+ Evaluation in the quadrupole approximation,” Phys. Rev. D 44 (Nov, 1991) R2945–R2949.
1452
+ https://link.aps.org/doi/10.1103/PhysRevD.44.R2945.
1453
+ [12] M. Favata, “The gravitational-wave memory effect,” Class. Quant. Grav. 27 (2010) 084036,
1454
+ arXiv:1003.3486 [gr-qc].
1455
+ [13] L. Bieri and D. Garfinkle, “Perturbative and gauge invariant treatment of gravitational wave
1456
+ memory,” Phys. Rev. D 89 no. 8, (2014) 084039, arXiv:1312.6871 [gr-qc].
1457
+ [14] D. Christodoulou, “Nonlinear nature of gravitation and gravitational wave experiments,”
1458
+ Phys. Rev. Lett. 67 (1991) 1486–1489.
1459
+ [15] M. Favata, “Nonlinear gravitational-wave memory from binary black hole mergers,”
1460
+ 27
1461
+
1462
+ Astrophys. J. Lett. 696 (2009) L159–L162, arXiv:0902.3660 [astro-ph.SR].
1463
+ [16] A. Strominger and A. Zhiboedov, “Gravitational Memory, BMS Supertranslations and Soft
1464
+ Theorems,” JHEP 01 (2016) 086, arXiv:1411.5745 [hep-th].
1465
+ [17] P. M. Zhang, C. Duval, G. W. Gibbons, and P. A. Horvathy, “Soft gravitons and the memory
1466
+ effect for plane gravitational waves,” Phys. Rev. D 96 no. 6, (2017) 064013,
1467
+ arXiv:1705.01378 [gr-qc].
1468
+ [18] L. Bieri and D. Garfinkle, “An electromagnetic analogue of gravitational wave memory,”
1469
+ Class. Quant. Grav. 30 (2013) 195009, arXiv:1307.5098 [gr-qc].
1470
+ [19] J. Winicour, “Global aspects of radiation memory,” Class. Quant. Grav. 31 (2014) 205003,
1471
+ arXiv:1407.0259 [gr-qc].
1472
+ [20] A. Strominger, “Asymptotic Symmetries of Yang-Mills Theory,” JHEP 07 (2014) 151,
1473
+ arXiv:1308.0589 [hep-th].
1474
+ [21] M. Pate, A.-M. Raclariu, and A. Strominger, “Color Memory: A Yang-Mills Analog of
1475
+ Gravitational Wave Memory,” Phys. Rev. Lett. 119 no. 26, (2017) 261602,
1476
+ arXiv:1707.08016 [hep-th].
1477
+ [22] G. Satishchandran and R. M. Wald, “Asymptotic behavior of massless fields and the memory
1478
+ effect,” Phys. Rev. D 99 no. 8, (2019) 084007, arXiv:1901.05942 [gr-qc].
1479
+ [23] L. Bieri, P. Chen, and S.-T. Yau, “The Electromagnetic Christodoulou Memory Effect and
1480
+ its Application to Neutron Star Binary Mergers,” Class. Quant. Grav. 29 (2012) 215003,
1481
+ arXiv:1110.0410 [astro-ph.CO].
1482
+ [24] L. Susskind, “Electromagnetic Memory,” arXiv:1507.02584 [hep-th].
1483
+ [25] Y. Hamada, M.-S. Seo, and G. Shiu, “Electromagnetic Duality and the Electric Memory
1484
+ Effect,” JHEP 02 (2018) 046, arXiv:1711.09968 [hep-th].
1485
+ [26] Y. Hamada and S. Sugishita, “Notes on the gravitational, electromagnetic and axion memory
1486
+ effects,” JHEP 07 (2018) 017, arXiv:1803.00738 [hep-th].
1487
+ [27] P. Mao and W.-D. Tan, “Gravitational and electromagnetic memory,”
1488
+ Phys. Rev. D 101 no. 12, (2020) 124015, arXiv:1912.01840 [gr-qc].
1489
+ [28] N. Jokela, K. Kajantie, and M. Sarkkinen, “Memory effect in Yang-Mills theory,”
1490
+ Phys. Rev. D 99 no. 11, (2019) 116003, arXiv:1903.10231 [hep-th].
1491
+ [29] P. Mao and W.-D. Tan, “Gravitational and electromagnetic memory,”
1492
+ 28
1493
+
1494
+ Phys. Rev. D 101 no. 12, (2020) 124015, arXiv:1912.01840 [gr-qc].
1495
+ [30] P. Mao, “Note on electromagnetic memories,” Phys. Rev. D 104 (2021) 084026,
1496
+ arXiv:2105.06095 [gr-qc].
1497
+ [31] V. Taghiloo and M. H. Vahidinia, “Temporal vs Spatial Conservation and Memory Effect in
1498
+ Electrodynamics,” arXiv:2210.16770 [hep-th].
1499
+ [32] S. Atul Bhatkar, “Effect of a small cosmological constant on the electromagnetic memory
1500
+ effect,” Phys. Rev. D 105 no. 12, (2022) 124028, arXiv:2108.00835 [hep-th].
1501
+ [33] A. Seraj and T. Neogi, “Memory effects from holonomies,” arXiv:2206.14110 [hep-th].
1502
+ [34] M. Enriquez-Rojo and T. Schroeder, “Asymptotic symmetries and memories of gauge
1503
+ theories in FLRW spacetimes,” JHEP 01 (2023) 011, arXiv:2207.13726 [hep-th].
1504
+ [35] O. Heckmann and E. Schucking, “Bemerkungen zur Newtonschen Kosmologie. I. Mit 3
1505
+ Textabbildungen in 8 Einzeldarstellungen,” zap 38 (Jan., 1955) 95.
1506
+ [36] A. Raychaudhuri, “Relativistic Cosmology. I,”
1507
+ Physical Review 98 no. 4, (May, 1955) 1123–1126.
1508
+ [37] H. van Elst and G. F. R. Ellis, “The Covariant approach to LRS perfect fluid space-time
1509
+ geometries,” Class. Quant. Grav. 13 (1996) 1099–1128, arXiv:gr-qc/9510044.
1510
+ [38] G. F. R. Ellis and H. van Elst, “Cosmological models: Cargese lectures 1998,”
1511
+ NATO Sci. Ser. C 541 (1999) 1–116, arXiv:gr-qc/9812046.
1512
+ [39] C. G. Tsagas, A. Challinor, and R. Maartens, “Relativistic cosmology and large-scale
1513
+ structure,” Phys. Rept. 465 (2008) 61–147, arXiv:0705.4397 [astro-ph].
1514
+ [40] G. F. R. Ellis, R. Maartens, and M. A. H. MacCallum, Relativistic Cosmology. Cambridge
1515
+ University Press, 2012.
1516
+ [41] C. A. Clarkson and R. K. Barrett, “Covariant perturbations of Schwarzschild black holes,”
1517
+ Class. Quant. Grav. 20 (2003) 3855–3884, arXiv:gr-qc/0209051.
1518
+ [42] C. A. Clarkson, M. Marklund, G. Betschart, and P. K. S. Dunsby, “The electromagnetic
1519
+ signature of black hole ringdown,” Astrophys. J. 613 (2004) 492–505,
1520
+ arXiv:astro-ph/0310323.
1521
+ [43] C. G. Tsagas, “Electromagnetic fields in curved spacetimes,”
1522
+ Class. Quant. Grav. 22 (2005) 393–408, arXiv:gr-qc/0407080.
1523
+ [44] P. Mavrogiannis and C. G. Tsagas, “How the magnetic field behaves during the motion of a
1524
+ 29
1525
+
1526
+ highly conducting fluid under its own gravity: A new theoretical, relativistic approach,”
1527
+ Phys. Rev. D 104 no. 12, (2021) 124053, arXiv:2110.02489 [gr-qc].
1528
+ [45] S. Singh, G. F. R. Ellis, R. Goswami, and S. D. Maharaj, “Rotating and twisting locally
1529
+ rotationally symmetric spacetimes: a general solution,”
1530
+ Phys. Rev. D 96 no. 6, (2017) 064049, arXiv:1707.06407 [gr-qc].
1531
+ [46] S. Singh, R. Goswami, and S. D. Maharaj, “Existence of conformal symmetries in locally
1532
+ rotationally symmetric spacetimes: Some covariant results,”
1533
+ J. Math. Phys. 60 no. 5, (2019) 052503.
1534
+ [47] C. Hansraj, R. Goswami, and S. D. Maharaj, “Semi-tetrad decomposition of spacetime with
1535
+ conformal symmetry,” Gen. Rel. Grav. 52 no. 6, (2020) 63.
1536
+ [48] S. Singh, D. Baboolal, R. Goswami, and S. D. Maharaj, “Gaussian curvature of spherical
1537
+ shells: a geometric measure of complexity,” Class. Quant. Grav. 39 no. 23, (2022) 235010,
1538
+ arXiv:2206.03828 [gr-qc].
1539
+ [49] P. N. Khambule, R. Goswami, and S. D. Maharaj, “Matching conditions in Locally
1540
+ Rotationally Symmetric spacetimes and radiating stars,”
1541
+ Class. Quant. Grav. 38 no. 7, (2021) 075006, arXiv:2011.00853 [gr-qc].
1542
+ [50] S. Boersma and T. Dray, “Slicing, threading \& parametric manifolds,”
1543
+ Gen. Rel. Grav. 27 (1995) 319–339, arXiv:gr-qc/9407020.
1544
+ [51] M. Alcubierre, Introduction to 3+1 Numerical Relativity. Oxford University Press, 04, 2008.
1545
+ https://doi.org/10.1093/acprof:oso/9780199205677.001.0001.
1546
+ [52] G. F. R. Ellis, “Relativistic cosmology,” Proc. Int. Sch. Phys. Fermi 47 (1971) 104–182.
1547
+ [53] G. F. R. Ellis and M. Bruni, “Covariant and Gauge Invariant Approach to Cosmological
1548
+ Density Fluctuations,” Phys. Rev. D 40 (1989) 1804–1818.
1549
+ [54] G. F. R. Ellis, J. Hwang, and M. Bruni, “Covariant and Gauge Independent Perfect Fluid
1550
+ Robertson-Walker Perturbations,” Phys. Rev. D 40 (1989) 1819–1826.
1551
+ [55] K. Subramanian, “Magnetic fields in the early universe,”
1552
+ Astron. Nachr. 331 (2010) 110–120, arXiv:0911.4771 [astro-ph.CO].
1553
+ [56] K. S. Thorne and D. MacDonald, “Electrodynamics in curved spacetime: 3 + 1 formulation,”
1554
+ Monthly Notices of the Royal Astronomical Society 198 no. 2, (02, 1982) 339–343.
1555
+ https://doi.org/10.1093/mnras/198.2.339.
1556
+ 30
1557
+
1558
+ [57] M. Blau, M. Borunda, M. O’Loughlin, and G. Papadopoulos, “Penrose limits and space-time
1559
+ singularities,” Class. Quant. Grav. 21 (2004) L43, arXiv:hep-th/0312029.
1560
+ [58] C. Hansraj, R. Goswami, and S. D. Maharaj, “A semi-tetrad decomposition of the Kerr
1561
+ spacetime,” arXiv:2109.04162 [gr-qc].
1562
+ 31
1563
+
4dFKT4oBgHgl3EQfRy0L/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4tAzT4oBgHgl3EQfuv3m/content/tmp_files/2301.01697v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
4tAzT4oBgHgl3EQfuv3m/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4tE2T4oBgHgl3EQfkAch/content/tmp_files/2301.03973v1.pdf.txt ADDED
@@ -0,0 +1,1195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.03973v1 [cs.IT] 10 Jan 2023
2
+ Performance of RIS-empowered NOMA-based D2D
3
+ Communication under Nakagami-m Fading
4
+ Mohd Hamza Naim Shaikh, ◦Sultangali Arzykulov, ◦Abdulkadir Celik, ◦Ahmed M. Eltawil, and G. Nauryzbayev
5
+ School of Engineering and Digital Sciences, Nazarbayev University, Nur-Sultan City, 010000, Kazakhstan
6
+ ◦CEMSE Division, King Abdullah University of Science and Technology, Thuwal, 23955, Saudi Arabia
7
+ Emails: {hamza.shaikh, galymzhan.nauryzbayev}@nu.edu.kz,
8
+ ◦{sultangali.arzykulov, abdulkadir.celik, ahmed.eltawil}@kaust.edu.sa
9
+ Abstract—Reconfigurable
10
+ intelligent
11
+ surfaces
12
+ (RISs)
13
+ have
14
+ sparked a renewed interest in the research community envi-
15
+ sioning future wireless communication networks. In this study,
16
+ we analyzed the performance of RIS-enabled non-orthogonal
17
+ multiple access (NOMA) based device-to-device (D2D) wireless
18
+ communication system, where the RIS is partitioned to serve a pair
19
+ of D2D users. Specifically, closed-form expressions are derived for
20
+ the upper and lower limits of spectral efficiency (SE) and energy
21
+ efficiency (EE). In addition, the performance of the proposed
22
+ NOMA-based system is also compared with its orthogonal counter-
23
+ part. Extensive simulation is done to corroborate the analytical
24
+ findings. The results demonstrate that RIS highly enhances the
25
+ performance of a NOMA-based D2D network.
26
+ Keywords— Device-to-device, energy efficiency, non-orthogonal
27
+ multiple access, RIS, spectral efficiency.
28
+ I. INTRODUCTION
29
+ Reconfigurable intelligent surfaces (RISs) have recently been
30
+ emerged as a revolutionary technique to realize the smart and
31
+ programmable wireless environment for the next generation 6G
32
+ systems [1]. Inherently, RIS consists of a planar array of large
33
+ number of passive reflecting elements (REs). These REs can
34
+ reflect the incident signal so that the reflected signal can be
35
+ aligned towards the desired location. Because of their potential
36
+ to transform a hostile wireless environment into an adaptive
37
+ and favorable propagating channel, RISs have received much
38
+ attention from the research community. RIS has the potential
39
+ to enhance spectral efficiency (SE) significantly, and energy
40
+ efficiency (EE) due to the large number of passive REs [2].
41
+ On the other hand, device-to-device (D2D) communication
42
+ is also considered a promising technology proposed in the
43
+ 5G standard that enables direct communications between D2D
44
+ users. In D2D communication, the same time-frequency re-
45
+ sources of cellular users are re-utilized by the D2D users,
46
+ thus, allowing massive access without aggravating the spectrum
47
+ crunch [3]. However, in D2D communication, a successful
48
+ transmission is highly reliant on the propagation environment
49
+ due to the limited power budget available at nodes. Unfortu-
50
+ nately, this restriction limits the applicability of D2D in many of
51
+ the existing scenarios, especially in dense urban environments.
52
+ Since RIS can adapt an unknown channel to a favorable prop-
53
+ agation environment, deploying RISs can effectively alleviate
54
+ this constraint [4]. In [5], the authors have optimized the RIS-
55
+ aided underlay D2D communication to maximize the capacity
56
+ by optimizing RIS phase shifts along with spectrum reuse and
57
+ Blockage
58
+ RIS
59
+ BS
60
+ g2
61
+ DR1
62
+ g1
63
+ g
64
+ DR2
65
+ DT
66
+ h
67
+ h1
68
+ h2
69
+ Fig. 1. Schematic for RIS-empowered D2D Communication.
70
+ transmit beamforming. A joint resource allocation to maximize
71
+ the sum rate of a RIS-assisted D2D underlay cellular network
72
+ was studied in [6]. Likewise, in [7], performance analysis for
73
+ RIS-assisted D2D communication was carried out for underlay
74
+ and overlay modes.
75
+ This paper investigates the performance of a RIS-empowered
76
+ NOMA-based D2D communication system. The proposed sce-
77
+ nario considers a downlink network, where a user nearby the
78
+ base station (BS), is utilized to serve as a D2D transmitter (DT),
79
+ facilitating the communication with a pair of users, i.e., D2D
80
+ receivers (DRs), which were otherwise not accessible by BS.
81
+ DT is deployed with RIS, which comprises M REs. To support
82
+ both DRs, a hard partitioning-based approach is utilized at
83
+ RIS. Unlike [7], where the point-to-point D2D communication
84
+ without a direct link was considered, we consider the novel
85
+ RIS-empowered NOMA-based D2D communication with both
86
+ direct and reflected links. This work’s main contribution can be
87
+ summarized as follows:
88
+ • We obtain the closed-form expressions for the upper and
89
+ lower bounds of ergodic rate for the NOMA pair of the
90
+ proposed RIS-enabled NOMA-based D2D communication
91
+ system. Initially, we formulate the received signal-to-
92
+ interference-plus-noise-ratio (SINR) and then utilize it for
93
+ deriving the closed-form expressions of SE and EE for
94
+ both the DRs;
95
+ • In addition, we illustrate the effect of the distribution
96
+ of REs, the power allocation ratio, and the Nakagami-m
97
+ fading parameters on network performance;
98
+ • Lastly, the proposed RIS-enabled NOMA-based D2D net-
99
+ work is compared to its corresponding OMA counterpart
100
+ and the case without RIS.
101
+
102
+ 围II. SYSTEM MODEL
103
+ Fig. 1 illustrates the system model where a single antenna
104
+ BS tries to communicate with a pair of blocked UEs, i.e.,
105
+ D2D receivers (DRs), denoted as DR1 and DR2. So, a D2D
106
+ transmitter (DT) is utilized to set a reliable communication link.
107
+ Further, the DT-to-DR transmission is assisted via RIS with
108
+ M number of REs. The system model can be regarded as a
109
+ connected D2D-enabled cellular system, where a cluster of UEs
110
+ are brought into coverage through the nearest connected UE1.
111
+ Further, without losing any generality, in this work, we focus
112
+ mainly on D2D communication, i.e., communication from DT
113
+ to DRs.
114
+ In order to support two DRs, RIS is partitioned in two
115
+ sub-RISs, each having M1 and M2 number of REs, with
116
+ M1 = η M, M2 = (1 − η) M, M1 + M2 = M and η
117
+ being the allocation parameter. Similar to [9], [10], a quasi-
118
+ static and flat fading channel is assumed with known channel
119
+ state information (CSI). Further, the BS-to-DT, DT-to-RIS and
120
+ RIS-to-DR channel links can either be line-of-sight (LoS), or
121
+ non-LoS (NLoS) and thus characterized through Nakagami-m
122
+ fading model [8]. The elements of g, g1 and g2 follow the
123
+ Nakagami-m fading model with m0, m1 and m2 as the fading
124
+ parameters. Similarly, the direct link between DT-to-DR is also
125
+ characterized through Nakagami-m fading channel with mhl as
126
+ fading parameter, where l = 1, 2.
127
+ In accordance with the NOMA and RIS concepts, the re-
128
+ ceived signals at DR1, r1, and DR2, r2, can be expressed as
129
+ r_1 = (h_1 + \bar{g}_1 \Phi_1 g_1)(\beta_1 x_1 + \beta_2 x_2)\sqrt{P_r} + N_o,  (1)
+ r_2 = (h_2 + \bar{g}_2 \Phi_2 g_2)(\beta_1 x_1 + \beta_2 x_2)\sqrt{P_r} + N_o,  (2)
137
+ where x1 and x2 represent the desired DR1 and DR2 signals,
138
+ respectively. Likewise, β1 and β2 are the power allocation
140
+ coefficients for DR1 and DR2. Further, β1 and β2 adhere to
140
+ the NOMA constraint, i.e., \beta_1^2 + \beta_2^2 = 1. Further, P_r and
144
+ No denote the transmit power at DT and the additive white
145
+ Gaussian noise (AWGN), with No ∈ CN(0, σ2).
146
+ Now the received signal of the DRs
147
+ can be maxi-
148
+ mized through proper phase shifting at the RIS. Mathe-
149
+ matically, |gΦ1g1| =
150
+ ����M1
151
+ i=1 giδiejθi gi
152
+ 1
153
+ ��� and |gΦ2g2| =
154
+ ����M2
155
+ i=1 giδi ejθigi
156
+ 2
157
+ ��� maximizes the received signal power at
158
+ DRs. Here, gi, gi
159
+ 1 and gi
160
+ 2 denotes the i-th element of g, g1
161
+ and g2, respectively. Thus, re-configuring θi to ˜θ maximizes the
162
+ received power. The corresponding channel gain2 to ˜θ can be
163
+ expressed as
164
+ |H1|2 = |h1 + ¯g1Φ1g1|2 =
165
+
166
+ |h1| +
167
+ M1
168
+
169
+ i=1
170
+ ��¯gi
171
+ 1
172
+ �� ��gi
173
+ 1
174
+ ��
175
+ �2
176
+ ,
177
+ (3)
178
+ |H2|2 = |h2 + ¯g2Φ2g2|2 =
179
+
180
+ |h2| +
181
+ M2
182
+
183
+ i=1
184
+ ��¯gi
185
+ 2
186
+ �� ��gi
187
+ 2
188
+ ��
189
+ �2
190
+ .
191
+ (4)
192
+ 1There can be multiple users within the cluster, however, due to complexity
193
+ requirements, we restrict ourselves to the two-user case, i.e., two DRs [8].
194
+ 2Without losing any generality, δi = 1, ∀ i is assumed.
195
+ III. PERFORMANCE ANALYSIS
196
+ This section evaluates the bound on the ergodic rate of DRs.
197
+ Further, the SE and EE for RIS-enabled NOMA-based D2D is
198
+ formulated considering the fading parameter, power allocation,
199
+ and REs distribution. Initially, the SINR for both the DRs is
200
+ formulated and later on we utilize it in evaluating the SE and
201
+ EE.
202
+ Considering the DR1 signal as an interference, DR2 will
203
+ decode the received signal with the following SINR
204
+ SINR_{DR_2} = \frac{|H_2|^2 \beta_2^2 P_r}{|H_2|^2 \beta_1^2 P_r + N_o} = \frac{|H_2|^2 \beta_2^2 \rho_r}{|H_2|^2 \beta_1^2 \rho_r + 1},  (5)
215
+ (5)
216
+ where \rho_r = P_r/N_o is the transmit SNR at DT.
217
+ Likewise, at DR1, applying SIC, initially, DR1 will decode
218
+ the received signal of DR2. SINR for it can be expressed as
219
+ SINR_{DR_1 \to DR_2} = \frac{|H_1|^2 \beta_2^2 \rho_r}{|H_1|^2 \beta_1^2 \rho_r + 1}.  (6)
225
+ After decoding and canceling the signal of DR2, DR1 can
226
+ decode its own signal with SINR of
227
+ SINR_{DR_1} = \beta_1^2 |H_1|^2 \rho_r.  (7)
230
+ A. Channel Characterization
231
+ Now the channel gains, H1 and H2, as defined in (3) and
232
+ (4), respectively, do not conform to any known closed-form
233
+ distribution. Thus, for the sake of simplification of the analytical
234
+ performance, we can approximate G1 and G2 (for M1 ≫ 1
235
+ and M2 ≫ 1) as |H1|2 =
236
+ ��M1
237
+ i=1
238
+ ��¯gi
239
+ 1
240
+ �� ��gi
241
+ 1
242
+ ��
243
+ �2
244
+ and |H2|2 =
245
+ ��M2
246
+ i=1
247
+ ��¯gi
248
+ 2
249
+ �� ��gi
250
+ 2
251
+ ��
252
+ �2
253
+ , respectively. The distribution function for
254
+ the channel gain, |H|2, can be defined for g ≥ 0 as [11]
255
+ f_{|H|^2}(y) = \frac{y^{(a-1)/2}}{2\, b^{a+1}\, \Gamma(a+1)} \exp\left(-\frac{\sqrt{y}}{b}\right),  (8)
265
+ F_{|H|^2}(y) = \frac{\gamma\left(a+1, \frac{\sqrt{y}}{b}\right)}{\Gamma(a+1)}.  (9)
274
+ Here,
275
+ a
276
+ and
277
+ b
278
+ are
279
+ the
280
+ variables
281
+ defined
282
+ as
283
+ a
284
+ =
285
+ m0Γ(m0)2MmlΓ(ml)2
286
+ m0Γ(m0)2mlΓ(ml)2−Γ(m0+ 1
287
+ 2 )2Γ(ml+ 1
288
+ 2 )2 − N − 1 and b
289
+ =
290
+ m0Γ(m0)2mlΓ(ml)2−Γ(m0+ 1
291
+ 2 )2Γ(ml+ 1
292
+ 2 )2
293
+ � m0
294
+ Ωg Γ(m0)Γ(m0+ 1
295
+ 2 )
296
+ � ml
297
+ Ωgl Γ(ml)Γ(ml+ 1
298
+ 2 ), with N ∈ {M1, M2}
299
+ and l ∈ {1, 2}, for H ∈ {H1, H2}. Further, Γ(·) represents
300
+ the Gamma function and γ(·, ·) indicates the lower incomplete
301
+ Gamma function.
302
+ B. Ergodic Rate
303
+ The ergodic rates for DR1 and DR2 can be formu-
304
+ lated as RDR1 = E [log2 (1 + SINRDR1)] and RDR2 =
305
+ E [log2 (1 + SINRDR2)],
306
+ respectively.
307
+ Since
308
+ the
309
+ channel
310
+ gain’s exact distribution is unknown, the expectations are math-
311
+ ematically intractable, and thus a closed-form expression may
312
+ not be derived. Hence, we resort to approximating the ergodic
313
+ rates of DRs with tight upper and lower bounds. Specifically,
314
+ the upper bound is derived by invoking Jensen’s inequality, and
315
+ the lower bound is derived by utilizing the approximate PDF
316
+
317
+ Rl
318
+ DR1 =
319
+ 1
320
+ ln (2) Γ(a + 1)
321
+
322
+ π csc (aπ/2) F1
323
+ (a + 2) (β1b)a+2 (ρr)
324
+ a
325
+ 2 +1 +
326
+ π sec (aπ/2) F1
327
+ (a + 1) (β1b)a+1 (ρr)
328
+ a+1
329
+ 2
330
+ +2 a (a − 1) ψ(0) (a + 1) +
331
+
332
+ a2 − a
333
+
334
+ Γ(a − 1) ln
335
+
336
+ b2 β2
337
+ 1 ρr
338
+
339
+ + Γ(a − 1) F3
340
+
341
+ (10)
342
+ Rl
343
+ DR2 =
344
+ 1
345
+ ln (2)Γ(a + 1)
346
+
347
+ π csc (aπ/2)
348
+ (a + 2) ba+2
349
+
350
+ F4
351
+ (c1ρr)
352
+ a
353
+ 2 +1 −
354
+ F5
355
+ (c2ρr)
356
+ a
357
+ 2 +1
358
+
359
+ + π sec (aπ/2)
360
+ (a + 1) ba+1
361
+
362
+ F6
363
+ (c1ρr)
364
+ a+1
365
+ 2
366
+
367
+ F7
368
+ (c2ρr)
369
+ a+1
370
+ 2
371
+
372
+ + Γ(a − 1) {F8 − F9} +
373
+
374
+ a2 − a
375
+
376
+ Γ(a − 1) ln
377
+ �c1
378
+ c2
379
+ ��
380
+ (11)
381
+ Ru
382
+ DR1 = log2
383
+
384
+ 1 + Ξ1 Ωh1 + M1 Ξ1 Ωm0 Ωm1 + M1 (M1 − 1) Ξ1
385
+ Ωm0
386
+ m0
387
+
388
+ Γ
389
+
390
+ m0 + 1
391
+ 2
392
+
393
+ Γ (m0)
394
+ �2
395
+ Ωm1
396
+ m1
397
+
398
+ Γ
399
+
400
+ m1 + 1
401
+ 2
402
+
403
+ Γ (m1)
404
+ �2
405
+ + 2 M1 Ξ1
406
+ Γ(mh1 + 1
407
+ 2)
408
+ Γ(mh1)
409
+
410
+ Ωmh1
411
+ mh1
412
+ Γ(m0 + 1
413
+ 2)
414
+ Γ(m0)
415
+
416
+ Ωm0
417
+ m0
418
+ Γ(m1 + 1
419
+ 2)
420
+ Γ(m1)
421
+
422
+ Ωm1
423
+ m1
424
+
425
+ (12)
426
+ Ru
427
+ DR2 = log2
428
+
429
+ 1 + Ξ2Ωh2 + M2Ξ2Ωm0Ωm2 + M2 (M2 − 1) Ξ2Ωm0
430
+
431
+ Γ
432
+
433
+ m0 + 1
434
+ 2
435
+ ��2 Ωm2
436
+
437
+ Γ
438
+
439
+ m2 + 1
440
+ 2
441
+ ��2
442
+ m0 {Γ (m0)}2 m2 {Γ (m2)}2
443
+ +
444
+ 2M2Ξ2Γ(mh2 + 1
445
+ 2)Γ(m0 + 1
446
+ 2)Γ(m2 + 1
447
+ 2)�Ωmh2 Ωm0Ωm2
448
+ Γ(mh2)Γ(m0)Γ(m2)√mh2m0m2
449
+
450
+ − log2 [1 + Ξ3Ωh2 + M2Ξ3Ωm0Ωm2
451
+ +M2 (M2−1) Ξ3Ωm0
452
+
453
+ Γ
454
+
455
+ m0+ 1
456
+ 2
457
+ ��2 Ωm2
458
+
459
+ Γ
460
+
461
+ m2+ 1
462
+ 2
463
+ ��2
464
+ m0 {Γ (m0)}2 m2 {Γ (m2)}2
465
+ +
466
+ 2M2Ξ3Γ(mh2 + 1
467
+ 2)Γ(m0+ 1
468
+ 2)Γ(m2+ 1
469
+ 2)�Ωmh2Ωm0Ωm2
470
+ Γ(mh2)Γ(m0)Γ(m2)√mh2m0m2
471
+
472
+ (13)
473
+ as described in (8). The bounds on the ergodic rate of DRs
474
+ are evaluated following a series of mathematical manipulations.
475
+ The following Lemmas present the upper and lower bound for
476
+ the proposed RIS-enabled NOMA-based D2D communication
477
+ system.
478
+ Lemma 1. The lower bound on the ergodic rates of DR1 and
479
+ DR2 can be expressed as in (10) and (11), shown on the top
480
+ of the next page.
481
+ Proof. The proof is presented in Appendix A.
482
+ Lemma 2. Likewise, the upper bound on the ergodic rate of
483
+ DR1 and DR2 can be expressed as in (12) and (13), shown on
484
+ the top of the page.
485
+ Proof. The proof is presented in Appendix B.
486
+ C. SE and EE
487
+ Based on the ergodic rate established in the preceding
488
+ subsection, SE of RIS-enabled NOMA-based D2D can be
489
+ described as SE = RDR1 + RDR2. Similarly, the EE can be
490
+ defined as the ratio of the SE to the total power utilized, Ptot,
491
+ in bits/Joule/Hz. Ptot consists of the power utilized by the BS,
492
+ DT, RIS, and DRs. Thus, the EE may be represented as
493
+ EE = SE
494
+ Ptot
495
+ =
496
+ SE
497
+ (1 + α)Pr + MPRE + 2PU
498
+ ,
499
+ (14)
500
+ where Pr denotes the static power consumption of DT. Like-
501
+ wise, αPr is the dynamic power consumption at DT. Further,
502
+ PRE denotes the power consumed by each of the RE and PU
503
+ is the power utilized by DR.
504
+ 0
505
+ 5
506
+ 10
507
+ 15
508
+ 20
509
+ 25
510
+ 30
511
+ 6
512
+ 8
513
+ 10
514
+ 12
515
+ 14
516
+ 16
517
+ 18
518
+ 20
519
+ 22
520
+ 24
521
+ 26
522
+ Transmit SNR (dB)
523
+ SE (bps/Hz)
524
+
525
+
526
+ Exact − Simulation
527
+ Lower Bound
528
+ Upper Bound
529
+ M = 200, 100, 50, 20, 10
530
+ Fig. 2. SE versus transmit SNR with varying M.
531
+ IV. SIMULATION RESULT
532
+ This section presents the simulation and analytical results
533
+ for the proposed RIS-empowered NOMA-based D2D commu-
534
+ nication system. For the direct links, the Nakagami fading
535
+ parameters are assumed to be mh1 = mh2 = 2 and, for the RIS
536
+ reflected links, m0 = m1 = m2 = 5, respectively. Likewise,
537
+ the power allocation factor for DR1 is β2
538
+ 1 = 0.3 while for DR2
539
+ is β2
540
+ 2 = 0.7, if not specified otherwise. Additionally, the value
541
+ of RE allocation parameter η is assumed to be 0.5.
542
+ Fig. 2 shows the SE results for the proposed RIS-empowered
543
+ NOMA-based D2D communication. Specifically, it shows SE
544
+ with respect to the transmit power while comparing the simu-
545
+ lation and analytical results. These results can easily infer the
546
+ following observations: 1) Apart from smaller M, analytical
547
+ SE is quite precise compared to simulation-based SE. 2) Due
548
+
549
+ 0
550
+ 20
551
+ 40
552
+ 60
553
+ 80
554
+ 100
555
+ 120
556
+ 140
557
+ 160
558
+ 180
559
+ 200
560
+ 4
561
+ 6
562
+ 8
563
+ 10
564
+ 12
565
+ 14
566
+ 16
567
+ 18
568
+ 20
569
+ 22
570
+ 24
571
+ 26
572
+ No. of REs (M)
573
+ SE (bps/Hz)
574
+
575
+
576
+ NOMA − Exact Simulation
577
+ OMA − Exact Simulation
578
+ NOMA − Lower Bound
579
+ OMA − Lower Bound
580
+ NOMA − Upper Bound
581
+ OMA − Upper Bound
582
+ ρr = 10, 20, 30 dB
583
+ Fig. 3. SE of NOMA-/OMA-based D2D with respect to M.
584
+ 0
585
+ 5
586
+ 10
587
+ 15
588
+ 20
589
+ 25
590
+ 30
591
+ 0
592
+ 0.5
593
+ 1
594
+ 1.5
595
+ 2
596
+ 2.5
597
+ 3
598
+ 3.5
599
+ 4
600
+ 4.5
601
+ Transmit SNR (dB)
602
+ EE (bits/Hz/Joule)
603
+
604
+
605
+ NOMA
606
+ OMA
607
+ M = 20, 50, 200
608
+ Fig. 4. EE versus transmit SNR at different M.
609
+ to the multiplicative path-loss, for less number of REs, i.e.,
610
+ smaller M, the received power from the direct link is signif-
611
+ icant. However, as the number of REs increases, the received
612
+ power from a RIS-reflected link is much more than the power
613
+ received from the direct link to the extent that it can safely
614
+ be ignored. Thus, it can easily be inferred from the analytical
615
+ and simulation framework that the received signal power from
616
+ the direct link is relatively insignificant and can be ignored as
617
+ compared to the received power from the RIS-reflected link.
618
+ Fig. 3 shows the SE of the proposed RIS-empowered D2D
619
+ communication system for both NOMA and OMA scenarios.
620
+ Specifically, the SE of both NOMA and OMA scenarios is
621
+ plotted with respect to the number of REs for different SNRs.
622
+ It can easily be observed here that, instead of increasing the
623
+ transmit power, the number of REs can be increased to get the
624
+ same SE. In other words, for fixed required SE, we can tradeoff
625
+ the transmit power with the number of REs. As D2D users
626
+ are usually power constrained, RIS-empowered D2D commu-
627
+ nication can be a viable alternative to cut down the transmit
628
+ power and improve network EE. Further, as evident from the
629
+ result, RIS-empowered NOMA-based D2D is more spectrally
630
+ efficient as compared to OMA-based D2D. For instance, SE at
631
+ 20 dB SNR and M = 20 is 15.26 bps/Hz for NOMA and 14.76
632
+ bps/Hz for OMA, respectively. The NOMA gain will increase
633
+ when the channel gain between UEs increases. Further, SE also
634
+ improves with the number of REs, as evident from the result.
635
+ Likewise, SE grows as the transmit power goes up.
636
+ Fig. 4 shows EE of the proposed RIS-empowered D2D
637
+ communication system, where EE of both NOMA and OMA
638
+ scenarios is plotted with respect to the transmit power for
639
+ the varying number of REs. Further, it can be inferred that
640
+ RIS-empowered D2D is energy-efficient as compared to OMA.
641
+ Further, it can also be observed that the EE increases with
642
+ the number of REs, whereas EE decreases as the transmit
643
+ power increases. This is because SE increases linearly while
644
+ the transmit power increases logarithmically; thus, the overall
645
+ compounding impact decreases EE while increasing transmit
646
+ SNR. In addition, EE is likewise saturated for a large number of
647
+ REs, and no further gains are observed. The result also demon-
648
+ strates that increasing the number of REs does not improve
649
+ performance, as SE increases while EE becomes saturated.
650
+ Thus, it can be inferred that RIS improves the SE and EE
651
+ performance of the D2D system.
652
+ V. CONCLUSION
653
+ In this paper, we investigated the performance of a RIS-
654
+ empowered NOMA-based D2D communication system. Specif-
655
+ ically, we derived the closed-form expressions for SE’s upper
656
+ and lower bounds. As shown through the results, apart from
657
+ the smaller values of the number of REs, the bounds are pretty
658
+ tight and converge to exact SE, even for moderate REs. Further,
659
+ we have also investigated the EE performance. Since the D2D
660
+ devices are usually power-constrained, the results show that
661
+ the transmit power can be a tradeoff with the number of REs
662
+ at RIS. Additionally, the results are also compared with the
663
+ OMA scenario, where it has been shown that NOMA-based
664
+ D2D outperforms the OMA-based case.
665
+ VI. ACKNOWLEDGEMENT
666
+ This work was supported by the Nazarbayev University CRP
667
+ Grant no. 11022021CRP1513.
668
+ APPENDIX A
669
+ PROOF OF LEMMA 1
670
+ The ergodic rate of DR1 can be formulated as
671
+ RDR1 =
672
+ 1
673
+ ln (2)
674
+
675
+
676
+ 0
677
+ ln
678
+
679
+ 1 + β2
680
+ 1 |y|2 ρr
681
+
682
+ f|H|2(y)dy
683
+
684
+ ��
685
+
686
+ J1
687
+ .
688
+ (15)
689
+ Further, using (8) and the below relation ln(t)
690
+ =
691
+ (t −
692
+ 1) 2F1 (1, 1; 2; 1 − t), J1 in (15) can be modified as
693
+ J1 =
694
+ 1
695
+ Γ(a + 1)
696
+
697
+
698
+ 0
699
+ √g 2F1 (1, 1; 2; −g)
700
+ 2ba+1
701
+ e
702
+
703
+
704
+ √g
705
+ b
706
+
707
+ dg.
708
+ (16)
709
+ Here, 2F1(·, ·; ·; ·) represents the Gauss hyper-geometric func-
710
+ tion. Now, this J1 can be solved utilizing [11, Theorem 3] and
711
+ substituted in (15). After rearranging the terms, the ergodic
712
+ rate for DR1 can be given as shown in (11), where F1 =
713
+ 1F2
714
+
715
+ 1+ a
716
+ 2; 3
717
+ 2, 2+ a
718
+ 2;
719
+ −1
720
+ 4b2β2
721
+ 1ρr
722
+
723
+ , F2 = 1F2
724
+
725
+ a+1
726
+ 2 ; 1
727
+ 2, a+3
728
+ 2 ;
729
+ −1
730
+ 4b2β2
731
+ 1ρr
732
+
733
+ and F3 = 2F3
734
+
735
+ 1, 1; 2, 1− a
736
+ 2, 3−a
737
+ 2 ;
738
+ −1
739
+ 4b2β2
740
+ 1ρr
741
+
742
+
743
+ Likewise, the ergodic rate for DR2, RDR2, can be given by
744
+ RDR2 =
745
+ 1
746
+ ln (2)
747
+
748
+
749
+
750
+
751
+
752
+ 0
753
+ ln
754
+
755
+ 1 + c1 |y|2 ρr
756
+
757
+ f|H|2(y)dy
758
+
759
+
760
+
761
+ 0
762
+ ln
763
+
764
+ 1 + c2 |y|2 ρr
765
+
766
+ f|H|2(y)dy
767
+
768
+
769
+  ,
770
+ (17)
771
+ where c1 and c2 are defined as c1 = β2
772
+ 1 + β2
773
+ 2 and c2 = β2
774
+ 1.
775
+ It
776
+ can
777
+ be evaluated similarly
778
+ to
779
+ J1. After
780
+ rearranging
781
+ the terms, the ergodic rate for DR2 can be given as
782
+ shown in (12), where F4 =
783
+ 1F2
784
+
785
+ 1+ a
786
+ 2; 3
787
+ 2, 2+ a
788
+ 2;
789
+ −1
790
+ 4b2c1ρr
791
+
792
+ ,
793
+ F5
794
+ =
795
+ 1F2
796
+
797
+ 1+ a
798
+ 2; 3
799
+ 2, 2+ a
800
+ 2;
801
+ −1
802
+ 4b2c2ρr
803
+
804
+ ,
805
+ F6
806
+ =
807
+ 1F2
808
+
809
+ a+1
810
+ 2 ; 1
811
+ 2, a+3
812
+ 2 ;
813
+ −1
814
+ 4b2c1ρr
815
+
816
+ , F7 = 1F2
817
+
818
+ a+1
819
+ 2 ; 1
820
+ 2, a+3
821
+ 2 ;
822
+ −1
823
+ 4b2c2ρr
824
+
825
+ ,
826
+ F8
827
+ =
828
+ 2F3
829
+
830
+ 1, 1; 2, 1− a
831
+ 2, 3−a
832
+ 2 ;
833
+ −1
834
+ 4b2c1ρr
835
+
836
+ and
837
+ F9
838
+ =
839
+ 2F3
840
+
841
+ 1, 1; 2, 1− a
842
+ 2, 3−a
843
+ 2 ;
844
+ −1
845
+ 4b2c2ρr
846
+
847
+ . This completes the proof of
848
+ Lemma 1.
849
+ APPENDIX B
850
+ PROOF OF LEMMA 2
851
+ Applying Jensen’s inequality, we define the upper bound for
852
+ DR1 as Ru
853
+ DR1, where RDR1 ≤ Ru
854
+ DR1, with Ξ1 = β2
855
+ 1ρr, as
856
+ Ru
857
+ DR1 = log2
858
+
859
+ 1 + Ξ1E
860
+
861
+ |H1|2��
862
+ .
863
+ (18)
864
+ To calculate E
865
+
866
+ |H1|2�
867
+ , we apply the binomial expansion
868
+ theorem (BET) as
869
+ E
870
+
871
+ |H1|2�
872
+ = E
873
+
874
+
875
+ �����h1 +
876
+ M1
877
+
878
+ i=1
879
+ ��¯gi
880
+ 1
881
+ �� ��gi
882
+ 1
883
+ ��
884
+ �����
885
+ 2
886
+  = E
887
+
888
+ |h1|2�
889
+
890
+ ��
891
+
892
+ E1
893
+ + E
894
+
895
+
896
+
897
+ � M1
898
+
899
+ i=1
900
+ ��¯gi
901
+ 1
902
+ �� ��gi
903
+ 1
904
+ ��
905
+ �2
906
+
907
+
908
+
909
+ ��
910
+
911
+ E2
912
+ +2 E
913
+ � M1
914
+
915
+ i=1
916
+ ��¯gi
917
+ 1
918
+ �� ��gi
919
+ 1
920
+ �� |h1|
921
+
922
+
923
+ ��
924
+
925
+ E3
926
+ . (19)
927
+ Now, we have E1 = Ωh1. Likewise, to calculate E2, we apply
928
+ BET again; thus, on expanding, E2 can be expressed as
929
+ E
930
+ � M1
931
+
932
+ i=1
933
+ ��¯gi
934
+ 1
935
+ ��2 ��gi
936
+ 1
937
+ ��2
938
+
939
+ + E
940
+
941
+
942
+
943
+
944
+
945
+ M1
946
+
947
+ i=1
948
+ M1
949
+
950
+ i=1
951
+ j̸=i
952
+ ��¯gi
953
+ 1
954
+ �� ��gi
955
+ 1
956
+ ��
957
+ ���¯gj
958
+ 1
959
+ ���
960
+ ���gj
961
+ 1
962
+ ���
963
+
964
+
965
+
966
+
967
+
968
+ , (20)
969
+ where E
970
+ ��M1
971
+ i=1
972
+ ��¯gi
973
+ 1
974
+ ��2 ��gi
975
+ 1
976
+ ��2�
977
+ =
978
+ M1Ωm0Ωm1. Further, for
979
+ E
980
+ ��M1
981
+ i=1
982
+ �M1
983
+ i=1
984
+ j̸=i
985
+ ��¯gi
986
+ 1
987
+ �� ��gi
988
+ 1
989
+ ��
990
+ ���¯gj
991
+ 1
992
+ ���
993
+ ���gj
994
+ 1
995
+ ���
996
+
997
+ ,
998
+ the
999
+ expected
1000
+ value of
1001
+ a
1002
+ Nakagami-m
1003
+ variable
1004
+ can
1005
+ be
1006
+ given
1007
+ as
1008
+ E{|g1|}
1009
+ =
1010
+ Γ(m1+ 1
1011
+ 2 )
1012
+ Γ(m1)
1013
+ �� Ωm1
1014
+ m1
1015
+
1016
+ . Since ¯gi
1017
+ 1 and gi
1018
+ 1 are mutually independent,
1019
+ we can have
1020
+ E
1021
+
1022
+
1023
+
1024
+
1025
+
1026
+
1027
+
1028
+ M1
1029
+
1030
+ i=1
1031
+ M1
1032
+
1033
+ j=1
1034
+ j̸=i
1035
+ ��¯gi
1036
+ 1
1037
+ �� ��gi
1038
+ 1
1039
+ ��
1040
+ ���¯gj
1041
+ 1
1042
+ ���
1043
+ ���gj
1044
+ 2
1045
+ ���
1046
+
1047
+
1048
+
1049
+
1050
+
1051
+
1052
+
1053
+ = M1 (M1 − 1)
1054
+ �Ωm0
1055
+ m0
1056
+
1057
+ ×
1058
+
1059
+ Γ
1060
+
1061
+ m0 + 1
1062
+ 2
1063
+ ��2 �
1064
+ Γ
1065
+
1066
+ m1 + 1
1067
+ 2
1068
+ ��2
1069
+ {Γ (m0)}2 {Γ (m1)}2
1070
+ �Ωm1
1071
+ m1
1072
+
1073
+ .
1074
+ (21)
1075
+ Likewise, E3 can be calculated as
1076
+ E3 = M Γ
1077
+
1078
+ mh1 + 1
1079
+ 2
1080
+
1081
+ Γ
1082
+
1083
+ m0 + 1
1084
+ 2
1085
+
1086
+ Γ
1087
+
1088
+ m1 + 1
1089
+ 2
1090
+
1091
+ ×
1092
+
1093
+ Ωmh1 Ωm0Ωm1
1094
+ mh1m0m1
1095
+ / [Γ (mh1) Γ (m0) Γ (m1)] .
1096
+ (22)
1097
+ Finally, putting E1, E2 and E3 all together yields E
1098
+
1099
+ |H1|2�
1100
+ which can be put in (18) to give the desired upper bound as
1101
+ shown in (27).
1102
+ Likewise, the upper bound on the ergodic rate of DR2, Ru
1103
+ DR2
1104
+ can be defined as RDR2 ≤ Ru
1105
+ DR2, where RDR2 can be defined
1106
+ as
1107
+ RDR2 = E
1108
+
1109
+ log2
1110
+
1111
+ 1 + |H2|2 �
1112
+ β2
1113
+ 1 + β2
1114
+ 2
1115
+
1116
+ ρr
1117
+ 1 + |H2|2 β2
1118
+ 1ρr
1119
+ ��
1120
+ ,
1121
+ = E
1122
+
1123
+ log2
1124
+
1125
+ 1 + Ξ2 |H2|2�
1126
+ − log2
1127
+
1128
+ 1 + Ξ3 |H2|2��
1129
+ ,
1130
+ (23)
1131
+ where Ξ2 =
1132
+
1133
+ β2
1134
+ 1 + β2
1135
+ 2
1136
+
1137
+ ρr and Ξ3 = β2
1138
+ 1ρr. Thus, Ru
1139
+ DR2 can be
1140
+ defined as
1141
+ Ru
1142
+ DR2=log2
1143
+
1144
+ 1+Ξ2E
1145
+
1146
+ |H2|2��
1147
+ −log2
1148
+
1149
+ 1+Ξ3E
1150
+
1151
+ |H2|2��
1152
+ .
1153
+ (24)
1154
+ Similar to E
1155
+
1156
+ |H1|2�
1157
+ , E
1158
+
1159
+ |H2|2�
1160
+ can be evaluated. After sub-
1161
+ stituting and rearranging the terms, Ru
1162
+ DR2 is given in (13).
1163
+ REFERENCES
1164
+ [1] E. Basar, M. Di Renzo, J. De Rosny, M. Debbah, M.-S. Alouini, and
1165
+ R. Zhang, “Wireless communications through reconfigurable intelligent
1166
+ surfaces,” IEEE Access, vol. 7, pp. 116 753–116 773, 2019.
1167
+ [2] S. Hu, F. Rusek, and O. Edfors, “Beyond massive MIMO: The potential
1168
+ of data transmission with large intelligent surfaces,” IEEE Trans. Signal
1169
+ Process., vol. 66, no. 10, pp. 2746–2758, 2018.
1170
+ [3] A. Asadi, Q. Wang, and V. Mancuso, “A survey on device-to-device
1171
+ communication in cellular networks,” IEEE Commun. Surv. Tutor., vol. 16,
1172
+ no. 4, pp. 1801–1819, 2014.
1173
+ [4] C. Zhang et al., “Distributed intelligent reflecting surfaces-aided device-
1174
+ to-device communications system,” J. Commun. Inf. Netw., vol. 6, no. 3,
1175
+ pp. 197–207, Sept. 2021.
1176
+ [5] Y. Cao et al., “Sum-rate maximization for multi-reconfigurable intelligent
1177
+ surface-assisted device-to-device communications,” IEEE Trans. Com-
1178
+ mun., vol. 69, no. 11, pp. 7283–7296, Nov. 2021.
1179
+ [6] G. Yang et al., “Reconfigurable intelligent surface empowered device-
1180
+ to-device communication underlaying cellular networks,” IEEE Trans.
1181
+ Commun., vol. 69, no. 11, pp. 7790–7805, Nov. 2021.
1182
+ [7] Y. Ni et al., “Performance analysis for RIS-assisted D2D communication
1183
+ under nakagami-m fading,” IEEE Trans. Veh. Technol., vol. 70, no. 6, pp.
1184
+ 5865–5879, Jun. 2021.
1185
+ [8] Y. Cheng et al., “Downlink and uplink intelligent reflecting surface aided
1186
+ networks: NOMA and OMA,” IEEE Trans. Wirel. Commun., pp. 1–1,
1187
+ 2021.
1188
+ [9] J. Zhu et al., “Power efficient IRS-assisted NOMA,” IEEE Trans. Com-
1189
+ mun., vol. 69, no. 2, pp. 900–913, 2021.
1190
+ [10] Z. Ding and H. Vincent Poor, “A simple design of IRS-NOMA transmis-
1191
+ sion,” IEEE Commun. Lett., vol. 24, no. 5, pp. 1119–1123, 2020.
1192
+ [11] M. H. Samuh, A. M. Salhab, and A. H. A. El-Malek, “Performance
1193
+ analysis and optimization of RIS-assisted networks in Nakagami-m envi-
1194
+ ronment,” arXiv preprint arXiv:2010.07841, 2020.
1195
+
4tE2T4oBgHgl3EQfkAch/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,485 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf,len=484
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
3
+ page_content='03973v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
4
+ page_content='IT] 10 Jan 2023 Performance of RIS-empowered NOMA-based D2D Communication under Nakagami-m Fading Mohd Hamza Naim Shaikh, ◦Sultangali Arzykulov, ◦Abdulkadir Celik, ◦Ahmed M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
5
+ page_content=' Eltawil, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
6
+ page_content=' Nauryzbayev School of Engineering and Digital Sciences, Nazarbayev University, Nur-Sultan City, 010000, Kazakhstan CEMSE Division, King Abdullah University of Science and Technology, Thuwal, 23955, Saudi Arabia Emails: {hamza.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
7
+ page_content='shaikh, galymzhan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
8
+ page_content='nauryzbayev}@nu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
9
+ page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
10
+ page_content='kz, {sultangali.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
11
+ page_content='arzykulov, abdulkadir.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
12
+ page_content='celik, ahmed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
13
+ page_content='eltawil}@kaust.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
14
+ page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
15
+ page_content='sa Abstract—Reconfigurable intelligent surfaces (RISs) have sparked a renewed interest in the research community envi- sioning future wireless communication networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
16
+ page_content=' In this study, we analyzed the performance of RIS-enabled non-orthogonal multiple access (NOMA) based device-to-device (D2D) wireless communication system, where the RIS is partitioned to serve a pair of D2D users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
17
+ page_content=' Specifically, closed-form expressions are derived for the upper and lower limits of spectral efficiency (SE) and energy efficiency (EE).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
18
+ page_content=' In addition, the performance of the proposed NOMA-based system is also compared with its orthogonal counter- part.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
19
+ page_content=' Extensive simulation is done to corroborate the analytical findings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
20
+ page_content=' The results demonstrate that RIS highly enhances the performance of a NOMA-based D2D network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
21
+ page_content=' Keywords— Device-to-device, energy efficiency, non-orthogonal multiple access, RIS, spectral efficiency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
22
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
23
+ page_content=' INTRODUCTION Reconfigurable intelligent surfaces (RISs) have recently been emerged as a revolutionary technique to realize the smart and programmable wireless environment for the next generation 6G systems [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
24
+ page_content=' Inherently, RIS consists of a planar array of large number of passive reflecting elements (REs).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
25
+ page_content=' These REs can reflect the incident signal so that the reflected signal can be aligned towards the desired location.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
26
+ page_content=' Because of their potential to transform a hostile wireless environment into an adaptive and favorable propagating channel, RISs have received much attention from the research community.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
27
+ page_content=' RIS has the potential to enhance spectral efficiency (SE) significantly, and energy efficiency (EE) due to the large number of passive REs [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
28
+ page_content=' On the other hand, device-to-device (D2D) communication is also considered a promising technology proposed in the 5G standard that enables direct communications between D2D users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
29
+ page_content=' In D2D communication, the same time-frequency re- sources of cellular users are re-utilized by the D2D users, thus, allowing massive access without aggravating the spectrum crunch [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
30
+ page_content=' However, in D2D communication, a successful transmission is highly reliant on the propagation environment due to the limited power budget available at nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
31
+ page_content=' Unfortu- nately, this restriction limits the applicability of D2D in many of the existing scenarios, especially in dense urban environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
32
+ page_content=' Since RIS can adapt an unknown channel to a favorable prop- agation environment, deploying RISs can effectively alleviate this constraint [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
33
+ page_content=' In [5], the authors have optimized the RIS- aided underlay D2D communication to maximize the capacity by optimizing RIS phase shifts along with spectrum reuse and Blockage RIS BS g2 DR1 g1 g DR2 DT h h1 h2 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
34
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
35
+ page_content=' Schematic for RIS-empowered D2D Communication.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
36
+ page_content=' transmit beamforming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
37
+ page_content=' A joint resource allocation to maximize the sum rate of a RIS-assisted D2D underlay cellular network was studied in [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
38
+ page_content=' Likewise, in [7], performance analysis for RIS-assisted D2D communication was carried out for underlay and overlay modes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
39
+ page_content=' This paper investigates the performance of a RIS-empowered NOMA-based D2D communication system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
40
+ page_content=' The proposed sce- nario considers a downlink network, where a user nearby the base station (BS), is utilized to serve as a D2D transmitter (DT), facilitating the communication with a pair of users, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
41
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
42
+ page_content=', D2D receivers (DRs), which were otherwise not accessible by BS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
43
+ page_content=' DT is deployed with RIS, which comprises M REs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
44
+ page_content=' To support both DRs, a hard partitioning-based approach is utilized at RIS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
45
+ page_content=' Unlike [7], where the point-to-point D2D communication without a direct link was considered, we consider the novel RIS-empowered NOMA-based D2D communication with both direct and reflected links.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
46
+ page_content=' This work’s main contribution can be summarized as follows: We obtain the closed-form expressions for the upper and lower bounds of ergodic rate for the NOMA pair of the proposed RIS-enabled NOMA-based D2D communication system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
47
+ page_content=' Initially, we formulate the received signal-to- interference-plus-noise-ratio (SINR) and then utilize it for deriving the closed-form expressions of SE and EE for both the DRs;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
48
+ page_content=' In addition, we illustrate the effect of the distribution of REs, the power allocation ratio, and the Nakagami-m fading parameters on network performance;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
49
+ page_content=' Lastly, the proposed RIS-enabled NOMA-based D2D net- work is compared to its corresponding OMA counterpart and the case without RIS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
50
+ page_content=' 围II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
51
+ page_content=' SYSTEM MODEL Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
52
+ page_content=' 1 illustrates the system model where a single antenna BS tries to communicate with a pair of blocked UEs, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
53
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
54
+ page_content=', D2D receivers (DRs), denoted as DR1 and DR2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
55
+ page_content=' So, a D2D transmitter (DT) is utilized to set a reliable communication link.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
56
+ page_content=' Further, the DT-to-DR transmission is assisted via RIS with M number of REs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
57
+ page_content=' The system model can be regarded as a connected D2D-enabled cellular system, where a cluster of UEs are brought into coverage through the nearest connected UE1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
58
+ page_content=' Further, without losing any generality, in this work, we focus mainly on D2D communication, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
59
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
60
+ page_content=', communication from DT to DRs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
61
+ page_content=' In order to support two DRs, RIS is partitioned in two sub-RISs, each having M1 and M2 number of REs, with M1 = η M, M2 = (1 − η) M, M1 + M2 = M and η being the allocation parameter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
62
+ page_content=' Similar to [9], [10], a quasi- static and flat fading channel is assumed with known channel state information (CSI).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
63
+ page_content=' Further, the BS-to-DT, DT-to-RIS and RIS-to-DR channel links can either be line-of-sight (LoS), or non-LoS (NLoS) and thus characterized through Nakagami-m fading model [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
64
+ page_content=' The elements of g, g1 and g2 follow the Nakagami-m fading model with m0, m1 and m2 as the fading parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
65
+ page_content=' Similarly, the direct link between DT-to-DR is also characterized through Nakagami-m fading channel with mhl as fading parameter, where l = 1, 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
66
+ page_content=' In accordance with the NOMA and RIS concepts, the re- ceived signals at DR1, r1, and DR2, r2, can be expressed as r1 = (h1 + ¯g1 Φ1 g1 ) (β1 x1 + β2 x2) � Pr + No, (1) r2 = (h2 + ¯g2 Φ2 g2 ) (β1 x1 + β2 x2) � Pr + No, (2) where x1 and x2 represent the desired DR1 and DR2 signals, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
67
+ page_content=' Likewise, β1 and β2 is the power allocation coefficient for DR1 and DR2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
68
+ page_content=' Further, β1 and β2 adhere to the NOMA constraint, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
69
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
70
+ page_content=', β2 1 + β2 2 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
71
+ page_content=' Further, Pr and No denote the transmit power at DT and the additive white Gaussian noise (AWGN), with No ∈ CN(0, σ2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
72
+ page_content=' Now the received signal of the DRs can be maxi- mized through proper phase shifting at the RIS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
73
+ page_content=' Mathe- matically, |gΦ1g1| = ����M1 i=1 giδiejθi gi 1 ��� and |gΦ2g2| = ����M2 i=1 giδi ejθigi 2 ��� maximizes the received signal power at DRs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
74
+ page_content=' Here, gi, gi 1 and gi 2 denotes the i-th element of g, g1 and g2, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
75
+ page_content=' Thus, re-configuring θi to ˜θ maximize the received power.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
76
+ page_content=' The corresponding channel gain2 to ˜θ can be expressed as |H1|2 = |h1 + ¯g1Φ1g1|2 = � |h1| + M1 � i=1 ��¯gi 1 �� ��gi 1 �� �2 , (3) |H2|2 = |h2 + ¯g2Φ2g2|2 = � |h2| + M2 � i=1 ��¯gi 2 �� ��gi 2 �� �2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
77
+ page_content=' (4) 1There can be multiple users within the cluster, however, due to complexity requirements, we restrict ourselves to the two-user case, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
78
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
79
+ page_content=', two DRs [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
80
+ page_content=' 2Without losing any generality, δi = 1, ∀ i is assumed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
81
+ page_content=' III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
82
+ page_content=' PERFORMANCE ANALYSIS This section evaluates the bound on the ergodic rate of DRs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
83
+ page_content=' Further, the SE and EE for RIS-enabled NOMA-based D2D is formulated considering the fading parameter, power allocation, and REs distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
84
+ page_content=' Initially, the SINR for both the DRs is formulated and later on we utilize it in evaluating the SE and EE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
85
+ page_content=' Considering the DR1 signal as an interference, DR2 will decode the received signal with the following SINR SINRDR2 = |H2|2 β2 2 Pr |H2|2 β2 1 Pr + No = |H2|2 β2 2 ρr |H2|2 β2 1 ρr + 1 , (5) where ρr = Pr/No is transmit SNR at DT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
86
+ page_content=' Likewise, at DR1, applying SIC, initially, DR1 will decode the received signal of DR2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
87
+ page_content=' SINR for it can be expressed as SINRDR1→DR2 = |H1|2 β2 2 ρr |H1|2 β2 1 ρr + σ2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
88
+ page_content=' (6) After decoding and canceling the signal of DR2, DR1 can decode its own signal with SINR of SINRDR1 = β2 1 |H1|2 ρr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
89
+ page_content=' (7) A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
90
+ page_content=' Channel Characterization Now the channel gains, H1 and H2, as defined in (3) and (4), respectively, do not conform to any known closed-form distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
91
+ page_content=' Thus, for the sake of simplification of the analytical performance, we can approximate G1 and G2 (for M1 ≫ 1 and M2 ≫ 1) as |H1|2 = ��M1 i=1 ��¯gi 1 �� ��gi 1 �� �2 and |H2|2 = ��M2 i=1 ��¯gi 2 �� ��gi 2 �� �2 , respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
92
+ page_content=' The distribution function for the channel gain, |H|2, can be defined for g ≥ 0 as [11] f|H|2(y) = √ya 2ba+1Γ(a + 1) exp � − √y b � , (8) F|H|2(y) = γ � a + 1, √y b � Γ (a + 1) √y .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
93
+ page_content=' (9) Here, a and b are the variables defined as a = m0Γ(m0)2MmlΓ(ml)2 m0Γ(m0)2mlΓ(ml)2−Γ(m0+ 1 2 )2Γ(ml+ 1 2 )2 − N − 1 and b = m0Γ(m0)2mlΓ(ml)2−Γ(m0+ 1 2 )2Γ(ml+ 1 2 )2 � m0 Ωg Γ(m0)Γ(m0+ 1 2 ) � ml Ωgl Γ(ml)Γ(ml+ 1 2 ), with N ∈ {M1, M2} and l ∈ {1, 2}, for H ∈ {H1, H2}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
94
+ page_content=' Further, Γ(·) represents the Gamma function and γ(·, ·) indicates the lower incomplete Gamma function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
95
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
96
+ page_content=' Ergodic Rate The ergodic rates for DR1 and DR2 can be formu- lated as RDR1 = E [log2 (1 + SINRDR1)] and RDR2 = E [log2 (1 + SINRDR2)], respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
97
+ page_content=' Since the channel gain’s exact distribution is unknown, the expectations are math- ematically intractable, and thus a closed-form expression may not be derived.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
98
+ page_content=' Hence, we resort to approximating the ergodic rates of DRs with tight upper and lower bounds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
99
+ page_content=' Specifically,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
100
+ page_content=' the upper bound is derived by invoking Jensen’s inequality,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
101
+ page_content=' and ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
102
+ page_content='the lower bound is derived by utilizing the approximate PDF ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
103
+ page_content='Rl ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
104
+ page_content='DR1 = ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
105
+ page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
106
+ page_content='ln (2) Γ(a + 1) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
107
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
108
+ page_content='π csc (aπ/2) F1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
109
+ page_content='(a + 2) (β1b)a+2 (ρr) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
110
+ page_content='a ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
111
+ page_content='2 +1 + ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
112
+ page_content='π sec (aπ/2) F1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
113
+ page_content='(a + 1) (β1b)a+1 (ρr) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
114
+ page_content='a+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
115
+ page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
116
+ page_content='+2 a (a − 1) ψ(0) (a + 1) + ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
117
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
118
+ page_content='a2 − a ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
119
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
120
+ page_content='Γ(a − 1) ln ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
121
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
122
+ page_content='b2 β2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
123
+ page_content='1 ρr ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
124
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
125
+ page_content='+ Γ(a − 1) F3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
126
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
127
+ page_content='(10) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
128
+ page_content='Rl ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
129
+ page_content='DR2 = ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
130
+ page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
131
+ page_content='ln (2)Γ(a + 1) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
132
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
133
+ page_content='π csc (aπ/2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
134
+ page_content='(a + 2) ba+2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
135
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
136
+ page_content='F4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
137
+ page_content='(c1ρr) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
138
+ page_content='a ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
139
+ page_content='2 +1 − ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
140
+ page_content='F5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
141
+ page_content='(c2ρr) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
142
+ page_content='a ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
143
+ page_content='2 +1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
144
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
145
+ page_content='+ π sec (aπ/2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
146
+ page_content='(a + 1) ba+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
147
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
148
+ page_content='F6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
149
+ page_content='(c1ρr) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
150
+ page_content='a+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
151
+ page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
152
+ page_content='− ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
153
+ page_content='F7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
154
+ page_content='(c2ρr) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
155
+ page_content='a+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
156
+ page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
157
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
158
+ page_content='+ Γ(a − 1) {F8 − F9} + ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
159
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
160
+ page_content='a2 − a ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
161
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
162
+ page_content='Γ(a − 1) ln ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
163
+ page_content='�c1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
164
+ page_content='c2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
165
+ page_content='�� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
166
+ page_content='(11) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
167
+ page_content='Ru ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
168
+ page_content='DR1 = log2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
169
+ page_content='\uf8ee ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
170
+ page_content='\uf8f01 + Ξ1 Ωh1 + M1 Ξ1 Ωm0 Ωm1 + M1 (M1 − 1) Ξ1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
171
+ page_content='Ωm0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
172
+ page_content='m0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
173
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
174
+ page_content='Γ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
175
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
176
+ page_content='m0 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
177
+ page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
178
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
179
+ page_content='Γ (m0) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
180
+ page_content='�2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
181
+ page_content='Ωm1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
182
+ page_content='m1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
183
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
184
+ page_content='Γ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
185
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
186
+ page_content='m1 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
187
+ page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
188
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
189
+ page_content='Γ (m1) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
190
+ page_content='�2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
191
+ page_content='+ 2 M1 Ξ1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
192
+ page_content='Γ(mh1 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
193
+ page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
194
+ page_content='Γ(mh1) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
195
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
196
+ page_content='Ωmh1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
197
+ page_content='mh1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
198
+ page_content='Γ(m0 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
199
+ page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
200
+ page_content='Γ(m0) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
201
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
202
+ page_content='Ωm0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
203
+ page_content='m0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
204
+ page_content='Γ(m1 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
205
+ page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
206
+ page_content='Γ(m1) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
207
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
208
+ page_content='Ωm1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
209
+ page_content='m1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
210
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
211
+ page_content='(12) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
212
+ page_content='Ru ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
213
+ page_content='DR2 = log2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
214
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
215
+ page_content='1 + Ξ2Ωh2 + M2Ξ2Ωm0Ωm2 + M2 (M2 − 1) Ξ2Ωm0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
216
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
217
+ page_content='Γ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
218
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
219
+ page_content='m0 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
220
+ page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
221
+ page_content='��2 Ωm2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
222
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
223
+ page_content='Γ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
224
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
225
+ page_content='m2 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
226
+ page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
227
+ page_content='��2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
228
+ page_content='m0 {Γ (m0)}2 m2 {Γ (m2)}2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
229
+ page_content='+ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
230
+ page_content='2M2Ξ2Γ(mh2 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
231
+ page_content='2)Γ(m0 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
232
+ page_content='2)Γ(m2 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
233
+ page_content='2)�Ωmh2 Ωm0Ωm2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
234
+ page_content='Γ(mh2)Γ(m0)Γ(m2)√mh2m0m2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
235
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
236
+ page_content='− log2 [1 + Ξ3Ωh2 + M2Ξ3Ωm0Ωm2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
237
+ page_content='+M2 (M2−1) Ξ3Ωm0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
238
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
239
+ page_content='Γ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
240
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
241
+ page_content='m0+ 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
242
+ page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
243
+ page_content='��2 Ωm2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
244
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
245
+ page_content='Γ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
246
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
247
+ page_content='m2+ 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
248
+ page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
249
+ page_content='��2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
250
+ page_content='m0 {Γ (m0)}2 m2 {Γ (m2)}2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
251
+ page_content='+ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
252
+ page_content='2M2Ξ3Γ(mh2 + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
253
+ page_content='2)Γ(m0+ 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
254
+ page_content='2)Γ(m2+ 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
255
+ page_content='2)�Ωmh2Ωm0Ωm2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
256
+ page_content='Γ(mh2)Γ(m0)Γ(m2)√mh2m0m2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
257
+ page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
258
+ page_content='(13) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
259
+ page_content='as described in (8).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
260
+ page_content=' The bounds on the ergodic rate of DRs are evaluated following a series of mathematical manipulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
261
+ page_content=' The following Lemmas present the upper and lower bound for the proposed RIS-enabled NOMA-based D2D communication system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
262
+ page_content=' Lemma 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
263
+ page_content=' The lower bound on the ergodic rates of DR1 and DR2 can be expressed as in (10) and (11), shown on the top of the next page.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
264
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
265
+ page_content=' The proof is presented in Appendix A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
266
+ page_content=' Lemma 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
267
+ page_content=' Likewise, the upper bound on the ergodic rate of DR1 and DR2 can be expressed as in (12) and (13), shown on the top of the page.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
268
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
269
+ page_content=' The proof is presented in Appendix B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
270
+ page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
271
+ page_content=' SE and EE Based on the ergodic rate established in the preceding subsection, SE of RIS-enabled NOMA-based D2D can be described as SE = RDR1 + RDR2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
272
+ page_content=' Similarly, the EE can be defined as the ratio of the SE to the total power utilized, Ptot, in bits/Joule/Hz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
273
+ page_content=' Ptot consists of the power utilized by the BS, DT, RIS, and DRs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
274
+ page_content=' Thus, the EE may be represented as EE = SE Ptot = SE (1 + α)Pr + MPRE + 2PU , (14) where Pr denotes the static power consumption of DT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
275
+ page_content=' Like- wise, αPr is the dynamic power consumption at DT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
276
+ page_content=' Further, PRE denotes the power consumed by each of the RE and PU is the power utilized by DR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
277
+ page_content=' 0 5 10 15 20 25 30 6 8 10 12 14 16 18 20 22 24 26 Transmit SNR (dB) SE (bps/Hz) Exact − Simulation Lower Bound Upper Bound M = 200, 100, 50, 20, 10 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
278
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
279
+ page_content=' SE versus transmit SNR with varying M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
280
+ page_content=' IV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
281
+ page_content=' SIMULATION RESULT This section presents the simulation and analytical results for the proposed RIS-empowered NOMA-based D2D commu- nication system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
282
+ page_content=' For the direct links, the Nakagami fading parameters are assumed to be mh1 = mh2 = 2 and, for the RIS reflected links, m0 = m1 = m2 = 5, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
283
+ page_content=' Likewise, the power allocation factor for DR1 is β2 1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
284
+ page_content='3 while for DR2 is β2 2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
285
+ page_content='7, if not specified otherwise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
286
+ page_content=' Additionally, the value of RE allocation parameter η is assumed to be 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
287
+ page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
288
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
289
+ page_content=' 2 shows the SE results for the proposed RIS-empowered NOMA-based D2D communication.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
290
+ page_content=' Specifically, it shows SE with respect to the transmit power while comparing the simu- lation and analytical results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
291
+ page_content=' These results can easily infer the following observations: 1) Apart from smaller M, analytical SE is quite precise compared to simulation-based SE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
292
+ page_content=' 2) Due 0 20 40 60 80 100 120 140 160 180 200 4 6 8 10 12 14 16 18 20 22 24 26 No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
293
+ page_content=' of REs (M) SE (bps/Hz) NOMA − Exact Simulation OMA − Exact Simulation NOMA − Lower Bound OMA − Lower Bound NOMA − Upper Bound OMA − Upper Bound ρr = 10, 20, 30 dB Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
294
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
295
+ page_content=' SE of NOMA-/OMA-based D2D with respect to M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
296
+ page_content=' 0 5 10 15 20 25 30 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
297
+ page_content='5 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
298
+ page_content='5 2 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
299
+ page_content='5 3 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
300
+ page_content='5 4 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
301
+ page_content='5 Transmit SNR (dB) EE (bits/Hz/Joule) NOMA OMA M = 20, 50, 200 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
302
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
303
+ page_content=' EE versus transmit SNR at different M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
304
+ page_content=' to the multiplicative path-loss, for less number of REs, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
305
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
306
+ page_content=', smaller M, the received power from the direct link is signif- icant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
307
+ page_content=' However, as the number of REs increases, the received power from a RIS-reflected link is much more than the power received from the direct link to the extent that it can safely be ignored.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
308
+ page_content=' Thus, it can easily be inferred from the analytical and simulation framework that the received signal power from the direct link is relatively insignificant and can be ignored as compared to the received power from the RIS-reflected link.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
309
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
310
+ page_content=' 3 shows the SE of the proposed RIS-empowered D2D communication system for both NOMA and OMA scenarios.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
311
+ page_content=' Specifically, the SE of both NOMA and OMA scenarios is plotted with respect to the number of REs for different SNRs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
312
+ page_content=' It can easily be observed here that, instead of increasing the transmit power, the number of REs can be increased to get the same SE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
313
+ page_content=' In other words, for fixed required SE, we can tradeoff the transmit power with the number of REs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
314
+ page_content=' As D2D users are usually power constrained, RIS-empowered D2D commu- nication can be a viable alternative to cut down the transmit power and improve network EE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
315
+ page_content=' Further, as evident from the result, RIS-empowered NOMA-based D2D is more spectrally efficient as compared to OMA-based D2D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
316
+ page_content=' For instance, SE at 20 dB SNR and M = 20 is 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
317
+ page_content='26 bps/Hz for NOMA and 14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
318
+ page_content='76 bps/Hz for OMA, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
319
+ page_content=' The NOMA gain will increase when the channel gain between UEs increases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
320
+ page_content=' Further, SE also improves with the number of REs, as evident from the result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
321
+ page_content=' Likewise, SE grows as the transmit power goes up.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
322
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
323
+ page_content=' 4 shows EE of the proposed RIS-empowered D2D communication system, where EE of both NOMA and OMA scenarios is plotted with respect to the transmit power for the varying number of REs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
324
+ page_content=' Further, it can be inferred that RIS-empowered D2D is energy-efficient as compared to OMA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
325
+ page_content=' Further, it can also be observed that the EE increases with the number of REs, whereas EE decreases as the transmit power increases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
326
+ page_content=' This is because SE increases linearly while the transmit power increases logarithmically;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
327
+ page_content=' thus, the overall compounding impact decreases EE while increasing transmit SNR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
328
+ page_content=' In addition, EE is likewise saturated for a large number of REs, and no further gains are observed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
329
+ page_content=' The result also demon- strates that increasing the number of REs does not improve performance, as SE increases while EE becomes saturated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
330
+ page_content=' Thus, it can be inferred that RIS improves the SE and EE performance of the D2D system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
331
+ page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
332
+ page_content=' CONCLUSION In this paper, we investigated the performance of a RIS- empowered NOMA-based D2D communication system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
333
+ page_content=' Specif- ically, we derived the closed-form expressions for SE’s upper and lower bounds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
334
+ page_content=' As shown through the results, apart from the smaller values of the number of REs, the bounds are pretty tight and converge to exact SE, even for moderate REs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
335
+ page_content=' Further, we have also investigated the EE performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
336
+ page_content=' Since the D2D devices are usually power-constrained, the results show that the transmit power can be a tradeoff with the number of REs at RIS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
337
+ page_content=' Additionally, the results are also compared with the OMA scenario, where it has been shown that NOMA-based D2D outperforms the OMA-based case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
338
+ page_content=' VI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
339
+ page_content=' ACKNOWLEDGEMENT This work was supported by the Nazarbayev University CRP Grant no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
340
+ page_content=' 11022021CRP1513.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
341
+ page_content=' APPENDIX A PROOF OF LEMMA 1 The ergodic rate of DR1 can be formulated as RDR1 = 1 ln (2) ∞ � 0 ln � 1 + β2 1 |y|2 ρr � f|H|2(y)dy � �� � J1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
342
+ page_content=' (15) Further, using (8) and the below relation ln(t) = (t − 1) 2F1 (1, 1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
343
+ page_content=' 2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
344
+ page_content=' 1 − t), J1 in (15) can be modified as J1 = 1 Γ(a + 1) ∞ � 0 √g 2F1 (1, 1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
345
+ page_content=' 2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
346
+ page_content=' −g) 2ba+1 e � − √g b � dg.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
347
+ page_content=' (16) Here, 2F1(·, ·;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
348
+ page_content=' ·;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
349
+ page_content=' ·) represents the Gauss hyper-geometric func- tion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
350
+ page_content=' Now, this J1 can be solved utilizing [11, Theorem 3] and substituted in (15).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
351
+ page_content=' After rearranging the terms, the ergodic rate for DR1 can be given as shown in (11), where F1 = 1F2 � 1+ a 2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
352
+ page_content=' 3 2, 2+ a 2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
353
+ page_content=' −1 4b2β2 1ρr � , F2 = 1F2 � a+1 2 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
354
+ page_content=' 1 2, a+3 2 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
355
+ page_content=' −1 4b2β2 1ρr � and F3 = 2F3 � 1, 1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
356
+ page_content=' 2, 1− a 2, 3−a 2 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
357
+ page_content=' −1 4b2β2 1ρr � Likewise, the ergodic rate for DR2, RDR2, can be given by RDR2 = 1 ln (2) \uf8f1 \uf8f2 \uf8f3 ∞ � 0 ln � 1 + c1 |y|2 ρr � f|H|2(y)dy − ∞ � 0 ln � 1 + c2 |y|2 ρr � f|H|2(y)dy \uf8fc \uf8fd \uf8fe , (17) where c1 and c2 are defined as c1 = β2 1 + β2 2 and c2 = β2 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
358
+ page_content=' It can be evaluated similarly to J1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
359
+ page_content=' After rearranging the terms, the ergodic rate for DR2 can be given as shown in (12), where F4 = 1F2 � 1+ a 2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
360
+ page_content=' 3 2, 2+ a 2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
361
+ page_content=' −1 4b2c1ρr � , F5 = 1F2 � 1+ a 2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
362
+ page_content=' 3 2, 2+ a 2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
363
+ page_content=' −1 4b2c2ρr � , F6 = 1F2 � a+1 2 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
364
+ page_content=' 1 2, a+3 2 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
365
+ page_content=' −1 4b2c1ρr � , F7 = 1F2 � a+1 2 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
366
+ page_content=' 1 2, a+3 2 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
367
+ page_content=' −1 4b2c2ρr � , F8 = 2F3 � 1, 1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
368
+ page_content=' 2, 1− a 2, 3−a 2 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
369
+ page_content=' −1 4b2c1ρr � and F9 = 2F3 � 1, 1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
370
+ page_content=' 2, 1− a 2, 3−a 2 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
371
+ page_content=' −1 4b2c2ρr � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
372
+ page_content=' This completes the proof of Lemma 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
373
+ page_content=' APPENDIX B PROOF OF LEMMA 2 Applying Jensen’s inequality, we define the upper bound for DR1 as Ru DR1, where RDR1 ≤ Ru DR1, with Ξ1 = β2 1ρr, as Ru DR1 = log2 � 1 + Ξ1E � |H1|2�� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
374
+ page_content=' (18) To calculate E � |H1|2� , we apply the binomial expansion theorem (BET) as E � |H1|2� = E \uf8ee \uf8f0 �����h1 + M1 � i=1 ��¯gi 1 �� ��gi 1 �� ����� 2\uf8f9 \uf8fb = E � |h1|2� � �� � E1 + E \uf8f1 \uf8f2 \uf8f3 � M1 � i=1 ��¯gi 1 �� ��gi 1 �� �2\uf8fc \uf8fd \uf8fe � �� � E2 +2 E � M1 � i=1 ��¯gi 1 �� ��gi 1 �� |h1| � � �� � E3 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
375
+ page_content=' (19) Now, we have E1 = Ωh1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
376
+ page_content=' Likewise, to calculate E2, we apply BET again;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
377
+ page_content=' thus, on expanding, E2 can be expressed as E � M1 � i=1 ��¯gi 1 ��2 ��gi 1 ��2 � + E \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 M1 � i=1 M1 � i=1 j̸=i ��¯gi 1 �� ��gi 1 �� ���¯gj 1 ��� ���gj 1 ��� \uf8fc \uf8f4 \uf8fd \uf8f4 \uf8fe , (20) where E ��M1 i=1 ��¯gi 1 ��2 ��gi 1 ��2� = M1Ωm0Ωm1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
378
+ page_content=' Further, for E ��M1 i=1 �M1 i=1 j̸=i ��¯gi 1 �� ��gi 1 �� ���¯gj 1 ��� ���gj 1 ��� � , the expected value of a Nakagami-m variable can be given as E{|g1|} = Γ(m1+ 1 2 ) Γ(m1) �� Ωm1 m1 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
379
+ page_content=' Since ¯gi 1 and gi 1 are mutually independent, we can have E \uf8f1 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f3 M1 � i=1 M1 � j=1 j̸=i ��¯gi 1 �� ��gi 1 �� ���¯gj 1 ��� ���gj 2 ��� \uf8fc \uf8f4 \uf8f4 \uf8fd \uf8f4 \uf8f4 \uf8fe = M1 (M1 − 1) �Ωm0 m0 � × � Γ � m0 + 1 2 ��2 � Γ � m1 + 1 2 ��2 {Γ (m0)}2 {Γ (m1)}2 �Ωm1 m1 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
380
+ page_content=' (21) Likewise, E3 can be calculated as E3 = M Γ � mh1 + 1 2 � Γ � m0 + 1 2 � Γ � m1 + 1 2 � × � Ωmh1 Ωm0Ωm1 mh1m0m1 / [Γ (mh1) Γ (m0) Γ (m1)] .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
381
+ page_content=' (22) Finally, putting E1, E2 and E3 all together yields E � |H1|2� which can be put in (18) to give the desired upper bound as shown in (27).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
382
+ page_content=' Likewise, the upper bound on the ergodic rate of DR2, Ru DR2 can be defined as RDR2 ≤ Ru DR2, where RDR2 can be defined as RDR2 = E � log2 � 1 + |H2|2 � β2 1 + β2 2 � ρr 1 + |H2|2 β2 1ρr �� , = E � log2 � 1 + Ξ2 |H2|2� − log2 � 1 + Ξ3 |H2|2�� , (23) where Ξ2 = � β2 1 + β2 2 � ρr and Ξ3 = β2 1ρr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
383
+ page_content=' Thus, Ru DR2 can be defined as Ru DR2=log2 � 1+Ξ2E � |H2|2�� −log2 � 1+Ξ3E � |H2|2�� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
384
+ page_content=' (24) Similar to E � |H1|2� , E � |H2|2� can be evaluated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
385
+ page_content=' After sub- stituting and rearranging the terms, Ru DR2 is given in (13).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
386
+ page_content=' REFERENCES [1] E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
387
+ page_content=' Basar, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
388
+ page_content=' Di Renzo, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
389
+ page_content=' De Rosny, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
390
+ page_content=' Debbah, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
391
+ page_content='-S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
392
+ page_content=' Alouini, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
393
+ page_content=' Zhang, “Wireless communications through reconfigurable intelligent surfaces,” IEEE Access, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
394
+ page_content=' 7, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
395
+ page_content=' 116 753–116 773, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
396
+ page_content=' [2] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
397
+ page_content=' Hu, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
398
+ page_content=' Rusek, and O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
399
+ page_content=' Edfors, “Beyond massive MIMO: The potential of data transmission with large intelligent surfaces,” IEEE Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
400
+ page_content=' Signal Process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
401
+ page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
402
+ page_content=' 66, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
403
+ page_content=' 10, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
404
+ page_content=' 2746–2758, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
405
+ page_content=' [3] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
406
+ page_content=' Asadi, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
407
+ page_content=' Wang, and V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
408
+ page_content=' Mancuso, “A survey on device-to-device communication in cellular networks,” IEEE Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
409
+ page_content=' Surv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
410
+ page_content=' Tutor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
411
+ page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
412
+ page_content=' 16, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
413
+ page_content=' 4, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
414
+ page_content=' 1801–1819, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
415
+ page_content=' [4] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
416
+ page_content=' Zhang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
417
+ page_content=', “Distributed intelligent reflecting surfaces-aided device- to-device communications system,” J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
418
+ page_content=' Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
419
+ page_content=' Inf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
420
+ page_content=' Netw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
421
+ page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
422
+ page_content=' 6, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
423
+ page_content=' 3, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
424
+ page_content=' 197–207, Sept.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
425
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
426
+ page_content=' [5] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
427
+ page_content=' Cao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
428
+ page_content=', “Sum-rate maximization for multi-reconfigurable intelligent surface-assisted device-to-device communications,” IEEE Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
429
+ page_content=' Com- mun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
430
+ page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
431
+ page_content=' 69, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
432
+ page_content=' 11, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
433
+ page_content=' 7283–7296, Nov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
434
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
435
+ page_content=' [6] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
436
+ page_content=' Yang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
437
+ page_content=', “Reconfigurable intelligent surface empowered device- to-device communication underlaying cellular networks,” IEEE Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
438
+ page_content=' Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
439
+ page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
440
+ page_content=' 69, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
441
+ page_content=' 11, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
442
+ page_content=' 7790–7805, Nov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
443
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
444
+ page_content=' [7] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
445
+ page_content=' Ni et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
446
+ page_content=', “Performance analysis for RIS-assisted D2D communication under nakagami-m fading,” IEEE Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
447
+ page_content=' Veh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
448
+ page_content=' Technol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
449
+ page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
450
+ page_content=' 70, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
451
+ page_content=' 6, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
452
+ page_content=' 5865–5879, Jun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
453
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
454
+ page_content=' [8] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
455
+ page_content=' Cheng et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
456
+ page_content=', “Downlink and uplink intelligent reflecting surface aided networks: NOMA and OMA,” IEEE Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
457
+ page_content=' Wirel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
458
+ page_content=' Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
459
+ page_content=', pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
460
+ page_content=' 1–1, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
461
+ page_content=' [9] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
462
+ page_content=' Zhu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
463
+ page_content=', “Power efficient IRS-assisted NOMA,” IEEE Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
464
+ page_content=' Com- mun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
465
+ page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
466
+ page_content=' 69, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
467
+ page_content=' 2, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
468
+ page_content=' 900–913, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
469
+ page_content=' [10] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
470
+ page_content=' Ding and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
471
+ page_content=' Vincent Poor, “A simple design of IRS-NOMA transmis- sion,” IEEE Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
472
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
473
+ page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
474
+ page_content=' 24, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
475
+ page_content=' 5, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
476
+ page_content=' 1119–1123, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
477
+ page_content=' [11] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
478
+ page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
479
+ page_content=' Samuh, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
480
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
481
+ page_content=' Salhab, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
482
+ page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
483
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
484
+ page_content=' El-Malek, “Performance analysis and optimization of RIS-assisted networks in Nakagami-m envi- ronment,” arXiv preprint arXiv:2010.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
485
+ page_content='07841, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE2T4oBgHgl3EQfkAch/content/2301.03973v1.pdf'}
5NE1T4oBgHgl3EQfmgQS/content/tmp_files/2301.03297v1.pdf.txt ADDED
@@ -0,0 +1,1527 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Sectional Voronoi tessellations:
2
+ Characterization and high-dimensional limits
3
+ Anna Gusakovaa, Zakhar Kabluchkob, and Christoph Thälec
4
+ Abstract
5
+ The intersections of beta-Voronoi, beta-prime-Voronoi and Gaussian-Voronoi tessellations in Rd with
6
+ ℓ-dimensional affine subspaces, 1 ≤ ℓ ≤ d − 1, are shown to be random tessellations of the same type
7
+ but with different model parameters. In particular, the intersection of a classical Poisson-Voronoi
8
+ tessellation with an affine subspace is shown to have the same distribution as a certain beta-Voronoi
9
+ tessellation. The geometric properties of the typical cell and, more generally, typical k-faces, of the
10
+ sectional Poisson-Voronoi tessellation are studied in detail. It is proved that in high dimensions, that
11
+ is as d → ∞, the intersection of the d-dimensional Poison-Voronoi tessellation with an affine subspace
12
+ of fixed dimension ℓ converges to the ℓ-dimensional Gaussian-Voronoi tessellation.
13
+ Keywords: Beta-Voronoi tessellation, Gaussian-Voronoi tessellation, high-dimensional limit, Laguerre
14
+ tessellation, Poisson point process, Poisson-Voronoi tessellation, sectional tessellation, stochastic geo-
15
+ metry, typical cell
16
+ MSC: 60D05, 60G55
17
+ 1
18
+ Introduction
19
+ The present paper is devoted to the study of affine sections of Poisson-Voronoi tessellations. To define
20
+ these, let ζ ⊂ Rd be the set of atoms of a stationary point process in Rd. For each point x ∈ ζ we
21
+ construct the Voronoi cell V (x, ζ) of x as the set of all points in Rd which are closer to x than to any
22
+ other point of ζ:
23
+ V (x, ζ) := {z ∈ Rd : ∥x − z∥ ≤ ∥y − z∥
24
+ for all y ∈ ζ \ {x}},
25
+ where ∥ · ∥ stands for the Euclidean norm. The Voronoi cell can be thought of as a zone of influence
26
+ or attraction of the point x and it is known that each Voronoi cell is a convex polytope in Rd, with
27
+ probability 1. The collection of all these polytopes is the Voronoi tessellation associated with ζ. If
28
+ ζ is a homogeneous Poisson point process with constant intensity ρ > 0, this construction yields the
29
+ Poisson-Voronoi tessellation, denoted here by Wd,ρ, – one of the most classical models studied in
30
+ stochastic geometry. We refer to the monographs [22, 27, 29] for more detailed information, applications
31
+ and further references on Voronoi tessellations and in particular the Poisson-Voronoi tessellation.
32
+ A number of random tessellations studied in stochastic geometry, such as the Poisson hyperplane
33
+ or the STIT tessellations, have the distinguished feature of being stable under intersections with lower-
34
+ dimensional affine subspaces. By this we mean that the intersection with an affine subspace of one of these
35
+ random tessellations is again a model of the same type within the intersecting subspace. For example, the
36
+ intersection of a Poisson hyperplane tessellation with an affine subspace L is again a Poisson hyperplane
37
+ tessellation within L. However, a similar property is not true for the Poisson-Voronoi tessellation. In fact,
38
+ it has been shown by Chiu, Van De Weygaert and Stoyan [6] that the intersection of the Poisson-Voronoi
39
+ tessellation with an affine subspace cannot be a Voronoi tessellation induced by any stationary point
40
+ process within the subspace. In other words, the sectional Poisson-Voronoi tessellation is necessarily a
41
+ ‘non-Voronoi’ tessellation. However, besides a few mean values determined in [6, 20] further probabilistic
42
+ aMünster University, Germany. Email: [email protected]
43
+ bMünster University, Germany. Email: [email protected]
44
+ cRuhr University Bochum, Germany. Email: [email protected]
45
+ 1
46
+ arXiv:2301.03297v1 [math.PR] 9 Jan 2023
47
+
48
+ or geometric information about the sectional Poisson-Voronoi tessellation seems not available in the
49
+ existing literature, although they are of importance for stereological applications (see [29, Chapter 11.5.4]
50
+ or [21, Section 14.4.6] as well as the references cited therein). It is one of the main purposes of this paper
51
+ to derive a precise description of the sectional Poisson-Voronoi tessellation and to study its typical cell.
52
+ We do this by establishing a connection with the so-called β-Voronoi tessellations, a random tessellation
53
+ model we recently introduced and studied in the series of papers [10, 7, 8, 9]. Their analysis in turn was
54
+ based on the connection with the class of beta random polytopes, which has already seen a number of
55
+ applications in stochastic geometry [11, 13, 14, 15].
56
+ We study the problem of the sectional Poisson-Voronoi tessellation just explained in a more general
57
+ framework. In fact, the random tessellation we study is either
58
+ • a β-Voronoi tessellation Vd,β,γ in Rd with parameters β ≥ −1 and γ > 0,
59
+ • a β′-Voronoi tessellation V′
60
+ d,β,γ in Rd with parameters β > d
61
+ 2 + 1 and γ > 0,
62
+ • or a Gaussian-Voronoi tessellation Gd,λ in Rd with parameter λ > 0;
63
+ a description of all these models will be provided in Section 3. We remark that the classical Poisson-
64
+ Voronoi tessellation generated by a stationary Poisson point process in Rd with intensity ρ > 0 appears
65
+ in this framework as the β-Voronoi tessellation corresponding to the parameters β = −1 and γ =
66
+ π
67
+ d+1
68
+ 2 ρ/Γ( d+1
69
+ 2 ). Now, let L ⊂ Rd be an affine subspace of dimension 1 ≤ ℓ ≤ d − 1. We show in Theorem
70
+ 4.1 below that
71
+ • the sectional tessellation Vd,β,γ ∩ L is a (β + d−ℓ
72
+ 2 )-Voronoi tessellation in L with the same γ,
73
+ • the sectional tessellation V′
74
+ d,β,γ ∩ L is a (β − d−ℓ
75
+ 2 )′-Voronoi tessellation in L with the same γ,
76
+ • the sectional tessellation Gd,λ ∩ L is again a Gaussian-Voronoi tessellation in L with the same λ.
77
+ In particular, the intersection of the classical Poisson-Voronoi tessellation Wd,ρ with L turns out to be
78
+ a β-Voronoi tessellation within L with β =
79
+ d−ℓ
80
+ 2
81
+ − 1 and γ = π
82
+ d+1
83
+ 2 ρ/Γ( d+1
84
+ 2 ).
85
+ For clarity we should
86
+ remark that none of the random tessellations Vd,β,γ with β > −1 are actually Voronoi tessellations.
87
+ Our terminology is motivated by the fact that Vd,β,γ may be viewed as a deformation of the classical
88
+ Poisson-Voronoi tessellation (which corresponds to β = −1).
89
+ With the identification of the sectional Poisson-Voronoi tessellation at hand, in the second part of this
90
+ paper we study its geometric properties. More precisely, we determine in Theorem 5.1 its face intensities
91
+ in terms of quantities which have already appeared in the study of beta random polytopes [11, 15]. From
92
+ here on, we determine the expected volume, the expected intrinsic volumes as well as the expected f-
93
+ vector of the typical cell (and even more generally the typical k-face) of the sectional Poisson-Voronoi
94
+ tessellation. Moreover, generalizing earlier results of Miles [20] we consider the asymptotics, as d → ∞, of
95
+ several characteristics (such as the volume of the typical cell) of the sectional Poisson-Voronoi tessellation
96
+ and identify the limits with the corresponding characteristics of a suitable Gaussian-Voronoi tessellation.
97
+ The weak convergence on the level of tessellations is discussed as well using a coupling construction
98
+ similar to the one in [8].
99
+ 2
100
+ Preliminaries on random tessellations
101
+ In this section we collect some definitions and facts about general stationary random tessellations in Rd.
102
+ For more detailed discussions we refer the reader to [27, Chapters 4 and 10] as well as [29, Chapter 10].
103
+ A tessellation T in Rd is a countable, locally finite collection of d-dimensional polytopes, which cover
104
+ the space and have non-empty, disjoint interiors. The elements of T are called the cells of T. Given a
105
+ polytope c ⊂ Rd we denote by Fk(c) the set of its k-dimensional faces, 0 ≤ k ≤ d, where Fd(c) = {c},
106
+ and let F(c) := �d
107
+ k=0 Fk(c). A tessellation T is called face-to-face if for any two of its cells c1, c2 ∈ T
108
+ one has that
109
+ c1 ∩ c2 ∈ (F(c1) ∩ F(c2)) ∪ {∅},
110
+ 2
111
+
112
+ that is, the intersection of two cells is either empty or a common face of both cells. For a face-to-face
113
+ tessellation T one defines Fk(T) = �
114
+ c∈T Fk(c) and F(T) = �
115
+ c∈T F(c). A face-to-face tessellation in Rd
116
+ is called normal if each k-dimensional face of the tessellation is contained in the boundary of precisely
117
+ d + 1 − k cells, for all k ∈ {0, 1, . . . , d − 1}.
118
+ We denote by T the set of all face-to-face tessellations in Rd, which is supplied with a measurable
119
+ structure as in [27, Chapter 10]. By a random tessellation we understand a particle process T in Rd
120
+ (in the usual sense of stochastic geometry, see [27, Section 4.1]) satisfying supp T ∈ T almost surely. It is
121
+ convenient to identify the random point process T with its support. A random tessellation is stationary,
122
+ provided that its distribution is invariant under all shifts in Rd and isotropic if its distribution is invariant
123
+ under all rotations in Rd. For a stationary random tessellation T and k ∈ {0, 1, . . . , d − 1} we define the
124
+ stationary particle process T (k) := �
125
+ F∈Fk(T ) δF of k-dimensional polytopes, which is referred to as the
126
+ process of k-faces.
127
+ Next, we recall the concept of a typical cell (and a typical k-face) of a stationary random tessellation
128
+ T ; see [27, Section 4.1,4.2], [27, page 450], [26, Section 4.3] for more details. Let C′ be the space of
129
+ non-empty compact subsets of Rd endowed with the Hausdorff metric. A centre function is a Borel
130
+ function z : C′ → Rd such that z(C + m) = z(C) + m for all C ∈ C′ and m ∈ Rd. The intensity of
131
+ k-faces of T is defined by
132
+ γk(T ) := E
133
+
134
+ F∈Fk(T )
135
+ 1(z(F) ∈ [0, 1]d),
136
+ k = 0, . . . , d.
137
+ These quantities are known to be independent of the choice of the centre function z. Assuming that
138
+ γk(T ) ∈ (0, ∞), the typical k-face of T with respect to the centre function z is the k-dimensional
139
+ random polytope whose distribution is given by
140
+ Pz
141
+ T ,k( · ) :=
142
+ 1
143
+ γk(T ) E
144
+
145
+ F∈Fk(T )
146
+ 1(F − z(F) ∈ · )1(z(F) ∈ [0, 1]d).
147
+ In particular, for k = d we get the concept of the typical cell of T . It should be noted that translation-
148
+ invariant characteristics of the distribution of the typical k-face do not depend on the choice of z. More
149
+ precisely, if z and z′ are two centre functions, then Pz′
150
+ T ,k is the push-forward of Pz
151
+ T ,k under the map
152
+ F �→ F − z′(F).
153
+ 3
154
+ Construction of β-, β′- and Gaussian-Voronoi tessellations
155
+ 3.1
156
+ General Laguerre tessellations
157
+ In this section we only briefly recall some facts about Laguerre tessellations and refer the reader to [7,
158
+ Sections 3.2–3.4] and [8, Sections 2.3 and 3.1] for further details.
159
+ We start by defining a general Laguerre tessellation. Given two points v, w ∈ Rd and h ∈ R we define
160
+ the power of w with respect to the pair (v, h) as
161
+ pow(w, (v, h)) := ∥w − v∥2 + h.
162
+ In this situation, h is referred to as the weight (or height) of the point v. Let X be a countable set of
163
+ marked points of the form (v, h) ∈ Rd × R. Then the Laguerre cell of (v, h) ∈ X is the set
164
+ C((v, h), X) := {w ∈ Rd : pow(w, (v, h)) ≤ pow(w, (v′, h′)) for all (v′, h′) ∈ X}.
165
+ The point v is called the nucleus of the cell C((v, h), X). Note that a Laguerre cell may be empty and
166
+ even if it is non-empty, it does not need to contain its nucleus. The collection of all non-empty Laguerre
167
+ cells of X is called the Laguerre diagram:
168
+ L(X) := {C((v, h), X): (v, h) ∈ X, C((v, h), X) ̸= ∅}.
169
+ 3
170
+
171
+ In the special case when the heights h of all points are the same (say, h0 ∈ R) the above definition leads
172
+ to the classical Voronoi cell. More precisely, let Y be a countable set of points in Rd whose “marked”
173
+ version X is obtained by attaching a fixed weight h0 to each point. Then the Voronoi cell of v ∈ Y is
174
+ V (v, Y ) = C((v, h0), X) = {w ∈ Rd : ∥w − v∥ ≤ ∥w − v′∥ for all v′ ∈ Y }.
175
+ The collection of the Voronoi cells of all v ∈ Y is called the Voronoi diagram V(Y ).
176
+ It should
177
+ be mentioned that a Laguerre diagram is not necessarily a tessellation in Rd, at least as long as no
178
+ additional assumptions on the geometric properties of the set X are imposed. Such assumptions have
179
+ been described in detail in [18, 19, 25]. In the present article we are interested in random tessellations
180
+ built on Poisson point processes. More precisely, we consider a Poisson point process ξ in Rd × E, where
181
+ E ⊂ R is a Borel set (an interval), and the corresponding Laguerre diagram L(ξ). Lemmas 1 and 2 in
182
+ [7] (see also [8, Lemma 2.1]) provide sufficient conditions on ξ which ensure that, almost surely, L(ξ) is a
183
+ stationary random face-to-face normal tessellation in Rd. In the following we work under these conditions
184
+ and remark that they are automatically satisfied in the three cases we consider from Section 3.3 on.
185
+ 3.2
186
+ Laguerre tessellations via paraboloid growth processes
187
+ An alternative approach to the construction of Laguerre diagrams uses so-called paraboloid growth pro-
188
+ cesses with overlaps (or simply paraboloid growth process), which were first introduced in [1, 28] in order
189
+ to study the asymptotic geometry of random polytopes; see also [2, 3, 4, 5]. In this section we briefly
190
+ describe this rather useful construction and refer for more details to [8, Section 3.1]. Let
191
+ Π±,x := {(v′, h′) ∈ Rd × R: h′ = ±∥v′ − v∥2 + h}
192
+ be the upward (+) and downward (−) standard paraboloids with apex x := (v, h) ∈ Rd × R, denoted
193
+ as apex Π±,x := x. In case (v, h) = (0, 0) we simply write Π± = Π±,(0,0). Given a set A ⊂ Rd × R we put
194
+ A↓ : = {(v, h′) ∈ Rd × R: (v, h) ∈ A for some h ≥ h′},
195
+ A↑ : = {(v, h′) ∈ Rd × R: (v, h) ∈ A for some h ≤ h′}.
196
+ Following the definition from [1], for a given Poisson point process ξ in Rd × R, we introduce the
197
+ paraboloid growth process Ψ(ξ):
198
+ Ψ(ξ) :=
199
+
200
+ x∈ξ
201
+ Π↑
202
+ +,x.
203
+ It should be noted that, in typical situations, the majority of paraboloids will be completely covered by
204
+ other paraboloids, implying that they do not "contribute" to the model and can thus be omitted without
205
+ losing any information about the set Ψ(ξ). This leads to the definition of extreme points. A point x ∈ ξ
206
+ is called extreme in the paraboloid growth process Ψ(ξ) if and only if its associated paraboloid is not
207
+ fully covered by the paraboloids associated with other points of ξ, i.e., if
208
+ Π↑
209
+ +,x ̸⊂
210
+
211
+ y∈ξ,y̸=x
212
+ Π↑
213
+ +,y.
214
+ We denote by ext(Ψ(ξ)) the set of all extreme points of the paraboloid growth process Ψ(ξ). Using the
215
+ paraboloid growth process we can construct a random diagram in Rd. Given a point x = (v, h) ∈ ξ define
216
+ the Ψ-cell of x as
217
+ CΨ(x, ξ) :=
218
+
219
+ {w ∈ Rd :
220
+
221
+ (w, 0)↑ ∪ (w, 0)↓�
222
+ ∩ bd Ψ(ξ) ∈ Π+,x},
223
+ if x ∈ ext(Ψ(ξ)),
224
+ ∅,
225
+ otherwise,
226
+ where bd A denotes the boundary of a set A.
227
+ In other words, w belongs to CΨ(x, ξ) if and only if
228
+ ∥w − v∥2 + h ≤ ∥w − v′∥2 + h′ for all (v′, h′) ∈ ξ. Thus, the Ψ-cell of an extreme point x of the paraboloid
229
+ growth process Ψ(ξ) is non-empty and coincides with the Laguerre cell C(x, ξ). Next, we construct the
230
+ diagram LΨ(ξ) as the collection of all non-empty Ψ-cells:
231
+ LΨ(ξ) := {CΨ(x, ξ): CΨ(x, ξ) ̸= ∅} = {CΨ(x, ξ): x ∈ ext(Ψ(ξ))}.
232
+ We directly have that LΨ(ξ) = L(ξ).
233
+ 4
234
+
235
+ Figure 3.1: Left panel: Simulation of a β-Voronoi tessellation in R2 with β = 5. Middle panel: Simulation
236
+ of a β′-Voronoi tessellation in R2 with β = 2.5. Right panel: Simulation of a Gaussian-Voronoi tessellation
237
+ in R2.
238
+ 3.3
239
+ Three families of random tessellations
240
+ In this article we consider random tessellations in Rd built on the following three families of Poisson
241
+ point processes. For β > −1 and 0 < γ < ∞ we consider a Poisson point process ηd,β,γ in Rd × [0, +∞)
242
+ whose intensity measure has density
243
+ (v, h) �→ γ cd+1,βhβ,
244
+ cd+1,β := Γ
245
+ � d+1
246
+ 2
247
+ + β + 1
248
+
249
+ π
250
+ d+1
251
+ 2 Γ(β + 1)
252
+ ,
253
+ (3.1)
254
+ with respect to the Lebesgue measure on Rd × [0, +∞). Further, for β > d
255
+ 2 + 1 and 0 < γ < ∞ we
256
+ consider a Poisson point process η′
257
+ d,β,γ in Rd × (−∞, 0) with intensity measure having density
258
+ (v, h) �→ γ c′
259
+ d+1,β(−h)−β,
260
+ c′
261
+ d+1,β :=
262
+ Γ (β)
263
+ π
264
+ d+1
265
+ 2 Γ(β − d+1
266
+ 2 )
267
+ ,
268
+ (3.2)
269
+ with respect to the Lebesgue measure on Rd × (−∞, 0). The constants cd+1,β and c′
270
+ d+1,β in the above
271
+ definitions are introduced for convenience. For example, they make the statement of Theorem 4.1 below
272
+ more transparent. Finally, for λ > 0 and 0 < γ < ∞ we consider a Poisson point process ζd,λ,γ in Rd × R
273
+ whose intensity measure has density
274
+ (v, h) �→ γ eλh,
275
+ with respect to the Lebesgue measure on Rd × R. It was shown in [7, Lemma 3] and in [8, Section
276
+ 3.3] that the Poisson point processes ηd,β,γ and ζd,λ,γ satisfy the sufficient conditions of Lemma 1 and
277
+ Lemma 2 in [7] and, hence, the corresponding Laguerre diagrams Vd,β,γ := L(ηd,β,γ), V′
278
+ d,β,γ := L(η′
279
+ d,β,γ)
280
+ and Gd,λ := L(ζd,λ,γ) are stationary random normal tessellations in Rd, which are called β-Voronoi,
281
+ β′-Voronoi and Gaussian-Voronoi tessellations, respectively. These tessellations have been studied
282
+ in [10, 7, 8, 9], where they were considered in Rd−1 instead of Rd. Simulations of these tessellations in
283
+ the plane are shown in Figure 3.1. Note that although the point process η′
284
+ d,β,γ is well-defined in the range
285
+ β > d+1
286
+ 2 , the corresponding β′-Voronoi tessellation exists in the smaller range β > d
287
+ 2 + 1 only, see [7,
288
+ Lemma 3].
289
+ Remark 3.1. Note that changing the parameter γ amounts to shifting the Poisson point process ζd,λ,γ
290
+ along the height coordinate h. In particular, the distribution of the point process Gd,λ does not depend
291
+ on the choice of γ, which is reflected in our notation.
292
+ Remark 3.2. It will be convenient to extend the above definition of the β-Voronoi tessellation to the
293
+ case β = −1 (with arbitrary γ > 0) by defining Vd,−1,γ := Wd,r(d)γ to be the classical Poisson-Voronoi
294
+ 5
295
+
296
+ tessellation constructed on the homogeneous Poisson point process on Rd with constant intensity r(d)γ,
297
+ where
298
+ r(d) := Γ
299
+ �d + 1
300
+ 2
301
+
302
+ π− d+1
303
+ 2 .
304
+ A justification for this definition is given by the following proposition. For the necessary background on
305
+ point processes and weak convergence we refer to [24, Chapter 3].
306
+ Proposition 3.3. As β ↓ −1, the Poisson process ηd,β,γ converges, weakly on the space of locally finite
307
+ integer-valued measures on Rd × [0, ∞), to the Poisson point process whose intensity measure is the
308
+ Lebesgue measure on Rd × {0} times r(d)γ.
309
+ Proof. Write β = −1 + ε with ε ↓ 0. Then the constant appearing in (3.1) satisfies
310
+ cd+1,β = Γ
311
+ � d+1
312
+ 2
313
+ + ε
314
+
315
+ π
316
+ d+1
317
+ 2 Γ(ε)
318
+ = Γ
319
+ � d+1
320
+ 2
321
+
322
+ π
323
+ d+1
324
+ 2
325
+ ε(1 + o(1)),
326
+ as ε ↓ 0. It follows that for every c > 0 and every bounded Borel set B ⊂ Rd the number of points of the
327
+ Poisson point process ηd,β,γ appearing in B × [0, c] is Poisson-distributed with expectation
328
+ γλd(B)cd+1,β
329
+ � c
330
+ 0
331
+ h−1+εdh −→
332
+ ε↓0 Γ
333
+ �d + 1
334
+ 2
335
+
336
+ π− d+1
337
+ 2 γλd(B) = r(d)γλd(B),
338
+ where λd denotes the d-dimensional Lebesgue measure. Since the right-hand side does not depend on c,
339
+ it follows that, for every 0 < c1 < c2, the expected number of points of ηd,β,γ in B × [c1, c2] converges
340
+ to 0, as ε ↓ 0. Hence, the intensity measure of ηd,β,γ converges as β ↓ −1 to the Lebesgue measure
341
+ on Rd × {0} times r(d)γ, vaguely on the space Rd × [0, ∞). Then, the claim of the proposition follows
342
+ from [16, Theorem 16.16 (iv)] or [24, Propositions 3.6 and 3.19].
343
+ 4
344
+ Affine sections of β, β′- and Gaussian-Voronoi tessellations
345
+ In this section we study the intersection of the d-dimensional random tessellations Vd,β,γ, V′
346
+ d,β,γ and Gd,λ
347
+ with an affine subspace L ⊂ Rd of dimension ℓ ∈ {1, . . . , d − 1}. By stationarity and isotropy of these
348
+ tessellations, we may and will assume without loss of generality that L = Rℓ is the linear subspace of Rd
349
+ spanned by the first ℓ vectors of the standard orthonormal basis of Rd. The intersection of the tessellation
350
+ Vd,β,γ with Rℓ will be denoted by Vd,β,γ ∩ Rℓ. A similar convention is used for the tessellations V′
351
+ d,β,γ and
352
+ Gd,λ. The following theorem identifies the distribution of Vd,β,γ ∩ Rℓ, V′
353
+ d,β,γ ∩ Rℓ and Gd,λ ∩ Rℓ.
354
+ Theorem 4.1. Fix integers d ≥ 2 and 1 ≤ ℓ ≤ d − 1.
355
+ (i) For any β ≥ −1 and γ > 0, Vd,β,γ ∩ Rℓ has the same distribution as Vℓ,β+ d−ℓ
356
+ 2 ,γ.
357
+ (ii) For any β > d
358
+ 2 + 1 and γ > 0, V′
359
+ d,β,γ ∩ Rℓ has the same distribution as V′
360
+ ℓ,β− d−ℓ
361
+ 2 ,γ.
362
+ (iii) For any λ > 0, Gd,λ ∩ Rℓ has the same distribution as Gℓ,λ.
363
+ Before we move on to the proof of Theorem 4.1 we would like to highlight the following special case
364
+ which deals with sections of the classical Poisson-Voronoi tessellation. Together with the results we obtain
365
+ below, this fully answers and resolves the problems raised in [6, 20, 21, 29].
366
+ Corollary 4.2. Fix integers d ≥ 2 and 1 ≤ ℓ ≤ d − 1. Then for any ρ > 0 the intersection of the
367
+ d-dimensional Poisson-Voronoi tessellation Wd,ρ of intensity ρ with Rℓ has the same distribution as
368
+ V
369
+ ℓ, d−ℓ
370
+ 2 −1,π
371
+ d+1
372
+ 2
373
+ ρ/Γ( d+1
374
+ 2 ).
375
+ 6
376
+
377
+ Proof of Theorem 4.1. Let us first consider the case ℓ = d−1 meaning that we intersect with a hyperplane.
378
+ Let ξ be one of the Poisson point processes ηd,β,γ, η′
379
+ d,β,γ or ζd,λ,γ. The atoms of ξ live in the space
380
+ Rd+1 = Rd × R; a generic point in this space is denoted by (v, h) with v = (v1, . . . , vd) ∈ Rd being the
381
+ spatial coordinate and h ∈ R being the height coordinate. The Laguerre tessellation L(ξ) lives in the
382
+ space Rd defined by the equation h = 0. The linear hyperplane L ≡ Rd−1 ⊂ Rd ⊂ Rd+1 with which our
383
+ tessellations are intersected is given by the equations {vd = 0, h = 0}.
384
+ In order to prove the statement we will use the representation of the Laguerre tessellation L(ξ) in
385
+ terms of the paraboloid growth process Ψ(ξ) as described in Section 3.2. We extend the hyperplane
386
+ L ⊂ Rd by adding the height coordinate, namely we consider
387
+ L′ := {(v, h) = (v1, . . . , vd, h) ∈ Rd × R: vd = 0}.
388
+ For every point x = (v, h) ∈ Rd+1, the intersection of the d-dimensional paraboloid Π+,x with L′ is a
389
+ (d − 1)-dimensional upward paraboloid Π+,x′ ∩ L′ in L′ with apex given by
390
+ x′ = f(v, h) := (v1, . . . , vd−1, 0, h + v2
391
+ d) ∈ L′.
392
+ If x runs through all atoms of ξ, then x′ runs through all atoms of the point process f(ξ) := {f(x): x ∈ ξ}
393
+ on L′, which is also a Poisson point process by the mapping theorem; see [17, Theorem 5.1].
394
+ The
395
+ intersection of the Laguerre tessellation L(ξ) with L coincides with the Laguerre tessellation generated
396
+ by the point process f(ξ) (within L).
397
+ Now, we are going to identify the intensity measure µ of the Poisson point process f(ξ). To this end,
398
+ we consider the Poisson point processes ηd,β,γ, η′
399
+ d,β,γ and ζd,λ,γ separately.
400
+ Case (i).
401
+ Let first β > −1. By the mapping theorem for Poisson point processes [17, Theorem 5.1],
402
+ f(ηd,β,γ) := {f(x): x ∈ ηd,β,γ} is a Poisson point process in Rd−1×[0, +∞) ⊂ L′. To compute its intensity
403
+ measure µ, we take some Borel set B ⊂ L, any s > 0 and observe that an atom (v, h) of ηd,β,γ is mapped
404
+ by f to B × [0, s] if and only if (v1, . . . , vd−1) ∈ B and h + v2
405
+ d ≤ s. The latter condition means that h ≤ s
406
+ and |vd| ≤
407
+
408
+ s − h. It follows that the intensity measure µ satisfies
409
+ µ(B × [0, s]) = γ cd+1,β
410
+
411
+ Rd
412
+ � ∞
413
+ 0
414
+ hβ1(f(v, h) ∈ B × [0, s]) dhdv
415
+ = γ cd+1,β
416
+
417
+ Rd
418
+ � s
419
+ 0
420
+ hβ1(v ∈ B × [−
421
+
422
+ s − h,
423
+
424
+ s − h]) dhdv
425
+ = 2γ cd+1,β λd−1(B)
426
+ � s
427
+ 0
428
+ hβ√
429
+ s − h dh
430
+ = γ Γ( d+1
431
+ 2
432
+ + β + 1)
433
+ π
434
+ d
435
+ 2 Γ(β + 3
436
+ 2)
437
+ λd−1(B) sβ+ 3
438
+ 2
439
+ β + 3
440
+ 2
441
+ .
442
+ (4.1)
443
+ In the case β = −1, we let ηd,−1,γ be the Poisson point process on Rd × {0} (which is considered as a
444
+ subset of Rd+1) whose intensity with respect to the Lebesgue measure on Rd ×{0} is constant and equals
445
+ Γ( d+1
446
+ 2 )π− d+1
447
+ 2 γ. Thus, the heights of all points in ηd,−1,γ are 0. Then, the Laguerre tessellation generated
448
+ by ηd,−1,γ on Rd coincides with Vd,−1,γ by our convention described in Remark 3.2. To compute the
449
+ intensity measure µ of the Poisson point process f(ηd,−1,γ), we take some Borel set B ⊂ L, any s > 0
450
+ and observe that an atom (v, 0) of ηd,−1,γ is mapped by f to B × [0, s] if and only if (v1, . . . , vd−1) ∈ B
451
+ and v2
452
+ d ≤ s. It follows that the intensity measure µ satisfies
453
+ µ(B × [0, s]) = γ Γ( d+1
454
+ 2 )
455
+ π
456
+ d+1
457
+ 2
458
+
459
+ Rd 1(f(v, 0) ∈ B × [0, s]) dv
460
+ = γ 2Γ( d+1
461
+ 2 )
462
+ π
463
+ d+1
464
+ 2
465
+ λd−1(B)√s.
466
+ (4.2)
467
+ 7
468
+
469
+ By differentiating (4.1) and (4.2) with respect to s, it follows that for all β ≥ −1, the intensity
470
+ measure of f(ηd,β,γ) has density
471
+ (v′, 0, h) �→ γ cd,β+ 1
472
+ 2 hβ+ 1
473
+ 2 ,
474
+ v′ ∈ Rd−1, h > 0,
475
+ with respect to the Lebesgue measure on L × [0, +∞). Consequently, the Laguerre tessellation generated
476
+ by f(ηd,β,γ) within L has the same distribution as Vd−1,β+ 1
477
+ 2 ,γ.
478
+ Case (ii).
479
+ Next, we deal with f(η′
480
+ d,β,γ) := {f(x): x ∈ η′
481
+ d,β,γ}. Let us first consider only those points
482
+ of f(η′
483
+ d,β,γ) that have negative height and compute the intensity measure µ of these points. The points
484
+ with positive height coordinate have no influence on the resulting tessellation, as we will argue below.
485
+ To determine µ, we take some Borel set B ⊂ L, any s < 0 and observe that an atom (v, −g) of η′
486
+ d,β,γ
487
+ (with g > 0) is mapped by f to B × (−∞, s] if and only if (v1, . . . , vd−1) ∈ B and v2
488
+ d ≤ s + g. The latter
489
+ condition means that g ≥ −s and |vd| ≤ √s + g. It follows that the intensity measure µ satisfies
490
+ µ(B × (−∞, s]) = γ c′
491
+ d+1,β
492
+
493
+ Rd
494
+ � ∞
495
+ 0
496
+ g−β1(f(v, −g) ∈ B × (−∞, s]) dgdv
497
+ = γ c′
498
+ d+1,β
499
+
500
+ Rd
501
+ � ∞
502
+ −s
503
+ g−β1(v ∈ B × [−√s + g, √s + g]) dgdv
504
+ = 2γ c′
505
+ d+1,β λd−1(B)
506
+ � ∞
507
+ −s
508
+ g−β√g + s dg
509
+ = γ
510
+ Γ(β − 1
511
+ 2)
512
+ π
513
+ d
514
+ 2 Γ(β − d+1
515
+ 2 )
516
+ λd−1(B)(−s)−β+ 3
517
+ 2
518
+ β − 3
519
+ 2
520
+ .
521
+ Differentiating, we conclude that the intensity measure µ has density
522
+ (v′, 0, h) �→ γ c′
523
+ d,β+ 1
524
+ 2 (−h)−β+ 1
525
+ 2 ,
526
+ v′ ∈ Rd−1, h < 0,
527
+ with respect to the Lebesgue measure on L × (−∞, 0). So, by the mapping theorem [17, Theorem 5.1],
528
+ the restriction of f(η′
529
+ d,β,γ) to L × (−∞, 0) is a Poisson point process with the same intensity measure
530
+ as η′
531
+ d−1,β− 1
532
+ 2 ,γ. The Laguerre tessellation generated by this Poisson point process within L ≡ Rd−1 has
533
+ the same distribution as V′
534
+ d−1,β− 1
535
+ 2 ,γ.
536
+ It remains to observe that adding the points of f(η′
537
+ d,β,γ) with
538
+ positive height coordinate does not change the Laguerre tessellation. Indeed, every point in Rd−1 × {0}
539
+ is an accumulation point of η′
540
+ d−1,β− 1
541
+ 2 ,γ, hence the lower boundary of the paraboloid growth process
542
+ Ψ(η′
543
+ d−1,β− 1
544
+ 2 ,γ) is contained in Rd−1×(−∞, 0] and points with positive height coordinate have no influence
545
+ on the tessellation.
546
+ Case (iii).
547
+ Finally, we consider f(ζd,λ,γ), which is a Poisson point process in L′.
548
+ To compute its
549
+ intensity measure µ, we take some Borel set B ⊂ L, any s ∈ R and observe that an atom (v, h) of ζd,λ,γ
550
+ is mapped by f to B × (−∞, s] if and only if (v1, . . . , vd−1) ∈ B and h + v2
551
+ d ≤ s. It follows that
552
+ µ(B × (−∞, s]) = γ
553
+
554
+ Rd
555
+
556
+ R
557
+ eλh1(f(v, h) ∈ B × (−∞, s)) dhdv
558
+ = 2γ λd−1(B)
559
+ � s
560
+ −∞
561
+ ehλ√
562
+ s − h dh
563
+ = γ λd−1(B)
564
+ √πeλs
565
+ λ3/2 .
566
+ Thus, the density of the intensity measure of f(ζd,λ,γ) is given by
567
+ (v′, 0, h) �→ γ
568
+
569
+ π/λ eλh,
570
+ v′ ∈ Rd−1, h ∈ R.
571
+ 8
572
+
573
+ Hence, the Laguerre tessellation generated by f(ζd,λ,γ) within L has the same distribution as Gd−1,λ
574
+ (recall that the parameter γ does not influence the distribution of the Gaussian-Voronoi tessellation).
575
+ This proves the claim for ℓ = d − 1. For general 1 ≤ ℓ ≤ d − 2 we can inductively repeat the above
576
+ argument d − ℓ times.
577
+ 5
578
+ Sectional Poisson-Voronoi tessellations
579
+ 5.1
580
+ Face intensities and the expected volume of the typical cell
581
+ As we have shown in Corollary 4.2, the sectional Poisson-Voronoi tessellation Wd,ρ ∩ Rℓ can be identified
582
+ with a suitable β-Voronoi tessellation. This makes it possible to compute explicitly several functionals
583
+ of the sectional Poisson-Voronoi tessellation. We begin with a formula for the intensity of j-dimensional
584
+ faces. This quantity, denoted by γj(Wd,ρ ∩ Rℓ), has been defined in Section 2.
585
+ Theorem 5.1. Let d ≥ 2, 1 ≤ ℓ ≤ d − 1 and 0 ≤ j ≤ ℓ. Then, for any ρ > 0, we have
586
+ γj(Wd,ρ ∩ Rℓ) = ρ
587
+
588
+ d 2Jℓ+1,ℓ−j+1( d−ℓ−1
589
+ 2
590
+
591
+
592
+ 2
593
+ d(ℓ + 1)
594
+ Γ( (ℓ+1)(d−1)
595
+ 2
596
+ + 1)Γ(ℓ + 1 − ℓ
597
+ d)Γ( d
598
+ 2 + 1)ℓ+1− ℓ
599
+ d
600
+ Γ( (ℓ+1)(d−1)+1
601
+ 2
602
+ )Γ( ℓ+2
603
+ 2 )Γ( d+1
604
+ 2 )ℓ+1
605
+ ,
606
+ where
607
+ Jℓ+1,ℓ−j+1
608
+ �d − ℓ − 1
609
+ 2
610
+
611
+ =
612
+ �ℓ + 1
613
+ j
614
+
615
+ Γ( (d−1)(ℓ+1)+3
616
+ 2
617
+ )
618
+ √π Γ( (d−1)(ℓ+1)
619
+ 2
620
+ + 1)
621
+ � +∞
622
+ −∞
623
+ (cosh u)−(d−1)(ℓ+1)−2
624
+ ×
625
+
626
+ 1
627
+ 2 + i Γ( d+1
628
+ 2 )
629
+ √π Γ( d
630
+ 2)
631
+ � u
632
+ 0
633
+ (cosh v)d−1dv
634
+ �j
635
+ du
636
+ (5.1)
637
+ and i = √−1 stands for the imaginary unit.
638
+ Proof. By Corollary 4.2 we have γj(Wd,ρ ∩ Rℓ) = γj(Vℓ, d−ℓ−2
639
+ 2
640
+ ,r), where r = r(d)ρ = π
641
+ d+1
642
+ 2 ρ/Γ( d+1
643
+ 2 ). The
644
+ formula for γj(Vℓ, d−ℓ−2
645
+ 2
646
+ ,r) can be obtained by combining [7, Theorem 6] (which we apply with parameters
647
+ d := ℓ + 1, β := d−ℓ−2
648
+ 2
649
+ and j := ℓ − j) with [7, Proposition 3] (with parameters d := ℓ + 1, k := ℓ + 1 − j).
650
+ Note that [7, Theorem 6] refers to [7, Theorem 2] which has to be applied with parameters d := ℓ + 1,
651
+ s := 1, ν := 0, γ := r.
652
+ Remark 5.2. The quantities Jd+1,k(β) for general d ≥ 0, k ∈ {1, . . . , d + 1} and β ≥ −1 have a natural
653
+ geometric meaning. Namely, Jd+1,k(β) is equal to the expected sum of internal angles at its k-vertex
654
+ faces of a random beta-simplex, which is defined as the convex hull of d + 1 independent random points
655
+ with density proportional to (1 − ∥x∥2)β in the d-dimensional unit ball, see [7, Section 6.1] and [11] for
656
+ details. From this interpretation it directly follows that
657
+ J1,1(β) = J2,1(β) = J2,2(β) = 1,
658
+ J3,1(β) = 1
659
+ 2,
660
+ J3,2(β) = 3
661
+ 2,
662
+ J3,3(β) = 1,
663
+ Jℓ+1,ℓ(β) = ℓ + 1
664
+ 2
665
+ ,
666
+ Jℓ+1,ℓ+1(β) = 1,
667
+ for any ℓ ≥ 1 and β ≥ −1. Moreover, if we denote by Σd a regular d-dimensional simplex and by σk(Σd)
668
+ the internal angle sum at its k-vertex faces, then
669
+ Jd+1,k(∞) := lim
670
+ β→∞ Jd+1,k(β) = σk(Σd)
671
+ according to [7, Proposition 2].
672
+ 9
673
+
674
+ d = 2
675
+ d = 3
676
+ d = 4
677
+ d = 5
678
+ d = 6
679
+ ℓ = 1
680
+ π
681
+ 4√ρ
682
+ 3
683
+
684
+ 3
685
+ 3
686
+ √4πρ·Γ
687
+ � 5
688
+ 3
689
+
690
+ 15π3/2
691
+ 64· 4
692
+ √8ρ·Γ
693
+ � 3
694
+ 4
695
+
696
+ 7 5
697
+
698
+ 5
699
+ 3· 5
700
+
701
+ 648π2ρ·Γ
702
+ � 9
703
+ 5
704
+
705
+ 2835· 6
706
+
707
+ 3·π3/2
708
+ 16384· 6
709
+ √32ρ·Γ
710
+ � 5
711
+ 6
712
+
713
+ ℓ = 2
714
+
715
+ 5· 3
716
+
717
+ 4
718
+ 3
719
+
720
+ 3π5ρ2·Γ
721
+ � 7
722
+ 3
723
+
724
+ 24
725
+
726
+ 2
727
+ 35√πρ
728
+ 77·24/5
729
+ 5·153/5·π9/5·ρ2/5·Γ
730
+ � 13
731
+ 5
732
+
733
+ 50· 3
734
+
735
+ 6
736
+ 143· 3
737
+ √ρ·Γ
738
+ � 8
739
+ 3
740
+
741
+ ℓ = 3
742
+
743
+
744
+ 280665·π3/2
745
+ 821248· 4
746
+
747
+ 2·ρ3/4·Γ
748
+ � 13
749
+ 4
750
+
751
+ 56·153/5· 5√
752
+ 2/π
753
+ 187·ρ3/5·Γ
754
+ � 17
755
+ 5
756
+
757
+ 17320875·√
758
+ 3/2·π
759
+ 176201728·√ρ
760
+ ℓ = 4
761
+
762
+
763
+
764
+ 144848704·23/5
765
+ 156/5π8/5�
766
+ 1692197−141120π2
767
+
768
+ ρ4/5Γ
769
+ � 21
770
+ 5
771
+
772
+ 15·62/3
773
+ 13·ρ2/3·Γ
774
+ � 13
775
+ 3
776
+
777
+ ℓ = 5
778
+
779
+
780
+
781
+
782
+ 6823504578515625·35/6·π3/2
783
+ 4912276871446528· 6
784
+
785
+ 2·ρ5/6·Γ
786
+ � 31
787
+ 6
788
+
789
+ Table 5.1: E vol(Zd,ℓ,ρ) for small values of d and ℓ.
790
+ As a corollary of Theorem 5.1 we can compute the expected volume of the typical cell Zd,ℓ,ρ of the
791
+ sectional Poisson-Voronoi tessellation Wd,ρ ∩ Rℓ. Note that the volume does not change under shifts,
792
+ which is why it does not matter how to choose the centre function in the definition of the typical cell.
793
+ For ℓ = 1 and ℓ = 2 this quantity has been studied by Miles [20] who showed that
794
+ E vol(Zd,1,ρ) = ρ− 1
795
+ d
796
+ Γ(d − 1
797
+ 2)Γ( d+1
798
+ 2 )2
799
+ (d − 1)!Γ(2 − 1
800
+ d)Γ( d
801
+ 2)Γ( d
802
+ 2 + 1)1− 1
803
+ d
804
+ ,
805
+ E vol(Zd,2,ρ) = ρ− 2
806
+ d
807
+ 3d · Γ( 3d
808
+ 2 − 1)Γ( d+1
809
+ 2 )3
810
+ πΓ( 3d−1
811
+ 2
812
+ )Γ(3 − 2
813
+ d)Γ( d
814
+ 2 + 1)3− 2
815
+ d
816
+ ,
817
+ see Formulas (4.1) and (4.4) in [20]. Our result generalizes this to arbitrary 1 ≤ ℓ ≤ d − 1; special cases
818
+ with small values of d and ℓ are summarized in Table 5.1.
819
+ Corollary 5.3. Let ρ > 0. Then, for any d ≥ 2 and 1 ≤ ℓ ≤ d − 1 we have
820
+ E vol(Zd,ℓ,ρ) = ρ− ℓ
821
+ d
822
+ d(ℓ + 1)
823
+ 2Jℓ+1,1( d−ℓ−1
824
+ 2
825
+
826
+
827
+ 2
828
+ Γ( (ℓ+1)(d−1)+1
829
+ 2
830
+ )
831
+ Γ( (ℓ+1)(d−1)
832
+ 2
833
+ + 1)
834
+ Γ( ℓ+2
835
+ 2 )
836
+ Γ(ℓ + 1 − ℓ
837
+ d)
838
+ Γ( d+1
839
+ 2 )ℓ+1
840
+ Γ( d
841
+ 2 + 1)ℓ+1− ℓ
842
+ d
843
+ .
844
+ Proof. It is known from [27, Equation (10.4)] that E vol(Zd,ℓ,ρ) = γℓ(Wd,ρ ∩ Rℓ)−1. The right-hand side
845
+ is known from Theorem 5.1.
846
+ Remark 5.4. Corollary 5.3 stays true for ℓ = d where it gives the expected volume of a typical Poisson-
847
+ Voronoi cell to be E vol(Zd,d,ρ) = 1/ρ. The quantity Jℓ+1,1(− 1
848
+ 2) cancels with the Gamma-factors by
849
+ the formula given in [12, Theorem 3.9] and the Legendre duplication formula for the Gamma function.
850
+ Theorem 5.1 also stays true for ℓ = d and gives the intensity of j-faces in the Poisson-Voronoi tessellation;
851
+ see [11, Remark 2.10] for another formula.
852
+ In the next result we compute the limit of the intensity of j-dimensional faces in the d-dimensional
853
+ Poisson-Voronoi tessellation intersected with Rℓ in the regime when d → ∞ while ℓ ∈ N stays fixed.
854
+ Proposition 5.5. Fix some ℓ ∈ N and 0 ≤ j ≤ ℓ.
855
+ Let (ρd)d∈N be a positive sequence such that
856
+ lim
857
+ d→∞(ρd)1/d = κ > 0. Then,
858
+ lim
859
+ d→∞ γj(Wd,ρd ∩ Rℓ) = Jℓ+1,ℓ−j+1(∞)(κ2πe)
860
+
861
+ 2
862
+
863
+ ℓ + 1
864
+ 2(ℓ − 1)!
865
+ Γ( ℓ
866
+ 2)
867
+ ,
868
+ where Jℓ+1,ℓ−j+1(∞) is the sum of angles at (ℓ − j)-dimensional faces of a regular ℓ-dimensional simplex
869
+ Σℓ; see Remark 5.2.
870
+ Remark 5.6. For example, we may take ρd = ρ > 0 to be constant, in which case κ = 1.
871
+ 10
872
+
873
+ Proof of Proposition 5.5. By Theorem 5.1,
874
+ lim
875
+ d→∞ γj(Wd,ρd ∩ Rℓ) = lim
876
+ d→∞
877
+ 2Jℓ+1,ℓ−j+1( d−ℓ−1
878
+ 2
879
+
880
+
881
+ 2
882
+ d(ℓ + 1)ρ
883
+ − ℓ
884
+ d
885
+ d
886
+ Γ( (ℓ+1)(d−1)
887
+ 2
888
+ + 1)
889
+ Γ( (ℓ+1)(d−1)+1
890
+ 2
891
+ )
892
+ Γ(ℓ + 1 − ℓ
893
+ d)
894
+ Γ( ℓ+2
895
+ 2 )
896
+ Γ( d
897
+ 2 + 1)ℓ+1− ℓ
898
+ d
899
+ Γ( d+1
900
+ 2 )ℓ+1
901
+ = 2Jℓ+1,ℓ−j+1(∞)(κ2π)
902
+
903
+ 2
904
+ ℓ + 1
905
+ 2Γ(ℓ)
906
+ Γ( ℓ
907
+ 2) lim
908
+ d→∞
909
+ 1
910
+ d
911
+ Γ( (ℓ+1)(d−1)
912
+ 2
913
+ + 1)
914
+ Γ( (ℓ+1)(d−1)+1
915
+ 2
916
+ )
917
+ Γ( d
918
+ 2 + 1)ℓ+1− ℓ
919
+ d
920
+ Γ( d+1
921
+ 2 )ℓ+1
922
+ .
923
+ By Stirling’s formula for the Gamma function, Γ(z) =
924
+
925
+ 2π/z(z/e)z(1 + O(z−1)). Since lim
926
+ n→∞
927
+ Γ(n)nz
928
+ Γ(n+z) = 1
929
+ we get
930
+ lim
931
+ d→∞ γj(Wd,ρd ∩ Rℓ) = Jℓ+1,ℓ−j+1(∞)(κ2πe)
932
+
933
+ 2
934
+
935
+ ℓ + 1
936
+ 2Γ(ℓ)
937
+ Γ( ℓ
938
+ 2) lim
939
+ d→∞
940
+ � d
941
+
942
+ � ℓ
943
+ 2d = Jℓ+1,ℓ−j+1(∞)(κ2πe)
944
+
945
+ 2
946
+
947
+ ℓ + 1
948
+ 2Γ(ℓ)
949
+ Γ( ℓ
950
+ 2) .
951
+ This completes the argument.
952
+ We now study the asymptotic behaviour of the expected volume of the typical cell in the sections of
953
+ fixed dimension ℓ of a high-dimensional Poisson-Voronoi tessellation.
954
+ Corollary 5.7. Let (ρd)d∈N be a positive sequence such that lim
955
+ d→∞(ρd)1/d = κ > 0. Then, for every ℓ ∈ N,
956
+ lim
957
+ d→∞ E vol(Zd,ℓ,ρd) =
958
+
959
+ ℓ + 1
960
+ Jℓ+1,1(∞)(κ2πe)
961
+
962
+ 2
963
+ Γ( ℓ
964
+ 2)
965
+ 2(ℓ − 1)!,
966
+ where Jℓ+1,1(∞) is the sum of solid angles of the regular ℓ-dimensional simplex Σℓ at its vertices; see
967
+ Remark 5.2.
968
+ Proof. This follows from the fact that E vol(Zd,ℓ,ρd) = γℓ(Wd,ρd ∩ Rℓ)−1 (see [27, Equation (10.4)]) by
969
+ applying Proposition 5.5 with j = ℓ.
970
+ In the special cases ℓ = 1, 2 and for every fixed ρ > 0, Corollary 5.7 combined with the results of
971
+ Remark 5.2 yields, for any constant ρ > 0, the limit relations
972
+ lim
973
+ d→∞ E vol(Zd,1,ρ) =
974
+ 1
975
+
976
+ 2e,
977
+ and
978
+ lim
979
+ d→∞ E vol(Zd,2,ρ) =
980
+
981
+ 3
982
+ eπ ,
983
+ which were already known from the work of Miles [20, pp. 318, 319]. Moreover, for ℓ = 3 we get
984
+ lim
985
+ d→∞ E vol(Zd,3,ρ) =
986
+
987
+ 4e3/2(3 arccos(1/3) − π)
988
+ �−1,
989
+ for example.
990
+ This follows from the fact that the solid angle at a vertex of a regular tetrahedron is
991
+ θ :=
992
+ 1
993
+ 4π(3 arccos(1/3) − π), implying that J4,1(∞) = σ1(Σ3) = 4θ = 3
994
+ π arccos(1/3) − 1.
995
+ 5.2
996
+ Expected intrinsic volumes and f-vectors of typical k-faces
997
+ Together with the volume of the typical cell Zd,ℓ,ρ of the sectional Poisson-Voronoi tessellation we can
998
+ consider its intrinsic volumes. We recall from [27, p. 222] that the intrinsic volume Vm(K) of order
999
+ 0 ≤ m ≤ d of a compact convex set K ⊂ Rd may be defined as
1000
+ Vm(K) :=
1001
+ d!
1002
+ m!(d − m)!
1003
+ Γ( m
1004
+ 2 + 1)Γ( d−m
1005
+ 2
1006
+ + 1)
1007
+ Γ( d
1008
+ 2 + 1)
1009
+ Eλm(K|L),
1010
+ where L ⊂ Rd is a uniformly distributed random subspace of dimension m, K|L denotes the orthogonal
1011
+ projection of K onto L and λm(K|L) its m-dimensional Lebesgue measure. In addition, instead of the
1012
+ 11
1013
+
1014
+ typical sectional cell we can consider for 1 ≤ k ≤ ℓ the typical k-face Z(k)
1015
+ d,ℓ,ρ of the sectional Poisson-
1016
+ Voronoi tessellation Wd,ρ ∩ Rℓ, see Section 2 or [27, page 450] for a formal definition. For example, for
1017
+ k = ℓ we get back the typical cell, for k = ℓ − 1 the typical facet and for k = 1 the typical edge of
1018
+ the sectional tessellation. Using the results from [27, pages 466-467] for general stationary and isotropic
1019
+ random tessellations we conclude (by combining the last two formulas there) that
1020
+ EVj(Z(k)
1021
+ d,ℓ,ρ) =
1022
+ ℓ!
1023
+ j!(ℓ − j)!
1024
+ Γ( j
1025
+ 2 + 1)Γ( ℓ−j
1026
+ 2 + 1)
1027
+ Γ( ℓ
1028
+ 2 + 1)
1029
+ γk−j(Wd,ρ ∩ Rℓ−j)
1030
+ γk(Wd,ρ ∩ Rℓ)
1031
+ ,
1032
+ (5.2)
1033
+ where d ≥ 2, and 1 ≤ ℓ ≤ d − 1, 0 ≤ k ≤ ℓ and 0 ≤ j ≤ k. This expression can be made fully explicit in
1034
+ view of Corollary 5.3:
1035
+ EVj(Z(k)
1036
+ d,ℓ,ρ) =
1037
+
1038
+ ρ
1039
+ Γ( d
1040
+ 2 + 1)
1041
+ �− j
1042
+ d
1043
+ (ℓ + 1)!
1044
+ j!(ℓ − j + 1)!
1045
+ Γ( j
1046
+ 2 + 1)
1047
+ πj/2
1048
+ Jℓ−j+1,ℓ−k+1( d−ℓ+j−1
1049
+ 2
1050
+ )
1051
+ Jℓ+1,ℓ−k+1( d−ℓ−1
1052
+ 2
1053
+ )
1054
+ × Γ( (ℓ−j+1)(d−1)
1055
+ 2
1056
+ + 1)Γ(ℓ − j + 1 − ℓ−j
1057
+ d )
1058
+ Γ( (ℓ+1)(d−1)
1059
+ 2
1060
+ + 1)Γ(ℓ + 1 − ℓ
1061
+ d)
1062
+ Γ( (ℓ+1)(d−1)+1
1063
+ 2
1064
+ )Γ( d+1
1065
+ 2 )j
1066
+ Γ( (ℓ−j+1)(d−1)+1
1067
+ 2
1068
+ )Γ( d
1069
+ 2 + 1)j .
1070
+ (5.3)
1071
+ For intersections of dimension ℓ = 2, Miles [20, Equations (4.4), (4.5) on p. 319] derived a formula for the
1072
+ expected area and perimeter of the typical cell which are particular cases of the above formula. Using
1073
+ Proposition 5.5 it is easy to derive the large d limit of (5.2). Namely, if lim
1074
+ d→∞(ρd)1/d = κ > 0, then
1075
+ lim
1076
+ d→∞ EVj(Z(k)
1077
+ d,ℓ,ρd) =
1078
+
1079
+ ℓ + 1
1080
+ √ℓ − j + 1 ·
1081
+ Γ( j
1082
+ 2 + 1)
1083
+ j!(κ2πe)j/2 · Jℓ−j+1,ℓ−k+1(∞)
1084
+ Jℓ+1,ℓ−k+1(∞) .
1085
+ For intersections of dimensions ℓ = 2 and 3 (and k = ℓ) we recover results of Miles [20, pp. 319, 320].
1086
+ Finally, we deal with the expected number of j-dimensional faces of the typical cell of the sectional
1087
+ Voronoi tessellation Wd,ρ ∩ Rℓ, which we denote by Efj(Zd,ℓ,ρ), for ρ > 0, d ≥ 2, 1 ≤ ℓ ≤ d − 1 and
1088
+ 0 ≤ j ≤ ℓ−1. Using the fact that, with probability 1, each j-dimensional face of the sectional tessellation
1089
+ is contained in the boundary of precisely ℓ − j + 1 of its cells (by normality of the tessellation), it follows
1090
+ that
1091
+ Efj(Zd,ℓ,ρ) = (ℓ − j + 1)γj(Wd,ρ ∩ Rℓ)
1092
+ γℓ(Wd,ρ ∩ Rℓ).
1093
+ We can now apply Corollary 5.3 to conclude that
1094
+ Efj(Zd,ℓ,ρ) = (ℓ − j + 1)Jℓ+1,ℓ−j+1( d−ℓ−1
1095
+ 2
1096
+ )
1097
+ Jℓ+1,1( d−ℓ−1
1098
+ 2
1099
+ )
1100
+ ,
1101
+ independently of ρ. Clearly, Ef0(Zd,1,ρ) = 2 for any d ≥ 2. Also, Efj(Zd,2,ρ) = 6 for any d ≥ 2 and
1102
+ 0 ≤ j ≤ 1, since the sectional Voronoi tessellation is stationary and normal. Some non-trivial values for
1103
+ space dimensions d = 4, 5, 6 are collected in Table 5.2.
1104
+ Similarly, we can compute the expected number of j-dimensional faces of the typical k-dimensional
1105
+ face Z(k)
1106
+ d,ℓ,ρ of the sectional Poisson-Voronoi tessellation Wd,ρ ∩ Rℓ for d ≥ 2, 1 ≤ ℓ ≤ d − 1, 1 ≤ k ≤ ℓ and
1107
+ 0 ≤ j ≤ k − 1:
1108
+ Efj(Z(k)
1109
+ d,ℓ,ρ) = (k − j + 1)Jℓ+1,ℓ−j+1( d−ℓ−1
1110
+ 2
1111
+ )
1112
+ Jℓ+1,ℓ−k+1( d−ℓ−1
1113
+ 2
1114
+ ).
1115
+ In the large d limit this becomes
1116
+ lim
1117
+ d→∞ Efj(Z(k)
1118
+ d,ℓ,ρ) = (k − j + 1)Jℓ+1,ℓ−j+1(∞)
1119
+ Jℓ+1,ℓ−k+1(∞).
1120
+ Again, for ℓ = 2, 3 we recover results of Miles [20, p. 320].
1121
+ 12
1122
+
1123
+ d = 4, ℓ = 3
1124
+ d = 5, ℓ = 3
1125
+ d = 5, ℓ = 4
1126
+ d = 6, ℓ = 3
1127
+ d = 6, ℓ = 4
1128
+ d = 6, ℓ = 5
1129
+ j = 0
1130
+ 10 240
1131
+ 401
1132
+ 67 200π2
1133
+ 26 741
1134
+ 4 233 600π2
1135
+ 1 692 197−141 120π2
1136
+ 524 288
1137
+ 21 509
1138
+ 52 003
1139
+ 400
1140
+ 34 394 098 106 368
1141
+ 37 477 698 299
1142
+ j = 1
1143
+ 15 360
1144
+ 401
1145
+ 100 800π2
1146
+ 26 741
1147
+ 8 467 200π2
1148
+ 1 692 197−141 120π2
1149
+ 786 432
1150
+ 21 509
1151
+ 52 003
1152
+ 200
1153
+ 85 985 245 265 920
1154
+ 37 477 698 299
1155
+ j = 2
1156
+ 5 922
1157
+ 401
1158
+ 2 + 33 600π2
1159
+ 26 741
1160
+ 10 153 182+4 233 600π2
1161
+ 1 692 197−141 120π2
1162
+ 305 162
1163
+ 21 509
1164
+ 162 009
1165
+ 1 000
1166
+ 74 276 903 321 600
1167
+ 37 477 698 299
1168
+ j = 3
1169
+
1170
+
1171
+ 10 153 182
1172
+ 1 692 197−141 120π2
1173
+
1174
+ 64 003
1175
+ 2 000
1176
+ 25 430 109 716 480
1177
+ 37 477 698 299
1178
+ j = 4
1179
+
1180
+
1181
+
1182
+
1183
+
1184
+ 53 194 508 510
1185
+ 707 126 383
1186
+ Table 5.2: Efj(Zd,ℓ,ρ) for small values of d, ℓ and j.
1187
+ 6
1188
+ Convergence to the Gaussian-Voronoi tessellation in high dimensions
1189
+ In Sections 5.1 and 5.2 we computed explicitly several characteristics of the sectional Poisson-Voronoi
1190
+ tessellation Wd,ρd∩Rℓ and the limits of these characteristics in the regime when d → ∞ and (ρd)1/d → κ >
1191
+ 0, while ℓ ∈ N stays fixed. It turns out that these limits coincide with the corresponding characteristics
1192
+ of the tessellation Gℓ,λ with λ = κ2πe. For example, for the typical cell Z(Gℓ,λ) of the Gaussian-Voronoi
1193
+ tessellation Gℓ,λ it is known from [8, Section 5] (where the special case λ = 1/2 has been considered) that
1194
+ E vol(Z(Gℓ,λ)) =
1195
+
1196
+ ℓ + 1
1197
+ Jℓ+1,1(∞)λ
1198
+
1199
+ 2
1200
+ Γ( ℓ
1201
+ 2)
1202
+ 2(ℓ − 1)!.
1203
+ This formula coincides with the one obtained in Corollary 5.7 if we choose λ = κ2πe. In the next two
1204
+ theorems we explain this and other similar coincidences by proving weak convergence of the corresponding
1205
+ tessellations and the typical cells.
1206
+ Theorem 6.1. Take any positive sequence (ρd)d∈N with limd→∞(ρd)1/d = κ > 0 and let ℓ ∈ N be
1207
+ fixed. Then, as d → ∞, the sectional Poisson-Voronoi tessellation Wd,ρd ∩ Rℓ converges to Gℓ,κ2πe in the
1208
+ following sense: It is possible to define all these random tessellations on the same probability space such
1209
+ that for every ℓ-dimensional ball BR ⊂ Rℓ of radius R > 0 centred at the origin the probability that the
1210
+ restrictions of Wd,ρd ∩ Rℓ and Gℓ,κ2πe to BR coincide, converges to 1, as d → ∞.
1211
+ Remark 6.2. The skeleton of a (random) tessellation T is the (random) closed set skel(T ) = �
1212
+ c∈T bd c,
1213
+ where bd c denotes the topological boundary of the cell c.
1214
+ The mode of convergence appearing in
1215
+ Theorem 6.1 implies that the random closed set skel(Wd,ρd ∩ Rℓ) converges to the random closed set
1216
+ skel(Gℓ,κ2πe) weakly as d → ∞; see [27, Chapter 2] for this concept.
1217
+ Proof of Theorem 6.1. The proof of this theorem basically follows the same route as the proof of The-
1218
+ orem 4.2 in [8], which is the reason why we leave out some details here. By Corollary 4.2, the sectional
1219
+ tessellation Wd,ρd ∩ Rℓ has the same distribution as Vℓ,βd,γd with
1220
+ βd = 1
1221
+ 2(d − ℓ) − 1
1222
+ and
1223
+ γd = π
1224
+ d+1
1225
+ 2 Γ
1226
+ �d + 1
1227
+ 2
1228
+ �−1
1229
+ ρd.
1230
+ We will prove that, as d → ∞, the Poisson point processes ηℓ,βd,γd converge, after an appropriate vertical
1231
+ shift, to ζℓ,κ2πe,1; see Section 3.3 for their definitions. The vertical shift Qd : Rℓ+1 → Rℓ+1 is given by
1232
+ Qd(v, h) = (v, h − ad) with
1233
+ ad = 1
1234
+ π
1235
+ �πΓ( d−ℓ
1236
+ 2 )
1237
+ ρd
1238
+
1239
+ 2
1240
+ d−ℓ−2 .
1241
+ (6.1)
1242
+ Note that applying such a vertical shift to a point process does not change the resulting Laguerre tessel-
1243
+ lation since it amounts to shifting all paraboloids along the height coordinate.
1244
+ 13
1245
+
1246
+ In the following, we show that the intensity function of the Poisson point process ξℓ,d := Qd(ηℓ,βd,γd)
1247
+ converges, as d → ∞, to the intensity function of ξℓ,∞ := ζℓ,κ2πe,1 uniformly on every compact subset of
1248
+ Rℓ+1. Indeed, the intensity function of Qd(ηℓ,βd,γd) is given by
1249
+ fd(v, h) = ρdπ
1250
+ d−ℓ
1251
+ 2
1252
+ Γ( d−ℓ
1253
+ 2 )(h + ad)
1254
+ d−ℓ−2
1255
+ 2
1256
+ 1
1257
+
1258
+ h + ad > 0
1259
+
1260
+ = ρdπ
1261
+ d−ℓ
1262
+ 2
1263
+ Γ( d−ℓ
1264
+ 2 )a
1265
+ d−ℓ−2
1266
+ 2
1267
+ d
1268
+
1269
+ 1 + h
1270
+ ad
1271
+ � d−ℓ−2
1272
+ 2
1273
+ 1
1274
+
1275
+ h + ad > 0
1276
+
1277
+ =
1278
+
1279
+ 1 + h
1280
+ ad
1281
+ �ad· d−ℓ−2
1282
+ 2ad 1
1283
+
1284
+ h + ad > 0
1285
+
1286
+ .
1287
+ Stirling’s formula for the Gamma function and (6.1) yield
1288
+ lim
1289
+ d→∞
1290
+ d − ℓ − 2
1291
+ 2ad
1292
+ =
1293
+ 1
1294
+ κ2π lim
1295
+ d→∞
1296
+ d − ℓ + 2
1297
+ 2Γ( d−ℓ
1298
+ 2 )
1299
+ 2
1300
+ d−ℓ+2
1301
+ = κ2πe lim
1302
+ d→∞
1303
+ d − ℓ + 2
1304
+ (d − ℓ)
1305
+ d−ℓ
1306
+ d−ℓ+2
1307
+ = κ2πe.
1308
+ Note that, in particular, ad → ∞ as d → ∞. We conclude that limd→∞ fd(v, h) = eκ2πeh uniformly
1309
+ as long as h stays bounded. By standard results [24, Propositions 3.6 and 3.19], this also implies weak
1310
+ convergence of the corresponding Poisson point processes.
1311
+ After we have shown the convergence of the point processes ξℓ,d to ξℓ,∞, as d → ∞, we explain the
1312
+ procedure allowing to transfer this result to the convergence of the corresponding tessellations LΨ(ξℓ,d)
1313
+ to LΨ(ξℓ,∞) as d → ∞. Note that LΨ(ξℓ,d) has the same distribution as Wd,ρd ∩ Rℓ and LΨ(ξℓ,∞) has the
1314
+ same distribution as Gℓ,κ2πe (see Corollary 4.2 and Section 3.2).
1315
+ We fix an ℓ-dimensional ball BR ⊂ Rℓ of radius R > 0 centred at the origin and for any ε > 0 we aim
1316
+ to find a region K(R, ε) �� Rℓ+1, independent of d, such that with probability at least 1−ε the restrictions
1317
+ of the tessellations LΨ(ξℓ,d) and LΨ(ξℓ,∞) to BR are completely determined by the restrictions of the
1318
+ point processes ξℓ,d and ξℓ,∞ to K(R, ε) for any d. To this end, we note that LΨ(ξℓ,d) may be regarded
1319
+ as a vertical projection along the h-axis of the boundary of the corresponding paraboloid growth process
1320
+ Ψ(ξℓ,d) (see Section 3.2). From this it follows that if the restrictions of the tessellations LΨ(ξℓ,d) and
1321
+ LΨ(ξℓ,∞) to BR do not coincide, then the boundaries of the corresponding paraboloid hull processes
1322
+ bd Ψ(ξℓ,d) and bd Ψ(ξℓ,∞) restricted to the cylinder BR × R do not coincide as well. The construction of
1323
+ the region K(R, ε) can be now performed as follows. First, we consider the event E(T, r) that bd Ψ(ξℓ,d)
1324
+ restricted to the cylinder BR ×R is completely determined by the restriction of bd Ψ(ξℓ,d) to the cylinder
1325
+ BR+r ×(−∞, T] for some T, r > 0. By this we mean that for every paraboloid Π+,x with x ∈ ext(Ψ(ξℓ,d))
1326
+ the set Π+,x ∩ bd Ψ(ξℓ,d) either does not intersect BR × R or is included in BR+r × (−∞, T]. We have
1327
+ that
1328
+ 1 − P(E(T, r)) ≤ c1(R + r)ℓ(ec2(4T−r2) + e−c3T c4),
1329
+ r, d > c5,
1330
+ (6.2)
1331
+ where all constants c1, . . . , c5 are positive and independent of the parameters d, r and T. Since the proof
1332
+ of this estimate follows exactly the same route as the proof of [8, Lemma 4.4] (estimate for T) and [8,
1333
+ Lemma 4.5] (estimate for r), we decided to omit the technical details. In particular, (6.2) shows that
1334
+ for any ε > 0 there is a choice of T0 := T(ε) and r0 := r(ε) such that P(E(T0, r0)) ≥ 1 − ε. The
1335
+ same holds for Ψ(ξℓ,∞). Further, we note that if a paraboloid Π+,x with x ∈ ext(Ψ(ξℓ,d)) is such that
1336
+ Π+,x ∩ bd Ψ(ξℓ,d) ⊂ BR+r0 × (−∞, T0], then
1337
+ x ∈ {(v, h) ∈ Rℓ+1 : h ≤ T0, ∥v∥ ≤ R + r0 +
1338
+
1339
+ T0 − h} =: K(R, ε).
1340
+ To complete the proof, it suffices to argue that there exists a coupling of ξℓ,d and ξℓ,∞ on a common
1341
+ probability space such that the probability that the restrictions of these processes to the region K(R, ε)
1342
+ do not coincide converges to 0, as d → ∞. In a suitable coupling, this probability is bounded above by a
1343
+ constant multiple of the L1-norm of the difference of their intensity measures restricted to K(R, ε), see [23,
1344
+ Theorem 3.2.2]. As we have shown above, the densities fd(v, h) converge to eκ2πeh as d → ∞ uniformly
1345
+ 14
1346
+
1347
+ on compact sets and hence pointwise. Also, by the inequality 1 + x ≤ ex, we have fd(v, h) ≤ ec6h for
1348
+ some absolute constant c6 > 0. The fact that this upper bound is integrable over K(r, ε) has been shown
1349
+ in [8, Equation (4.18)]. Thus, fd(v, h) → eκ2πeh with respect to the L1-norm on K(R, ε), which ensures
1350
+ that the required coupling of the Poisson processes indeed exists. This completes the argument.
1351
+ Our aim is now to prove the weak convergence of the typical cell of the sectional Poisson-Voronoi
1352
+ tessellation to the typical cell of the Gaussian-Voronoi tessellation. Fix some ℓ ∈ N and let C be the
1353
+ space of compact subsets of Rℓ endowed with the Hausdorff metric. Put C′ = C\{∅}. The typical cells
1354
+ considered below are defined with respect to some fixed centre function z : C′ → Rℓ in the sense of
1355
+ Section 2, additionally satisfying z(C) ∈ C for every C ∈ C′.
1356
+ Theorem 6.3. Take any positive sequence (ρd)d∈N with limd→∞(ρd)1/d = κ > 0. Then, as d → ∞, the
1357
+ distribution of the typical cell of the sectional Poisson-Voronoi tessellation Wd,ρd ∩ Rℓ converges to the
1358
+ distribution of the typical cell of the Gaussian-Voronoi tessellation Gℓ,κ2πe weakly on C.
1359
+ This theorem is a consequence of Theorem 6.1 and the following general result.
1360
+ Proposition 6.4. Let (Tn)n∈N be a sequence of stationary random tessellations on Rℓ converging to a
1361
+ stationary random tessellation T∞ on Rℓ in the following sense: All random tessellations are defined on
1362
+ a common probability space and for every R > 0 we have lim
1363
+ n→∞ P[An(R)] = 1, where An(R) is the event
1364
+ that the restrictions of Tn and T∞ to [−R, R]ℓ coincide. More precisely, An(R) = A′
1365
+ n(R) ∩ A′′
1366
+ n(R) with
1367
+ A′
1368
+ n(R) := {for every C ∈ Tn s.t. C ∩ [−R, R]ℓ ̸= ∅ there is C′ ∈ T∞ s.t. C ∩ [−R, R]ℓ = C′ ∩ [−R, R]ℓ},
1369
+ A′′
1370
+ n(R) := {for every C′ ∈ T∞ s.t. C′ ∩ [−R, R]ℓ ̸= ∅ there is C ∈ Tn s.t. C ∩ [−R, R]ℓ = C′ ∩ [−R, R]ℓ}.
1371
+ Also, suppose that the cell intensity of Tn converges to that of T∞, that is lim
1372
+ n→∞ γℓ(Tn) = γℓ(T∞), and that
1373
+ all these intensities are finite. Then, the distribution of the typical cell of Tn converges to the distribution
1374
+ of the typical cell of T∞ weakly on C:
1375
+ Pz
1376
+ Tn,ℓ −→
1377
+ n→∞Pz
1378
+ T∞,ℓ,
1379
+ weakly on C.
1380
+ (6.3)
1381
+ Proof. Let f : C → R be a bounded continuous function and recall that z(C) is the centre of a cell C.
1382
+ Consider the random variables
1383
+ ξn :=
1384
+
1385
+ C∈Tn, z(C)∈[0,1]ℓ
1386
+ f(C),
1387
+ n ∈ N ∪ {∞}.
1388
+ By definition, the distribution of the typical cell of Tn satisfies
1389
+
1390
+ C
1391
+ f dPz
1392
+ Tn,ℓ =
1393
+ Eξn
1394
+ γℓ(Tn),
1395
+ n ∈ N ∪ {∞}.
1396
+ Since lim
1397
+ n→∞ γℓ(Tn) = γℓ(T∞), to prove (6.3) it suffices to check that
1398
+ lim
1399
+ n→∞ Eξn = Eξ∞.
1400
+ (6.4)
1401
+ In the following we shall define a “good” event Cn(R) on which ξn and ξ∞ are equal. For n ∈ N ∪ {∞}
1402
+ and R > 3 consider the random event
1403
+ Bn(R) := {∄C ∈ Tn : C ∩ [0, 1]ℓ ̸= ∅, C ̸⊂ [−R/2, R/2]ℓ}.
1404
+ A cell C with the properties listed in this definition is called a “long cell” in the tessellation Tn. The
1405
+ event Bn(R) occurs if there is no long cell. On the event An(R), each long cell C ∈ Tn corresponds to a
1406
+ long cell C′ ∈ T∞ with the same restriction to [−R, R]ℓ. It follows that An(R) ∩ (Bn(R))c ⊂ (B∞(R))c.
1407
+ 15
1408
+
1409
+ Since all cells in T∞ are bounded almost surely, the maximal diameter of a cell in T∞ intersecting [0, 1]ℓ
1410
+ is some almost surely finite random variable M. It follows that
1411
+ lim
1412
+ R→∞ sup
1413
+ n∈N
1414
+ P[An(R) ∩ (Bn(R))c] ≤ lim
1415
+ R→∞ P[(B∞(R))c] ≤ lim
1416
+ R→∞ P[M > (R/2) − 1] = 0.
1417
+ Recall also that limn→∞ P[An(R)] = 1 for every fixed R > 0. Consider now the “good” event Cn(R) :=
1418
+ An(R) ∩ Bn(R) and the “bad” event Dn(R) := (Cn(R))c. It follows from the above that
1419
+ lim
1420
+ R→∞ lim sup
1421
+ n→∞ P[Dn(R)] = 0.
1422
+ (6.5)
1423
+ Note that, on the event Cn(R), the sets {C ∈ Tn : z(C) ∈ [0, 1]ℓ} and {C′ ∈ T∞ : z(C′) ∈ [0, 1]ℓ} are
1424
+ equal, implying that ξn = ξ∞. Indeed, every cell C ∈ Tn with z(C) ∈ [0, 1]ℓ is contained in [−R/2, R/2]ℓ
1425
+ (since z(C) ∈ C and there is no long cell) and, consequently, C is also a cell of T∞ (since An(R) occurs).
1426
+ Conversely, for every cell C′ ∈ T∞ with z(C′) ∈ [0, 1]ℓ there is a cell C ∈ Tn such that the restrictions
1427
+ of C and C′ to [−R, R]ℓ coincide (since An(R) occurs) and C is not long (since Bn(R) occurs), which
1428
+ implies that C = C′. It follows that
1429
+ E[1(Cn(R)) ξn] = E[1(Cn(R)) ξ∞],
1430
+ n ∈ N, R > 3.
1431
+ To complete the proof of (6.4), it suffices to verify that
1432
+ lim
1433
+ R→∞ lim sup
1434
+ n→∞ E[1(Dn(R)) ξn] = 0,
1435
+ lim
1436
+ R→∞ lim sup
1437
+ n→∞ E[1(Dn(R)) ξ∞] = 0.
1438
+ In view of (6.5), it suffices to check that the family {ξn : n ∈ N ∪ {∞}} is uniformly integrable. Now,
1439
+ |ξn| ≤ ∥f∥∞ηn
1440
+ with
1441
+ ηn :=
1442
+
1443
+ C∈Tn, z(C)∈[0,1]ℓ
1444
+ 1,
1445
+ n ∈ N ∪ {∞}.
1446
+ As we already observed above, outside the event Dn(R) we have ηn = η∞. From (6.5) it follows that
1447
+ ηn → η∞ almost surely. On the other hand, we have Eηn = γℓ(Tn) → γℓ(T∞) = Eη∞ by assumption of
1448
+ the proposition (and all these expectations are finite). These two properties together with ηn ≥ 0 imply
1449
+ that the family {ηn : n ∈ N∪{∞}} is uniformly integrable. Indeed, if this were not the case, we could find
1450
+ ε > 0 such that, after passing to a subsequence, E[ηn1(Qn)] > ε for some events Qn with P[Qn] < 1/2n.
1451
+ Let Wk := ∪∞
1452
+ n=kQn. If K is sufficiently large, then E[η∞1(WK)] < ε/2, while E[ηn1(WK)] > ε for all
1453
+ n ≥ K. Applying Fatou’s lemma to the variables (ηn1((WK)c))n≥K leads to a contradiction with the
1454
+ assumption Eηn → Eη∞. Finally, the bound |ξn| ≤ ∥f∥∞ηn implies that the family {ξn : n ∈ N ∪ {∞}}
1455
+ is uniformly integrable as well, and the proof is complete.
1456
+ Acknowledgement
1457
+ ZK and CT were supported by the DFG priority program SPP 2265 Random Geometric Systems. AG
1458
+ and ZK were supported by the DFG under Germany’s Excellence Strategy EXC 2044 – 390685587,
1459
+ Mathematics Münster: Dynamics - Geometry - Structure.
1460
+ References
1461
+ [1] P. Calka, T. Schreiber, and J. E. Yukich. Brownian limits, local limits and variance asymptotics for convex
1462
+ hulls in the ball. Ann. Probab., 41(1):50–108, 2013.
1463
+ [2] P. Calka and J. E. Yukich. Variance asymptotics for random polytopes in smooth convex bodies. Probab.
1464
+ Theory Related Fields, 158(1-2):435–463, 2014.
1465
+ [3] P. Calka and J. E. Yukich. Variance asymptotics and scaling limits for Gaussian polytopes. Probab. Theory
1466
+ Related Fields, 163(1-2):259–301, 2015.
1467
+ 16
1468
+
1469
+ [4] P. Calka and J. E. Yukich.
1470
+ Variance asymptotics and scaling limits for random polytopes.
1471
+ Adv. Math.,
1472
+ 304:1–55, 2017.
1473
+ [5] P. Calka and J. E. Yukich. Convex hulls of perturbed random point sets. Ann. Appl. Probab., 31(4):1598–1632,
1474
+ 2021.
1475
+ [6] S. N. Chiu, R. van de Weygaert, and D. Stoyan. The sectional Poisson-Voronoi tessellation is not a Voronoi
1476
+ tessellation. Adv. in Appl. Probab., 28(2):356–376, 1996.
1477
+ [7] A. Gusakova, Z. Kabluchko, and C. Thäle.
1478
+ The β-Delaunay tessellation: Description of the model and
1479
+ geometry of typical cells. Adv. in Appl. Probab., 54(4):1252–1290, 2022.
1480
+ [8] A. Gusakova, Z. Kabluchko, and C. Thäle. The β-Delaunay tessellation II: The Gaussian limit tessellation.
1481
+ Electron. J. Probab., (27):1 – 33, 2022.
1482
+ [9] A. Gusakova, Z. Kabluchko, and C. Thäle. The β-Delaunay tessellation III: Kendall’s problem and limit
1483
+ theorems in high dimensions. ALEA, Lat. Am. J. Probab. Math. Stat., 19:23 – 50, 2022.
1484
+ [10] A. Gusakova, Z. Kabluchko, and C. Thäle. The β-Delaunay tessellation IV: Mixing properties and central
1485
+ limit theorems. Stoch. and Dynamics, accepted. Preprint at: https://arxiv.org/abs/2108.09472, 2023.
1486
+ [11] Z. Kabluchko. Angles of random simplices and face numbers of random polytopes. Adv. Math., 380:Paper
1487
+ No. 107612, 68, 2021.
1488
+ [12] Z. Kabluchko.
1489
+ Recursive scheme for angles of random simplices, and applications to random polytopes.
1490
+ Discrete Comput. Geom., 66:902–937, 2021.
1491
+ [13] Z. Kabluchko, D. Temesvari, and C. Thäle. Expected intrinsic volumes and facet numbers of random beta-
1492
+ polytopes. Math. Nachr., 292(1):79–105, 2019.
1493
+ [14] Z. Kabluchko and C. Thäle. The typical cell of a Voronoi tessellation on the sphere. Discrete Comput. Geom.,
1494
+ 66:1330–1350, 2021.
1495
+ [15] Z. Kabluchko, C. Thäle, and D. Zaporozhets. Beta polytopes and Poisson polyhedra: f-vectors and angles.
1496
+ Adv. Math., 374:Paper No. 107333, 63, 2020.
1497
+ [16] O. Kallenberg. Foundations of Modern Probability. Probability and its Applications (New York). Springer-
1498
+ Verlag, New York, second edition, 2002.
1499
+ [17] G. Last and M. Penrose. Lectures on the Poisson Process, volume 7 of Institute of Mathematical Statistics
1500
+ Textbooks. Cambridge University Press, Cambridge, 2018.
1501
+ [18] C. Lautensack. Random Laguerre Tessellations. PhD thesis, 2007.
1502
+ [19] C. Lautensack and S. Zuyev. Random Laguerre tessellations. Adv. in Appl. Probab., 40(3):630–650, 2008.
1503
+ [20] R.E. Miles. Sectional Voronoi tessellations. Revista de la Union Mathematica Argentina, 29:301–327.
1504
+ [21] W. Nagel. Stereology. In New perspectives in stochastic geometry, pages 451–475. Oxford Univ. Press, Oxford,
1505
+ 2010.
1506
+ [22] A. Okabe, B. Boots, K. Sugihara, and S. N. Chiu. Spatial Tessellations: Concepts and Applications of Voronoi
1507
+ Diagrams. Wiley Series in Probability and Statistics. John Wiley & Sons, Ltd., Chichester, second edition,
1508
+ 2000. With a foreword by D. G. Kendall.
1509
+ [23] R.-D. Reiss. A course on point processes. Springer Series in Statistics. Springer-Verlag, New York, 1993.
1510
+ [24] S. I. Resnick. Extreme Values, Regular Variation and Point Processes. Springer Series in Operations Research
1511
+ and Financial Engineering. Springer, New York, 2008. Reprint of the 1987 original.
1512
+ [25] M. Schlottmann. Periodic and quasi-periodic Laguerre tilings. International Journal of Modern Physics B,
1513
+ 7(6-07):1351–1363, January 1993.
1514
+ [26] R. Schneider and W. Weil.
1515
+ Stochastische Geometrie.
1516
+ Teubner Skripten zur Mathematischen Stochastik.
1517
+ (Teubner Texts on Mathematical Stochastics). B. G. Teubner, Stuttgart, 2000.
1518
+ [27] R. Schneider and W. Weil. Stochastic and Integral Geometry. Probability and its Applications (New York).
1519
+ Springer-Verlag, Berlin, 2008.
1520
+ [28] T. Schreiber and J. E. Yukich.
1521
+ Variance asymptotics and central limit theorems for generalized growth
1522
+ processes with applications to convex hulls and maximal points. Ann. Probab., 36:363–396, 2008.
1523
+ [29] D. Stoyan, W. S. Kendall, and J. Mecke. Stochastic Geometry and its Applications. Wiley Series in Probability
1524
+ and Mathematical Statistics: Applied Probability and Statistics. John Wiley & Sons, Ltd., Chichester, 1987.
1525
+ With a foreword by D. G. Kendall.
1526
+ 17
1527
+
5NE1T4oBgHgl3EQfmgQS/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
5tE1T4oBgHgl3EQfmgTC/content/2301.03299v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95ddf0ff4d36bb45d1889c132c32aa9e214348a22147081c902418c41e06dddb
3
+ size 278468
5tE1T4oBgHgl3EQfmgTC/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac4e759c8c87f83ebf59832a5768ac17fa209afba8a1927d48904f3bb7caa20d
3
+ size 3801133
5tE1T4oBgHgl3EQfmgTC/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef554f8f4c78192c08693fcadc2480e3b1faeef65c3f3bb89055b03049b4cd3f
3
+ size 137471
6dFKT4oBgHgl3EQfTi2q/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b29b1c967fb40f3bfcb859ba7f56230287500dc06e68b91fa336e45ea460b8b4
3
+ size 4259885
9NE3T4oBgHgl3EQfqwrj/content/tmp_files/2301.04655v1.pdf.txt ADDED
@@ -0,0 +1,1022 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ChatGPT is not all you need. A State of the Art
2
+ Review of large Generative AI models
3
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
4
+ Quantitative Methods Department, Universidad Pontificia Comillas, Madrid, Spain
5
6
+ Abstract. During the last two years there has been a plethora of large
7
+ generative models such as ChatGPT or Stable Diffusion that have been
8
+ published. Concretely, these models are able to perform tasks such as
9
+ being a general question and answering system or automatically creat-
10
+ ing artistic images that are revolutionizing several sectors. Consequently,
11
+ the implications that these generative models have in the industry and
12
+ society are enormous, as several job positions may be transformed. For
13
+ example, Generative AI is capable of transforming effectively and cre-
14
+ atively texts to images, like the DALLE-2 model; text to 3D images,
15
+ like the Dreamfusion model; images to text, like the Flamingo model;
16
+ texts to video, like the Phenaki model; texts to audio, like the AudioLM
17
+ model; texts to other texts, like ChatGPT; texts to code, like the Codex
18
+ model; texts to scientific texts, like the Galactica model or even create
19
+ algorithms like AlphaTensor. This work consists on an attempt to de-
20
+ scribe in a concise way the main models are sectors that are affected by
21
+ generative AI and to provide a taxonomy of the main generative models
22
+ published recently.
23
+ 1
24
+ Introduction
25
+ Generative AI refers to artificial intelligence that can generate novel content,
26
+ rather than simply analyzing or acting on existing data like expert systems [23].
27
+ In particular, expert systems contained knowledge bases and an inference engine
28
+ that generated content via an if-else rule database. However, modern generative
29
+ artificial intelligence contain a discriminator or transformer model trained on a
30
+ corpus or dataset that is able to map the input information into a latent high-
31
+ dimensional space and a generator model, that is able to generate an stochastic
32
+ behaviour creating novel content in every new trial even from the same prompts
33
+ as an input, performing unsupervised, semi-supervised or supervised learning,
34
+ depending on the particular methodology. Regarding the created content by the
35
+ model, generative artificial intelligence models are different from predictive ma-
36
+ chine learning systems, that merely perform a discrimination behaviour, solv-
37
+ ing classification or regression problems. In particular, these models are able
38
+ to discriminate information and generate information of the transformed input
39
+ information, or prompt.
40
+ The key aspect about generative models is that their architecture and the
41
+ data that they have been fed is enormous. For example, it is possible now to
42
+ arXiv:2301.04655v1 [cs.LG] 11 Jan 2023
43
+
44
+ 2
45
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
46
+ estimate the parameters of the model by feeding it the contents of the whole
47
+ Wikipedia, Github, social networks, Google images and more. Despite being fed
48
+ with an enormous size of data, thanks to the rise of computing we can design
49
+ deep neural networks [18], transformers [22] and other models such as genera-
50
+ tive adversarial networks [9] or variational autoencoders [23] whose capacity is
51
+ able to model the complexity of the data, without suffering from underfitting.
52
+ As they are able to modelize the high-dimensional probability distribution of
53
+ language or photos of a concrete or general domain, if they are complemented
54
+ by generative models that map the latent high-dimensional semantic space of
55
+ language of photos to a multimedia representation of text, audio or video we
56
+ can map any input format like texts to any output format like video. In this
57
+ sense, applications of this technology are endless, in the sense that we can train
58
+ a model to generate genuine different multimedia formats as video, audio or text
59
+ from different multimedia input formats, as for example, text.
60
+ We believe that it is necessary to provide a state-of-the-art review on the
61
+ most popular generative AI models as they are revolutionizing several indus-
62
+ tries like the art industry [2] or universities [16,30]. As models are now able to
63
+ generate genuine artistic content or large texts answering a prompt, these two
64
+ industries and other ones that we will detail throughout this manuscript will
65
+ need to readapt their activity to continue providing value. In this sense, gen-
66
+ erative AI models will not replace humans but enhance our content, being an
67
+ inspiration for artists or improving the content generated by professors. In order
68
+ to provide information for a professional working in any industry that can be
69
+ benefited by these models we have made the organization of the paper as the
70
+ following one. First, we will provide a taxonomy of the main generative models
71
+ that have appeared in the industry. Then, the following sections will analyze
72
+ each of the categories of the taxonomy. Finally, we finish the manuscript with a
73
+ conclusions and further work section. We do not study the technical aspects of
74
+ every model, such as transformers in detail as our purpose in this review is on the
75
+ applications of the models and the content that they generative but not on how
76
+ they work. For a detailed explanation of deep learning models and generative
77
+ models we recommend other references [18,23].
78
+ 2
79
+ A Taxonomy of Generative AI models
80
+ Before analyzing each model in detail, we have tried to organize the current
81
+ generative artificial models into a taxonomy whose categories represent the main
82
+ mappings between each multimedia input and output type of data. The result
83
+ is the one that we have illustrated in Figure 1. We have discovered a total of 9
84
+ categories, where each of the models that appear in Figure 1 will be described
85
+ in detail in the following section. Each of the covered models has been published
86
+ recently, as we illustrate in Figure 2, as our main concern in this manuscript is
87
+ to describe the latest advances in generative AI models.
88
+ Interestingly, only six organizations are behind the deployment of these mod-
89
+ els, as we illustrate in Figure 3. The main reason behind this fact is that in order
90
+
91
+ State of the Art of Generative AI
92
+ 3
93
+ Fig. 1. A taxonomy of the most popular generative AI models that have recently
94
+ appeared classified according to their input and generated formats.
95
+ Fig. 2. Covered models by date of release. All models were released during 2022 except
96
+ LaMDA, which was released in 2021 and Muse, in 2023.
97
+
98
+ Text-to-
99
+ Text-to-
100
+ Image-to-
101
+ image
102
+ 3D
103
+ text
104
+ DALL-E
105
+ Stable
106
+ 2
107
+ Dreamfusion
108
+ Magic3D
109
+ Flamingo
110
+ Diffusion
111
+ VisualGPT
112
+ Imagen
113
+ Muse
114
+ Text.to-
115
+ Text.to-
116
+ Text-To-
117
+ Video
118
+ Audio
119
+ Text
120
+ Phenaki
121
+ Soundify
122
+ AudioLM
123
+ Whisper
124
+ ChatGPT3
125
+ LaMDA
126
+ Jukebox
127
+ PEER
128
+ Speech From
129
+ Brain
130
+ Text.to.
131
+ Text-to
132
+ Other
133
+ Code
134
+ Science
135
+ Models
136
+ Codex
137
+ Alphacode
138
+ Galactica
139
+ Minerva
140
+ Alphatensor
141
+ GATO
142
+ Human Motion
143
+ Diffusion ModelMeta Al Speech From Brain
144
+ Muse
145
+ Whisper
146
+ PEER
147
+ Stable Diffusion
148
+ Hurnan Motion Diffusion
149
+ Model
150
+ Jukebox
151
+ Imagen
152
+ VisualGPT
153
+ Magic3D
154
+ Minerva
155
+ LaMDA
156
+ Soundify
157
+ GATO
158
+ Alphacode
159
+ Flamingo
160
+ DallI-E-2
161
+ Galadtica
162
+ Alphatensor
163
+ Codex
164
+ Dreamfusion
165
+ ChatGPT3
166
+ AudioL
167
+ 04/01/2021
168
+ 14/04/2021
169
+ 23/07/2021
170
+ 31/10/2021
171
+ 08/02/2022
172
+ 19/05/2022
173
+ 27/08/2022
174
+ 05/12/20224
175
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
176
+ to be able to estimate the parameters of these models, it is mandatory to have
177
+ an enormous computation power and a highly skilled and experienced team in
178
+ data science and data engineering. Consequently, only the companies shown on
179
+ Figure 3, with the help of acquired startups and collaborations with academia,
180
+ have been able to successfully deploy generative artificial intelligence models.
181
+ Fig. 3. Models by developer. In terms of major companies participating in startups,
182
+ note that Microsoft invested 1 billion dollars in OpenAI and helps them with the de-
183
+ velopment of models. As well, note that Google acquired Deepmind in 2014. In terms
184
+ of universities, note that VisualGPT was developed by KAUST, Carnegie Mellon Uni-
185
+ versity and Nanyang Technological University and that the Human Motion Diffusion
186
+ Model was developed by Tel Aviv University, Israel. As well, other projects are de-
187
+ veloped by a company in collaboration with a university. Concretely, this is the case
188
+ for Stable Diffsion (Runway, Stability AI and LMU MUNICH), Soundify (Runway and
189
+ Carnegie Mellon University) and DreamFusion (Google and UC Berkeley)
190
+
191
+ GoogleResearch
192
+ openAI
193
+ DALL-E 2
194
+ Chat
195
+ GPT3
196
+ Imagen
197
+ Muse
198
+ Jukebox
199
+ Whisper
200
+ DreamFusion
201
+ Phenaki
202
+ Minerva
203
+ AudioLM
204
+ LaMDA
205
+ ODeepMnd
206
+ 00MetaAl
207
+ Flamingo
208
+ Alphacode
209
+ PEER
210
+ Speech
211
+ From Brain
212
+ Alphatensor
213
+ GATO
214
+ Galactica
215
+ Runway
216
+ nVIDIA
217
+ Stable
218
+ Diffusion
219
+ Soundify
220
+ Magic3DState of the Art of Generative AI
221
+ 5
222
+ Now that we have provided and analyzed the latest generative artificial in-
223
+ telligence models, the following section will cover each of the categories of the
224
+ taxonomy presented in Figure 1 in detail.
225
+ 3
226
+ Generative AI models categories
227
+ In this section we will cover in detail the nine categories described in Figure 1 of
228
+ the previous section. For every category, we illustrate the details of the models
229
+ shown in Figure 1.
230
+ 3.1
231
+ Text-to-image models
232
+ We begin the review by considering the models whose input is a text prompt
233
+ and whose output is an image.
234
+ DALL·E 2 : DALL·E 2, created by OpenAI, is able to generate original, genuine
235
+ and realistic images and art from a prompt consisting of a text description [10].
236
+ Luckily, it is possible to use the OPENAI API to get access to this model. In
237
+ particular, DALL·E 2 manages to combine concepts, attributes and different
238
+ styles. In order to do so, it uses the CLIP neural network. CLIP (Contrastive
239
+ Language-Image Pre-Training) is a neural network trained on a variety of (image,
240
+ text) pairs [25]. Using CLIP, that can be instructed in natural language to predict
241
+ the most relevant text snippet, given an image, the model has recently emerged
242
+ as a successful representation learner for images. Concretely, CLIP embeddings
243
+ have several desirable properties: they are robust to image distribution shift, have
244
+ impressive zero-shot capabilities and have been fine-tuned to achieve state-of-the-
245
+ art results. In order to obtain a full generative model of images, the CLIP image
246
+ embedding decoder module is combined with a prior model, which generates
247
+ possible CLIP image embeddings from a given text caption. We illustrate an
248
+ image generated from a prompt in Figure 4
249
+ Fig. 4. Image generated from the prompt ”A shiba inu wearing a beret and black
250
+ turtleneck”.
251
+
252
+ 6
253
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
254
+ IMAGEN : Imagen is a text-to-image diffusion model [17] consisting of large
255
+ transformer language models. Critically, the main discovery observed with this
256
+ model is that large language models, pre-trained on text-only corpora, are
257
+ very effective at encoding text for image synthesis [28]. Precisely, using Imagen,
258
+ it has been found out that increasing the size of the language model boosts both
259
+ sample fidelity and image-text alignment much more than increasing the size of
260
+ the image diffusion model. The model was created by Google and the API can
261
+ be found in their web page. For the evaluation of their model, Google created
262
+ Drawbench, a set of 200 prompts that support the evaluation and comparison of
263
+ text-to-image models. Most concretely, the model is based on a pretrained text
264
+ encoder (like BERT [12]) that performs a mapping from text to a sequence of
265
+ word embeddings and a cascade of conditional diffusion models that map these
266
+ embeddings to images of increasing resolutions. We show an image generated
267
+ from a prompt in Figure 5.
268
+ Fig. 5. Image generated from the prompt ”A cute corgi lives in a house made out of
269
+ sushi”.
270
+ Stable Diffusion : Stable Diffusion is a latent-diffusion model that is open-
271
+ source and has been developed by the CompVis group at LMU Munich. The
272
+ main difference of this model with respect to the other ones is the use of a
273
+ latent diffusion model and that it performs image modification as it can perform
274
+ operations in its latent space. For Stable Diffusion, we can use the API via their
275
+ website. More concretely, Stable Diffusion consists of two parts: the text encoder
276
+ and the image generator [17]. The image information creator works completely
277
+ in the latent space. This property makes it faster than previous diffusion models
278
+ that worked in a pixel space. We illustrate a Stable Diffusion image example in
279
+ Figure 7.
280
+
281
+ State of the Art of Generative AI
282
+ 7
283
+ Fig. 6. Image generated from the prompt ”A cute corgi lives in a house made out of
284
+ sushi”.
285
+ Muse : This model is a Text-to-image transformer model that achieves state-of-
286
+ the-art image generation while being more efficient than diffusion or autoregres-
287
+ sive models [6]. Concretely, it is trained on a masked modelling task in discrete
288
+ token space. Consequently, it is more efficient because of the use of discrete
289
+ tokens and requiring fewer sampling iterations. Compared to Parti, an autore-
290
+ gressive model, Muse is more efficient because of parallel decoding. Muse is 10x
291
+ faster at inference time than Imagen-3B or Parti-3B and 3x faster than Stable
292
+ Diffusion v 1.4. Muse is also faster than Stable Diffusion in spite of both
293
+ models working in the latent space of a VQGAN. We append a comparison of
294
+ the generated images by DALL·E 2, IMAGEN and Muse in Figure 7.
295
+ 3.2
296
+ Text-to-3D models
297
+ The models that have been described in the previous section deal with the map-
298
+ ping of text prompts to 2D images. However, for some industries like gaming,
299
+ it is necessary to generate 3D images. In this section, we briefly describe two
300
+ text-to-3D models: Dreamfusion and Magic3D.
301
+ Dreamfusion : DreamFusion is a text-to-3D model developed by Google Re-
302
+ search that uses a pretrained 2D text-to-image diffusion model to perform text-
303
+ to-3D synthesis [24]. In particular, Dreamfusion replaces previous CLIP tech-
304
+ niques with a loss derived from distillation of a 2D diffusion model. Concretely,
305
+ the diffusion model can be used as a loss within a generic continuous optimization
306
+ problem to generate samples. Critically, sampling in parameter space is much
307
+ harder than in pixels as we want to create 3D models that look like good images
308
+ when rendered from random angles. To solve the issue, this model uses a differ-
309
+ entiable generator. Other approaches are focused on sampling pixels, however,
310
+ this model instead focuses on creating 3D models that look like good images
311
+ when rendered from random angles. We illustrate in Figure 8 an example of
312
+
313
+ 8
314
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
315
+ Fig. 7. Comparison of generated images by the DALL·E 2, IMAGEN and Muse models
316
+ with respect to the prompts that appear in the column of the left. The first column of
317
+ images contains the results generated by DALL·E 2, the second the results obtained
318
+ with IMAGEN and the third the images created by Muse.
319
+ an image created by Dreamfusion from one particular angle along with all the
320
+ variations that can be generated from additional text prompts. In order to see
321
+ the full animated image, we recommend to visit the web page of Dreamfusion.
322
+ Fig. 8. A 3D squirrel generated by Dreamfusion is shown at the left. Then, the other
323
+ images contain the modifications generated to the squirrel with text prompts like ”wear-
324
+ ing a jacket”.
325
+
326
+ DALL-E 2
327
+ Imagen
328
+ MUSE
329
+ A high contrast
330
+ portrait of a very
331
+ happy fuzzy panda
332
+ dressed as a chef
333
+ in a high end kit-
334
+ chen making dough.
335
+ There is a painting
336
+ of flowers on the
337
+ wall behind him.
338
+ Rainbow coloured
339
+ penguin.a DSLR
340
+ wearing
341
+ Bsup!!
342
+ onaroad
343
+ photo of
344
+ a leather
345
+ motorcycle
346
+ made of ice
347
+ jacket
348
+ a squirrelState of the Art of Generative AI
349
+ 9
350
+ Magic3D : This model is a text to 3D model made by NVIDIA Corporation.
351
+ While the Dreamfusion model achieves remarkable results, the method has two
352
+ problems: mainly, the long processing time and the low-quality of the generated
353
+ images. However, these problems are addressed by Magic3D using a two-stage
354
+ optimization framework [20]. Firstly, Magic3D builds a low-resolution diffusion
355
+ prior and, then, it accelerates with a sparse 3D hash grid structure. Using that,
356
+ a textured 3D mesh model is furthered optimized with an efficient differentiable
357
+ render. Comparatively, regarding human evaluation, the model achieves better
358
+ results, as 61.7% prefer this model to DreamFusion. As we can see in Figure 9,
359
+ Magic3D achieves much higher quality 3D shapes in both geometry and texture
360
+ compared to DreamFusion.
361
+ Fig. 9. 3D Images generated by Magic3D and Dreamfusion, where ”Ours” refer to
362
+ Magic3D. We can see a total of 8 text prompts and the images that both models
363
+ generate from those prompts.
364
+ 3.3
365
+ Image-to-Text models
366
+ Sometimes, it is also useful to obtain a text that describes an image, that is
367
+ precisely the inverse mapping to the one that has been analyzed in the previous
368
+ subsections. In this section, we analyze two models that perform this task, along
369
+ with others: Flamingo and VisualGPT.
370
+ Flamingo : A Visual Language Model created by Deepmind using few shot
371
+ learning on a wide range of open-ended vision and language tasks, simply by
372
+ being prompted with a few input/output examples [1]. Concretely, the input of
373
+ Flamingo contains visually conditioned autoregressive text generation models
374
+ able to ingest a sequence of text tokens interleaved with images and/or videos
375
+
376
+ Ours
377
+ DreamFusion[33]
378
+ Ours
379
+ DreamFusion[33]
380
+ 6
381
+ a kingfisher birdi
382
+ car made out of sushi*
383
+ anicecreamsundae
384
+ a beautifully carved wooden knight chess piecei
385
+ a small saguaro cactusplantedinaclaypor
386
+ A very beautiful tiny human heart organic sculpture made of copper wire
387
+ and threaded pipes, very intricate, curved, Studio lighting. high resolution*
388
+ modelofan adorable cottage
389
+ with athatched
390
+ aripestrawberm10
391
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
392
+ and produce text as output. A query is made to the model along with a photo
393
+ or a video and the model answers with a text answer. Some examples can be
394
+ observed in Figure 10. Flamingo models take advantage of two complementary
395
+ models: a vision model that analyzes visual scenes and a large language model
396
+ which performs a basic form of reasoning. The language model is trained on a
397
+ large amount of text data.
398
+ Fig. 10. Input prompts that contain images and text and output generated text re-
399
+ sponses from Flamingo. Every column contains a single example where we can see how
400
+ Flamingo answers the question using the image from the text.
401
+ VisualGPT : VisualGPT is an image captioning model made by OpenAI [7].
402
+ Concretely, VisualGPT leverages knowledge from the pretrained language model
403
+ GPT-2 [5]. In order to bridge the semantic gap between different modalities, a
404
+ novel encoder-decoder attention mechanism [33] is designed with an unsaturated
405
+ rectified gating function. Critically, the biggest advantage of this model is that
406
+ it does not need as much data as other image-to-text models. In particular,
407
+ improving data efficiency in image captioning networks would enable quick data
408
+ curation, description of rare objects, and applications in specialized domains.
409
+ Most interestingly, the API of this model can be found on GitHub. We include
410
+ three examples of text prompts generated by the model with respect to three
411
+ images fed to the model in Figure 11.
412
+ 3.4
413
+ Text-to-Video models
414
+ As we have seen in the previous subsections, it is now possible to generate images
415
+ from text. Consequently, the next logical step is to generate videos, that are
416
+
417
+ Question: What do you think
418
+ Question: What is odd about
419
+ Question: What country is
420
+ the capacities of these are?
421
+ this image? Explain why it is
422
+ this? Why do you think so?
423
+ Answer:
424
+ unusual. Answer:
425
+ Answer:
426
+ p Completion
427
+ The image is odd because
428
+ the elephant is in the back
429
+ It is Canada. I think so
430
+ The floppy disk is 1.44MB
431
+ of the truck. It is unusual
432
+ and the CD is 700MIB.
433
+ because the flag is the
434
+ because elephants are not
435
+ Canadian flag.
436
+ usually transported in the
437
+ back of a truck.State of the Art of Generative AI
438
+ 11
439
+ Fig. 11. Three examples of text prompts generated by the images shown on the left.
440
+ We also show the attention scores that the model assign to every word of the texts.
441
+ In the third image, we can see for example how the most discriminative information
442
+ about the image is the word ”cat” and ”television”.
443
+ sequences of images, from texts. In this section, we provide information about
444
+ two models that are able to perform this task: Phenaki and Soundify.
445
+ Phenaki : This model has been made by Google Research, and it is capable
446
+ of performing realistic video synthesis, given a sequence of textual prompts [34].
447
+ Most interestingly, we can get access to the API of the model from GitHub. In
448
+ particular, Phenaki is the first model that can generate videos from open domain
449
+ time variable prompts. To address data issues, it performs joint training on a
450
+ large image-text pairs dataset as well as a smaller number of video-text exam-
451
+ ples, which can result in generalization beyond what is available in the video datasets.
452
+ This is mainly due to image-text datasets having billions of inputs while text-
453
+
454
+ GT: the lady is sitting on the wood bench
455
+ ours
456
+ woman sitting
457
+ on
458
+ e
459
+ benchin
460
+ apark
461
+ attention
462
+ 0.7
463
+ 0.78
464
+ 0.82
465
+ 0.760.80.96
466
+ 0.80.690.85
467
+ GT: a laptop with a keyboard and mouse are on this desk
468
+ ours
469
+ alaptop sitting
470
+ on
471
+ adeskwithamouse
472
+ attention
473
+ 0.7
474
+ 0.78
475
+ 0.81
476
+ 0.70.7
477
+ 0.920.850.640.76
478
+ GT: a cat is sitting in front of a television
479
+ Ours
480
+ sitting
481
+ frontof
482
+ television
483
+ attention 0.80.860.80.83
484
+ 0.7 0.720.60.71
485
+ 0.93
486
+ GT: a number of people sitting on a snowy surface with skis
487
+ Ours
488
+ couple
489
+ of
490
+ people sitting
491
+ on
492
+ snowy suriace
493
+ attention
494
+ 0.8
495
+ 0.87
496
+ 0.71
497
+ 0.85
498
+ 0.91
499
+ 0.76 0.71
500
+ 0.940.9512
501
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
502
+ video datasets are much smaller. As well, limitations come from computational
503
+ capabilities for videos of variable length.
504
+ The model has three parts: the C-ViViT encoder, the training transformer
505
+ and the video generator. The encoder gets a compressed representation of videos.
506
+ First tokens are transformed into embeddings. This is followed by the temporal
507
+ transformer, then the spatial transformer. After the output of the spatial trans-
508
+ former, they apply a single linear projection without activation to map the tokens
509
+ back to pixel space. Consequently, the model generates temporally coherent and
510
+ diverse videos conditioned on open domain prompts even when the prompt is a
511
+ new composition of concepts. The videos can be minutes long, while the model
512
+ is trained on 1.4 second videos. Below we show in Figure 12 and in Figure 13
513
+ some examples of the creation of a video through a series of text prompts and
514
+ from a series of text prompts and an image.
515
+ Fig. 12. Sequence of images created by the Phenaki model given four different prompts.
516
+
517
+ 1st prompt:"A photorealistic teddy bear is swimming in the ocean at San Francisco"
518
+ 2nd prompt:“The teddy bear goes under water
519
+ 3rd prompt: "The teddy bear keeps swimming under the water with colorful fishes'
520
+ 4rd prompt:"A panda bear is swimming under water"State of the Art of Generative AI
521
+ 13
522
+ Fig. 13. Sequences of images created by the Phenaki model given an image and the
523
+ prompt. We can see how the model is able to manipulate the given image according to
524
+ the text prompt.
525
+ Soundify : In video editing, sound is half of the story. But, for professional
526
+ video editing, the problems come from finding suitable sounds, aligning sounds,
527
+ video and tuning parameters [21]. In order to solve this issue, Soundify is a
528
+ system developed by Runway that matches sound effects to video. This system
529
+ uses quality sound effects libraries and CLIP (a neural network with zero-shot
530
+ image classification capabilities cited before). Concretely, the system has three
531
+ parts: classification, synchronization, and mix. The classification matches effects
532
+ to a video by classifying sound emitters within. To reduce the distinct sound
533
+ emitters, the video is split based on absolute color histogram distances. In the
534
+ synchronization part, intervals are identified comparing effects label with each
535
+ frame and pinpointing consecutive matches above a threshold. In the mix part,
536
+ effects are split into around one-second chunks. Critically, chunks are stitched
537
+ via crossfades.
538
+ 3.5
539
+ Text-to-Audio models
540
+ As we have seen in the previous subsection, images are not the only important
541
+ non-structured data format. For videos, for music and in lots of contexts, audio
542
+ can be critical. Consequently, we analyze in this subsection three models whose
543
+ input information is text and whose output information is audio.
544
+ AudioLM : This model has been made by Google for high-quality audio gener-
545
+ ation with long-term consistency. In particular, AudioLM maps the input audio
546
+ into a sequence of discrete tokens and casts audio generation as language mod-
547
+ eling task in this representation space [4]. By training on large corpora of raw
548
+
549
+ Given Image
550
+ Prompt: "Camera zooms quickly into the eye of the cat"
551
+ Given Image
552
+ I Prompt: "A white cat touches the camera with the paw"
553
+ Given Image
554
+ Prompt: "A white cat yawns loudly"14
555
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
556
+ audio waveforms, AudioLM learns to generate natural and coherent continua-
557
+ tions given short prompts. The approach can be extended beyond speech by
558
+ generating coherent piano music continuations, despite being trained without
559
+ any symbolic representation of music. As with the other models, the API can
560
+ be found through GitHub. Audio signals involve multiple scales of abstractions.
561
+ When it comes to audio synthesis, multiple scales make achieving high audio
562
+ quality while displaying consistency very challenging. This gets achieved by this
563
+ model by combining recent advances in neural audio compression, self-supervised
564
+ representation learning and language modelling.
565
+ In terms of subjective evaluation, raters were asked to listen to a sample of
566
+ 10 seconds and decide whether it is human speech or a synthetic continuation.
567
+ Based on 1000 ratings collected, the rate is 51.2%, which is not statistically
568
+ significant from assigning labels at random. This tells us that humans cannot
569
+ differentiate between synthetic and real samples.
570
+ Jukebox : This is a model, developed by OpenAI, that generates music with
571
+ singing in the raw audio domain [13]. Once again, its API can be found in
572
+ GitHub. Previously, earlier models in the text-to-music genre generated music
573
+ symbolically in the form of a pianoroll which specifies timing, pitch and velocity.
574
+ The challenging aspect is the non-symbolic approach where music is tried to be
575
+ produced directly as a piece of audio. In fact, the space of raw audio is extremely
576
+ high dimensional which makes the problem very challenging. Consequently, the
577
+ key issue is that modelling that raw audio produces long-range dependencies,
578
+ making it computationally challenging to learn the high-level semantics of music.
579
+ In order to solve this issue, this model tries to solve it by means of a hi-
580
+ erarchical VQ-VAE architecture to compress audio into a discrete space [14],
581
+ with a loss function designed to retain the most amount of information. This
582
+ model produces songs from very different genres such as rock, hip-hop and jazz.
583
+ However, the model is just limited to English songs. Concretely, its dataset for
584
+ training is from 1.2 million songs from LyricWiki. The VQ-VAE has 5 billion
585
+ parameters and is trained on 9-second audio clips for 3 days.
586
+ Whisper : This model is an Audio-to-Text converter developed by OpenAI. It
587
+ achieves several tasks in this field: multi-lingual speech recognition, translation
588
+ and language identification [26]. As in previous cases, its API can be found
589
+ in the GitHub website. The goal of a speech recognition system should be to
590
+ work reliably out of the box in a broad range of environments without requiring
591
+ supervised fine-tuning of a decoder for every deployment distribution. This is
592
+ hard because of the lack of a high-quality pre-trained decoder.
593
+ Concretely, this model is trained on 680,000 hours of labeled audio data.
594
+ This data is collected from the internet, which results in a very diverse dataset
595
+ covering a broad distribution of audio from many different environments, record-
596
+ ings setups, speakers and languages. The model makes sure that the dataset is
597
+ only from human voice as machine learning voice would impair the model. Files
598
+
599
+ State of the Art of Generative AI
600
+ 15
601
+ are broken in 30 second segments paired with the subset of the transcript that
602
+ occurs within that time segment.
603
+ The model has an encoder-decoder transformer, as this architecture has
604
+ been validated to scale reliably. We can observe the model’s architecture char-
605
+ acteristics through the figure below. We can see the different types of data and
606
+ the learning sequence.
607
+ 3.6
608
+ Text-to-Text models
609
+ The previous models all convert a non-structured data type into another one.
610
+ But, regarding text, it is very useful to convert text into another text in order to
611
+ satisfy tasks as general question and answering. The following four models treat
612
+ text and also output texts to satisfy different needs.
613
+ ChatGPT : The popular ChatGPT is a model by OpenAI which interacts
614
+ in a conversational way. As it is widely known, the model answers follow-up
615
+ questions, challenges incorrect premises and reject inappropriate requests. More
616
+ concretely, the algorithm behind ChatGPT is based on a transformer. However,
617
+ the training is made through Reinforcement Learning for Human Feedback. In
618
+ particular, an initial model is trained using supervised fine-tuning: human AI
619
+ trainers would provide conversations in which they played both sides, the user
620
+ and an AI assistant. Then, those people would be given the model-written re-
621
+ sponses to help them compose their response. This dataset was mixed to that of
622
+ InstructGPT [3], which was transformed into a dialogue format. A demo can be
623
+ found in their website and the API may also be found in OpenAI’s website. We
624
+ summarize the main steps of ChatGPT training in Figure 14, available in the
625
+ ChatGPT demo’s website. Finally, ChatGPT is also able to generate code and
626
+ simple mathematics.
627
+ LaMDA : LaMDA is a language model for dialog applications [32]. Unlike
628
+ most other language models, LaMDA was trained on dialogue. It is a family
629
+ of transformer-based neural language models specialized for dialog which have
630
+ up to 137B parameters and are pre-trained on 1.56T words of public dialog
631
+ data and web text. Fine-tuning can enable for safety and factual grounding of
632
+ the model. Only 0.001% of training data was used for fine-tuning, which is a
633
+ great achievement of the model. In particular, dialog modes take advantage of
634
+ Transformers’ ability to present long-term dependencies in text. Concretely, they
635
+ are generally very well-suited for model scaling. Consequently, LaMDA makes
636
+ use of a single model to perform multiple tasks: it generates several responses,
637
+ which are filtered for safety, grounded on an external knowledge source and re-
638
+ ranked to find the highest-quality response. We illustrate in Figure 15 an example
639
+ of a dialog with the model.
640
+ PEER : Collaborative language model developed by Meta AI research trained
641
+ on edit histories to cover the entire writing process [29]. It is based on four
642
+
643
+ 16
644
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
645
+ Fig. 14. Training steps of ChatGPT, combining supervised learning with reinforcement
646
+ learning.
647
+ Fig. 15. Example of a dialog made with LaMDA.
648
+ steps: Plan, Edit, Explain and Repeat. These steps are repeated until the text
649
+ is in a satisfactory state that requires no further updates. The model allows one to
650
+ decompose the task of writing a paper into multiple easier subtasks. As well,
651
+ the model allows humans to intervene at any time and steer the model in any
652
+ direction.
653
+ It is mainly trained on Wikipedia edit histories. The approach is a self-
654
+ training, using models to infill missing data and then train other models on this
655
+ synthetic data. The downside of this comes from comments being very noisy
656
+ and a lack of citations, which is compensated for by a retrieval system
657
+ which does not always work. The framework is based on an iterative process.
658
+
659
+ Step1
660
+ Step 2
661
+ Step 3
662
+ Collectdemonstrationdata
663
+ Collectcomparisondataand
664
+ Optimizeapolicyagainstthe
665
+ and traina supervised policy.
666
+ train a reward model.
667
+ rewardmodelusingthePPO
668
+ reinforcementlearningalgorithm.
669
+ A prompt is
670
+ A prompt and
671
+ Anewpromptis
672
+ sampledfromour
673
+ Explain reinforcement
674
+ several model
675
+ Explain reinforcement
676
+ sampled from
677
+ Write a story
678
+ promptdataset.
679
+ learningtoa6 yearold.
680
+ outputsare
681
+ learning to a 6 year old.
682
+ the dataset.
683
+ about otters.
684
+ sampled.
685
+ Explainr
686
+ agent is
687
+ The PPO model is
688
+ PPO
689
+ Alabeler
690
+ 0
691
+ initialized from the
692
+ demonstratesthe
693
+ supervised policy
694
+ desired output
695
+ teach
696
+ behavior.
697
+ We give treats and
698
+ punishments to teach.
699
+ Alabelerranksthe
700
+ Thepolicygenerates
701
+ Once upon a time
702
+ outputsfrombest
703
+ an output.
704
+ SFT
705
+ to worst.
706
+ 0-0-0.0
707
+ This datais used to
708
+ The reward model
709
+ RM
710
+ fine-tune GPT-3.5
711
+ calculates a reward
712
+ with supervised
713
+ learning.
714
+ RM
715
+ forthe output.
716
+ This data is used
717
+ to train our
718
+ The reward is used
719
+ reward model.
720
+ to update the
721
+ 0000
722
+ rk
723
+ policy using PPO.LaMDA Everest
724
+ LaMDA:
725
+ Hi, I'm Mount Everest. What would you like to know about me?
726
+ User:
727
+ Why do you think people climb you?
728
+ LaMDA:
729
+ Because I represent a very high achievement to climb me. Many people believe that if they can
730
+ climb me, they can do anything in life.
731
+ User:
732
+ Who was the first person to do that?
733
+ LaMDA:
734
+ The first confirmed persons to have reached the summit of Mount Everest was New Zealander
735
+ Sir Edmund Hillary and Nepali Sherpa Tenzing Norgay.
736
+ https://en.wikipedia.org/wiki/Mount_Everest
737
+ User:
738
+ How old was Hillary then?
739
+ LaMIDA:
740
+ He was 33 years old when he first reached the top of me in 1953!
741
+ https://en.wikipedia.org/wiki/Edmund_Hillary
742
+ User:
743
+ When people fail, is it dangerous?
744
+ LaMDA:
745
+ Yes it can be, that is why people need to train and prepare for me if they plan to climb me when
746
+ they do make it to me. I'm very hard to climb and can be dangerous when there is a whiteout
747
+ Which means everything is white with very strong winds!State of the Art of Generative AI
748
+ 17
749
+ The entire process of formulating a plan, collecting documents, performing an
750
+ edit and explaining it can be repeated multiple times until arriving at a sequence
751
+ of texts. For the training, a DeepSpeed transformer is used.
752
+ Meta AI Speech from Brain : Model developed by Meta AI to help people
753
+ unable to communicate through speech, typing or gestures [11]. Previous tech-
754
+ niques relied on invasive brain-recording techniques which require neurosurgical
755
+ interventions. This model tries to decode language directly from noninvasive
756
+ brain recordings. This would provide a safer, more scalable solution that could
757
+ benefit many more people. The challenge with this proposed method comes from
758
+ noise and differences in each person’s brain and where the sensors are placed.
759
+ A deep learning model is trained with contrastive learning and used to max-
760
+ imally align noninvasive brain recordings and speech sounds. A self-supervised
761
+ learning model called wave2vec 2.0. is used to identify the complex representa-
762
+ tions of speech in the brains of volunteers listening to audiobooks. The two nonin-
763
+ vasive technologies used to measure neuronal activity are electroencephalography
764
+ and magnetoencephalography.
765
+ Training data comes from four opensource datasets which represent 150 hours
766
+ of recordings of 169 volunteers listening to audiobooks. EEG and MEG record-
767
+ ings are inserted into a brain model, which consists of a standard deep convolu-
768
+ tional network with residual connections. These recordings are what comes from
769
+ individuals’ brains. This model then has both a speech model for sound and a
770
+ brain model for MEG data.
771
+ Results show that several components of the algorithm were beneficial to
772
+ decoding performance. As well, analysis shows that the algorithm improves as
773
+ EEG and MEG recordings increase. This research shows that self-supervised
774
+ trained AI can decode perceived speech despite noise and variability in that data.
775
+ The biggest limitation of this research is that it focuses on speech perception,
776
+ but the ultimate goal would be to extend this work to speech production.
777
+ 3.7
778
+ Text-to-Code models
779
+ Although we have covered text-to-text models, not all text follows the same
780
+ syntax. A special type of text is code. In programming, it is essential to know
781
+ how to convert an idea into code. In order to do so, Codex and Alphacode models
782
+ help.
783
+ Codex : AI system created by OpenAI which translates text to code. It is
784
+ a general-purpose programming model, as it can be applied to basically any
785
+ programming task [8]. Programming can be broken down into two parts: breaking
786
+ a problem down into simpler problems and mapping those problems into existing
787
+ code (libraries, APIs, or functions) that already exist. The second part is the
788
+ most time-consuming part for programmers, and it is where Codex excels the most.
789
+ The data collected for training was collected in May 2020 from public software
790
+ repositories hosted on GitHub, containing 179GB of unique Python files under 1
791
+
792
+ 18
793
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
794
+ MB. The model is fine-tuned from GPT-3, which already contains strong natural
795
+ language representations. The demo and the API can be found in Open AI’s
796
+ website.
797
+ Alphacode : Other language models have demonstrated an impressive ability
798
+ to generate code, but these systems still perform poorly when evaluated on more
799
+ complex, unseen problems. However, Alphacode is a system for code generation
800
+ for problems that require deeper reasoning [19]. Three components are key for
801
+ this achievement: having an extensive dataset for training and evaluation, large
802
+ and efficient transformer based architectures and a large-scale model sampling.
803
+ In terms of training, the model is firstly pre-trained through GitHub repos-
804
+ itories amounting to 715.1 GB of code. This is a much more extensive dataset
805
+ than Codex’s pre training dataset. For the training to be better, a fine-tuning
806
+ dataset is introduced from the Codeforces platform. Through this platform,
807
+ Codecontests are conducted, for the validation phase, in which we improve the per-
808
+ formance of the model. Regarding the transformer-based architecture, they use
809
+ an encoder-decoder transformer architecture. Compared to decoder-only archi-
810
+ tectures commonly used, this architecture allows for a bidirectional description
811
+ and extra flexibility. As well, they use a shallow encoder and a deep decoder
812
+ to further the model’s efficiency. To reduce the cost of sampling, multi-query
813
+ attention is used.
814
+ 3.8
815
+ Text-to-Science models
816
+ Even scientific texts are being targeted by generative AI, as the Galactica and
817
+ Minerva models have shown. Although there is a long way to manage success in
818
+ this field, it is critical to study the first attempts towards automatic scientific
819
+ text generation.
820
+ Galactica : Galactica is a new large model for automatically organizing science
821
+ developed by Meta AI and Papers with Code. The main advantage of the model
822
+ is the ability to train on it for multiple epochs without overfitting, where up-
823
+ stream and downstream performance improves with use of repeated tokens. The
824
+ dataset design is critical to the approach as all of it is processed in a common
825
+ markdown format to blend knowledge between sources. Citations are processed
826
+ via a certain token that allows researchers to predict a citation given any in-
827
+ put context. The capability of the model of predicting citations improves with
828
+ scale and the model becomes better at the distribution of citations. In addition,
829
+ the model can perform multi-modal tasks involving SMILES chemical formulas
830
+ and protein sequences. Concretely, Galactica uses a transformer architecture in
831
+ a decoder-only setup with GeLU activation for all model sizes.
832
+ Minerva : Language model capable of solving mathematical and scientific ques-
833
+ tions using step-by-step reasoning. Minerva has a very clear focus on the collec-
834
+ tion of training data for this purpose. It solves quantitative reasoning problems,
835
+
836
+ State of the Art of Generative AI
837
+ 19
838
+ makes models at scale and employs best-in-class inference techniques. Concretely,
839
+ Minerva solves these problems by generating solutions step-by-step, this means
840
+ including calculations and symbolic manipulation without having the need for
841
+ external tools such as a calculator.
842
+ 3.9
843
+ Other models
844
+ We would like to finish our review by covering additional models that do not fit
845
+ any of the categories mentioned previously.
846
+ Alphatensor, created by the research company Deepmind, is a completely
847
+ revolutionary model in the industry for its ability to discover new algorithms
848
+ [15]. In the published example, Alpha Tensor creates a more efficient algorithm
849
+ for matrix multiplication, which is very important, as improving the efficiency
850
+ of algorithms affects a lot of computations, from neural networks to scientific
851
+ computing routines.
852
+ The methodology is based on a deep reinforcement learning approach in
853
+ which the agent, AlphaTensor is trained to play a single-player game where the
854
+ objective is finding tensor decompositions within a finite factor space. At each
855
+ step of the TensorGame, the player selects how to combine different entries of the
856
+ matrices to multiply. A score is assigned based on the number of selected oper-
857
+ ations required to reach the correct multiplication result. To solve TensorGame,
858
+ an agent, AlphaTensor was developed. AlphaTensor uses a specialized neural
859
+ network architecture to exploit symmetries using synthetic training games.
860
+ GATO is a single generalist agent made by Deepmind. It works as a multi-
861
+ modal, multi-task, multi-embodiment generalist policy [27]. The same network
862
+ with the same weights can carry very different capabilities from playing Atari,
863
+ caption images, chatting, stacking blocks and many more. There are many bene-
864
+ fits from using a single neural sequence model across all tasks. It reduces the need
865
+ for hand crafting policy models with their own inductive biases. It increases the
866
+ amount and diversity of training data. This general agent is successful at many
867
+ tasks and can be adapted with little extra data to succeed at an even larger
868
+ number of tasks. Training occurs at the operating point of model scale that allows
869
+ real-time control of real-world robots, currently around 1.2B parameters in the
870
+ case of GATO.
871
+ Other published generative AI models are able to generate human motion
872
+ [31] or, in the case of ChatBCG, slides using ChatGPT as a surrogate model.
873
+ 4
874
+ Conclusions and further work
875
+ Through this paper, we can observe the capabilities which generative artificial
876
+ intelligence has. We have seen a great deal of creativity as well as personalization
877
+ in tasks such as text-to-image or in tasks such as text-to-audio. They also are
878
+ accurate in text-to-science or text-to-code tasks. This can help economies in a
879
+ major way as it can help optimize creative and non-creative tasks.
880
+
881
+ 20
882
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
883
+ However, because of the way that they are constructed at the moment, these
884
+ models face a number of limitations. In terms of dataset, finding data for some
885
+ of the models found such as the text-to-science or the text-to-audio is very hard,
886
+ making it very time-consuming to train the model. In particular, datasets and
887
+ parameters have to be enormous, making it harder to train. One of the biggest
888
+ issues arises when models face problems outside their training dataset, which
889
+ they have more trouble solving. As well, in terms of computation, a lot
890
+ of time and computation capacity is necessary in order to run them. Many days
891
+ and advanced computers are needed in order to run the models.
892
+ In addition, these models face bias from the data which needs to be controlled.
893
+ Galactica model tries to control this issue through a layer of no bias, but it is still
894
+ a major issue for Generative Artificial Intelligence.
895
+ With the Minerva model, we can see that the model knows the steps which it
896
+ needs to take to solve an equation. This is groundbreaking as one of the biggest
897
+ limitations with these models is that the models do not understand exactly
898
+ what they are doing. Moreover, it is still a nascent industry; thus accuracy is
899
+ still an issue. Text-to-video models for example are only represented by Phenaki
900
+ because of how hard it is to produce accurate videos. Text-to-science models do
901
+ find some accuracy but that accuracy is still far behind what it should be
902
+ for professionals to actually rely on this technology on a day-to-day basis.
903
+ Furthermore, these models need to be constrained because of a lack of un-
904
+ derstanding of ethics. Phenaki on its paper even acknowledges that a system
905
+ like text-to-video can be used to create deep-fakes. Lastly, we are still in a phase
906
+ where we are discovering what exactly the purpose of this intelligence will be.
907
+ There have been articles comparing Google to ChatGPT3, which is totally inaccurate
908
+ as ChatGPT3 does not update its information in real time. We should be aware
909
+ of the limitations of these models to try to improve them in the following
910
+ years.
911
+ References
912
+ 1. Alayrac, J.-B., Donahue, J., Luc, P., Miech, A., Barr, I., Hasson, Y.,
913
+ Lenc, K., Mensch, A., Millican, K., Reynolds, M., et al. Flamingo: a visual
914
+ language model for few-shot learning. arXiv preprint arXiv:2204.14198 (2022).
915
+ 2. Anantrasirichai, N., and Bull, D. Artificial intelligence in the creative indus-
916
+ tries: a review. Artificial Intelligence Review (2021), 1–68.
917
+ 3. Bhavya, B., Xiong, J., and Zhai, C. Analogy generation by prompting large
918
+ language models: A case study of instructgpt. arXiv preprint arXiv:2210.04186
919
+ (2022).
920
+ 4. Borsos, Z., Marinier, R., Vincent, D., Kharitonov, E., Pietquin, O.,
921
+ Sharifi, M., Teboul, O., Grangier, D., Tagliasacchi, M., and Zeghidour,
922
+ N. Audiolm: a language modeling approach to audio generation. arXiv preprint
923
+ arXiv:2209.03143 (2022).
924
+ 5. Budzianowski, P., and Vuli´c, I. Hello, it’s gpt-2–how can i help you? towards
925
+ the use of pretrained language models for task-oriented dialogue systems. arXiv
926
+ preprint arXiv:1907.05774 (2019).
927
+
928
+ State of the Art of Generative AI
929
+ 21
930
+ 6. Chang, H., Zhang, H., Barber, J., Maschinot, A., Lezama, J., Jiang, L.,
931
+ Yang, M.-H., Murphy, K., Freeman, W. T., Rubinstein, M., et al. Muse:
932
+ Text-to-image generation via masked generative transformers.
933
+ arXiv preprint
934
+ arXiv:2301.00704 (2023).
935
+ 7. Chen, J., Guo, H., Yi, K., Li, B., and Elhoseiny, M. Visualgpt: Data-efficient
936
+ adaptation of pretrained language models for image captioning. In Proceedings of
937
+ the IEEE/CVF Conference on Computer Vision and Pattern Recognition (2022),
938
+ pp. 18030–18040.
939
+ 8. Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H. P. d. O., Kaplan, J.,
940
+ Edwards, H., Burda, Y., Joseph, N., Brockman, G., et al. Evaluating large
941
+ language models trained on code. arXiv preprint arXiv:2107.03374 (2021).
942
+ 9. Creswell, A., White, T., Dumoulin, V., Arulkumaran, K., Sengupta, B.,
943
+ and Bharath, A. A. Generative adversarial networks: An overview. IEEE signal
944
+ processing magazine 35, 1 (2018), 53–65.
945
+ 10. Daras, G., and Dimakis, A. G. Discovering the hidden vocabulary of dalle-2.
946
+ arXiv preprint arXiv:2206.00169 (2022).
947
+ 11. D´efossez, A., Caucheteux, C., Rapin, J., Kabeli, O., and King, J.-R. De-
948
+ coding speech from non-invasive brain recordings. arXiv preprint arXiv:2208.12266
949
+ (2022).
950
+ 12. Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. Bert: Pre-training
951
+ of deep bidirectional transformers for language understanding.
952
+ arXiv preprint
953
+ arXiv:1810.04805 (2018).
954
+ 13. Dhariwal, P., Jun, H., Payne, C., Kim, J. W., Radford, A., and Sutskever,
955
+ I. Jukebox: A generative model for music. arXiv preprint arXiv:2005.00341 (2020).
956
+ 14. Ding, S., and Gutierrez-Osuna, R. Group latent embedding for vector quan-
957
+ tized variational autoencoder in non-parallel voice conversion. In INTERSPEECH
958
+ (2019), pp. 724–728.
959
+ 15. Fawzi, A., Balog, M., Huang, A., Hubert, T., Romera-Paredes, B.,
960
+ Barekatain, M., Novikov, A., R Ruiz, F. J., Schrittwieser, J., Swirszcz,
961
+ G., et al. Discovering faster matrix multiplication algorithms with reinforcement
962
+ learning. Nature 610, 7930 (2022), 47–53.
963
+ 16. Kandlhofer, M., Steinbauer, G., Hirschmugl-Gaisch, S., and Huber, P.
964
+ Artificial intelligence and computer science in education: From kindergarten to
965
+ university. In 2016 IEEE Frontiers in Education Conference (FIE) (2016), IEEE,
966
+ pp. 1–9.
967
+ 17. Kingma, D., Salimans, T., Poole, B., and Ho, J. Variational diffusion models.
968
+ Advances in neural information processing systems 34 (2021), 21696–21707.
969
+ 18. LeCun, Y., Bengio, Y., and Hinton, G. Deep learning. nature 521, 7553 (2015),
970
+ 436–444.
971
+ 19. Li, Y., Choi, D., Chung, J., Kushman, N., Schrittwieser, J., Leblond, R.,
972
+ Eccles, T., Keeling, J., Gimeno, F., Dal Lago, A., et al. Competition-level
973
+ code generation with alphacode. Science 378, 6624 (2022), 1092–1097.
974
+ 20. Lin, C.-H., Gao, J., Tang, L., Takikawa, T., Zeng, X., Huang, X., Kreis,
975
+ K., Fidler, S., Liu, M.-Y., and Lin, T.-Y. Magic3d: High-resolution text-to-3d
976
+ content creation. arXiv preprint arXiv:2211.10440 (2022).
977
+ 21. Lin, D. C.-E., Germanidis, A., Valenzuela, C., Shi, Y., and Martelaro,
978
+ N. Soundify: Matching sound effects to video. arXiv preprint arXiv:2112.09726
979
+ (2021).
980
+ 22. Lin, T., Wang, Y., Liu, X., and Qiu, X. A survey of transformers. AI Open
981
+ (2022).
982
+
983
+ 22
984
+ Roberto Gozalo-Brizuela, Eduardo C. Garrido-Merch´an
985
+ 23. Murphy, K. P. Probabilistic machine learning: an introduction. MIT press, 2022.
986
+ 24. Poole, B., Jain, A., Barron, J. T., and Mildenhall, B. Dreamfusion: Text-
987
+ to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022).
988
+ 25. Radford, A., Kim, J. W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S.,
989
+ Sastry, G., Askell, A., Mishkin, P., Clark, J., et al. Learning transferable
990
+ visual models from natural language supervision. In International Conference on
991
+ Machine Learning (2021), PMLR, pp. 8748–8763.
992
+ 26. Radford, A., Kim, J. W., Xu, T., Brockman, G., McLeavey, C., and
993
+ Sutskever, I. Robust speech recognition via large-scale weak supervision. arXiv
994
+ preprint arXiv:2212.04356 (2022).
995
+ 27. Reed, S., Zolna, K., Parisotto, E., Colmenarejo, S. G., Novikov, A.,
996
+ Barth-Maron, G., Gimenez, M., Sulsky, Y., Kay, J., Springenberg, J. T.,
997
+ et al. A generalist agent. arXiv preprint arXiv:2205.06175 (2022).
998
+ 28. Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E.,
999
+ Ghasemipour, S. K. S., Ayan, B. K., Mahdavi, S. S., Lopes, R. G., et al.
1000
+ Photorealistic text-to-image diffusion models with deep language understanding.
1001
+ arXiv preprint arXiv:2205.11487 (2022).
1002
+ 29. Schick, T., Dwivedi-Yu, J., Jiang, Z., Petroni, F., Lewis, P., Izacard, G.,
1003
+ You, Q., Nalmpantis, C., Grave, E., and Riedel, S. Peer: A collaborative
1004
+ language model. arXiv preprint arXiv:2208.11663 (2022).
1005
+ 30. Susnjak, T.
1006
+ Chatgpt: The end of online exam integrity?
1007
+ arXiv preprint
1008
+ arXiv:2212.09292 (2022).
1009
+ 31. Tevet, G., Raab, S., Gordon, B., Shafir, Y., Cohen-Or, D., and Bermano,
1010
+ A. H. Human motion diffusion model. arXiv preprint arXiv:2209.14916 (2022).
1011
+ 32. Thoppilan, R., De Freitas, D., Hall, J., Shazeer, N., Kulshreshtha, A.,
1012
+ Cheng, H.-T., Jin, A., Bos, T., Baker, L., Du, Y., et al. Lamda: Language
1013
+ models for dialog applications. arXiv preprint arXiv:2201.08239 (2022).
1014
+ 33. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez,
1015
+ A. N., Kaiser, �L., and Polosukhin, I. Attention is all you need. Advances in
1016
+ neural information processing systems 30 (2017).
1017
+ 34. Villegas, R., Babaeizadeh, M., Kindermans, P.-J., Moraldo, H., Zhang,
1018
+ H., Saffar, M. T., Castro, S., Kunze, J., and Erhan, D. Phenaki: Variable
1019
+ length video generation from open domain textual description.
1020
+ arXiv preprint
1021
+ arXiv:2210.02399 (2022).
1022
+
9NE3T4oBgHgl3EQfqwrj/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
9dFJT4oBgHgl3EQfoixI/content/tmp_files/2301.11596v1.pdf.txt ADDED
@@ -0,0 +1,1090 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ThoughtSource: A central hub for large language
2
+ model reasoning data
3
+ Simon Ott
4
+ 1*, Konstantin Hebenstreit
5
+ 1*, Valentin Liévin
6
+ 2, Christoffer Egeberg Hother
7
+ 4, Milad
8
+ Moradi
9
+ 1, Maximilian Mayrhauser
10
+ 1, Robert Praas
11
+ 1,3, Ole Winther
12
+ 2, Matthias Samwald
13
+ 1
14
+ 1) Institute of Artificial Intelligence, Medical University of Vienna, Vienna, Austria
15
+ 2) Section for Cognitive Systems, Technical University of Denmark, Lyngby, Denmark
16
+ 3) School of Electrical Engineering and Computer Science, The Royal Institute of Technology
17
+ (KTH), Stockholm, Sweden
18
+ 4) Department of Clinical Immunology, Copenhagen University Hospital, Copenhagen, Denmark
19
+ * equal contribution
20
+ Corresponding author: Matthias Samwald (matthias.samwald [at] meduniwien.ac.at)
21
+ Abstract
22
+ Large language models (LLMs) such as GPT-3 and ChatGPT have recently demonstrated
23
+ impressive results across a wide range of tasks. LLMs are still limited, however, in that they
24
+ frequently fail at complex reasoning, their reasoning processes are opaque, they are prone to
25
+ ‘hallucinate’ facts, and there are concerns about their underlying biases. Letting models verbalize
26
+ reasoning steps as natural language, a technique known as chain-of-thought prompting, has
27
+ recently been proposed as a way to address some of these issues. Here we present the first release
28
+ of ThoughtSource, a meta-dataset and software library for chain-of-thought (CoT) reasoning. The
29
+ goal of ThoughtSource is to improve future artificial intelligence systems by facilitating
30
+ qualitative understanding of CoTs, enabling empirical evaluations, and providing training data.
31
+ This first release of ThoughtSource integrates six scientific/medical, three general-domain and
32
+ five math word question answering datasets.
33
+ Background & Summary
34
+ The most recent generation of large language models (LLMs) has produced impressive results
35
+ across a wide range of tasks. Examples of such models include T0 
36
+ 1, GPT-3 
37
+ 2, InstructGPT 
38
+ 3 and
39
+ ChatGPT (https://openai.com/blog/chatgpt/). These models demonstrated remarkable ability to
40
+
41
+ ThoughtSource: a central hub for large language model reasoning data | 2
42
+ generate text that is both realistic and coherent, as well as good performance on a broad
43
+ spectrum of tasks, despite not explicitly being trained on them
44
+ 3.
45
+ However, despite this ability, LLMs are also limited in several ways. They often fail to produce
46
+ accurate predictions due to their inability to accomplish complex reasoning, such as solving
47
+ mathematical problems or question answering tasks requiring multi-hop reasoning.
48
+ Furthermore, they tend to be black boxes, making it difficult to understand how and why
49
+ predictions are generated. These limitations severely limit the application domains of LLMs and
50
+ have the potential to cause harm, as lack of explainability and robustness can lead to critical
51
+ failures and biases when these models are deployed in practice.
52
+ One recently proposed method for enabling complex reasoning and generating explanations with
53
+ LLMs is to force models to explicitly verbalize reasoning steps as natural language, a technique
54
+ known as chain-of-thought prompting 
55
+ 4,5. This method improved performance on a variety of
56
+ tasks and sparked the active development of further refinements
57
+ 6, such as decomposing
58
+ problems and structuring reasoning (e.g., least-to-most prompting 
59
+ 7, ReAct 
60
+ 8, self-ask 
61
+ 9, maieutic
62
+ prompting 
63
+ 10, successive prompting 
64
+ 11) and/or extending LLM capabilities by leveraging external
65
+ services for tasks like information retrieval (e.g., self-ask 
66
+ 9, IRCoT 
67
+ 12, DSP
68
+ 13). The terminology
69
+ surrounding these rapidly evolving techniques is not settled, hence in this document, we refer to
70
+ all approaches that result in a linear sequence of reasoning steps as 'chain-of-thought' (CoT).
71
+ Meta-datasets (datasets of datasets) that are easily accessible and standardized have proven useful
72
+ for training and evaluating versatile LLMs. Examples include SuperGLUE
73
+ 14 for general-domain
74
+ language model tasks, BigBIO 
75
+ 15 and BLURB 
76
+ 16 for biomedical tasks, or Pile 
77
+ 17 and ROOTS 
78
+ 18 as
79
+ text corpora for LLM pre-training. Datasets can be complemented by tools such as
80
+ PromptSource, which was used to convert a large number of datasets into prompts fit for training
81
+ and interrogating LLMs. PromptSource facilitated training the highly performant T0 model 
82
+ 1.
83
+ Here we present the first release of ThoughtSource, a meta-dataset and software library for
84
+ chain-of-thought reasoning in LLMs (https://github.com/OpenBioLink/ThoughtSource). The
85
+ goals of ThoughtSource are to:
86
+ — Facilitate qualitative understanding of CoTs generated by LLMs under various conditions
87
+ (e.g., across tasks, models and prompts).
88
+ — Enable empirical and quantitative evaluation.
89
+ — Provide a library of diverse CoT training data for improving performance, robustness,
90
+ explainability and value-alignment of future LLM-based AI systems.
91
+
92
+ ThoughtSource: a central hub for large language model reasoning data | 3
93
+ Methods
94
+ We selected NLP benchmarks for question answering and natural language inference for which
95
+ pre-existing data for constructing CoTs was available. For some of the datasets, one or multiple
96
+ additional datasets were used as sources for additional CoTs, allowing for the comparison of
97
+ different CoT generation methodologies. We created data loader scripts compatible with the
98
+ Hugging Face datasets library 
99
+ 19 for all datasets. Additionally, we collected metadata of attributes
100
+ such as descriptions, websites and licenses. We contacted dataset providers and encouraged them
101
+ to choose an open source/open data license if licensing information was unavailable or unclear.
102
+ We implemented two kinds of schemas: 1) source dataset schemas, which are unique to each
103
+ dataset and provide data close to their original format; and 2) a standardized ThoughtSource
104
+ schema, which maps all datasets into a common format. The ThoughtSource schema was created
105
+ by extending the question answering schema of the BigBIO project 
106
+ 15.
107
+ We implemented tailored algorithms for converting each dataset because the collected datasets
108
+ provide explanations in different ways, such as math expressions or structured graph-based
109
+ explanations. Furthermore, we performed preprocessing such as capitalization and punctuation
110
+ correction. To recover standard formatted text from pre-tokenized datasets, we reversed the
111
+ tokenization. This preprocessing was performed only on data in the ThoughtSource schema,
112
+ while data in the Source schemas was left in their original formatting. All code for running these
113
+ conversions is available in our Github repository.
114
+ We developed a suite of Python libraries and tools for generating novel CoTs and answers by
115
+ calling LLM APIs, as well as tools for evaluating, comparing and annotating datasets. We built
116
+ upon the LangChain library (https://github.com/hwchase17/langchain/) for interfacing with a
117
+ wide variety of external LLM APIs.
118
+ Data Records
119
+ This first release of ThoughtSource integrates six scientific/medical, three general-domain and
120
+ five math word question answering datasets (Table 1). For every dataset except for PubmedQA
121
+ and MedQA we provide ‘reference CoTs’. We created these reference CoTs by converting
122
+ rationales provided by original datasets into reasoning chains. These rationales, depending on
123
+ the dataset, were created by human experts or obtained from crowdsourcing. For MedQA,
124
+ MedMCQA and PubmedQA, we generated and integrated CoTs with the AI systems
125
+ text-davinci-002 
126
+ 3 and code-davinci-002 
127
+ 20 (described in detail by co-authors Liévin et al. in a
128
+ separate manuscript
129
+ 21). Furthermore, we extended CommonsenseQA and StrategyQA with
130
+ AI-generated CoTs created by few-shot
131
+ 4 and zero-shot
132
+ 5 prompting. Since current LLM models
133
+ are still prone to errors, it should be noted that AI-generated CoTs may contain faulty reasoning.
134
+
135
+ ThoughtSource: a central hub for large language model reasoning data | 4
136
+ Table 1: Integrated datasets. For some core datasets, additional datasets were used as sources for
137
+ additional CoTs.
138
+ Dataset
139
+ License
140
+ Scientific and medical question answering
141
+ WorldTree V2 
142
+ 22
143
+ AI2 Mercury license
144
+ EntailmentBank
145
+ 23
146
+ CC BY 4.0
147
+ OpenBookQA
148
+ 24
149
+ Apache License 2.0
150
+ MedQA (USMLE )
151
+ 25
152
+ Core dataset
153
+ MIT
154
+ CoT source: few-shot from Liévin et al.
155
+ 21
156
+ CC-BY 4.0
157
+ MedMCQA
158
+ 26
159
+ Core dataset
160
+ MIT
161
+ CoT source: few-shot from Liévin et al.
162
+ 21
163
+ CC-BY 4.0
164
+ PubmedQA
165
+ 27
166
+ Core dataset
167
+ MIT
168
+ CoT source: few-shot from Liévin et al.
169
+ 21
170
+ CC-BY 4.0
171
+ General-domain question answering
172
+ CommonsenseQA
173
+ 28
174
+ Core dataset
175
+ MIT
176
+ CoT source: ECQA ³
177
+ Community Data
178
+ License Agreements
179
+ Sharing license 1.0
180
+ CoT source: few-shot from Wei et al .
181
+ 4, zero-shot from
182
+ Kojima et al .
183
+ 5
184
+ Unspecified
185
+ StrategyQA
186
+ 29
187
+ Core dataset
188
+ MIT
189
+ CoT source: few-shot from Wei et al .
190
+ 4, zero-shot from
191
+ Kojima et al .
192
+ 5
193
+ Unspecified
194
+ QED
195
+ 30
196
+ CC BY-SA 3.0
197
+ Math word problems
198
+ AQUA-RAT
199
+ 31
200
+ Apache 2.0
201
+ ASDiv
202
+ 32
203
+ CC BY-NC 4.0
204
+ GSM8K
205
+ 33
206
+ MIT
207
+ MAWPS
208
+ 34
209
+ MIT
210
+ SVAMP
211
+ 35
212
+ MIT
213
+
214
+ ThoughtSource: a central hub for large language model reasoning data | 5
215
+ Scientific/medical question answering datasets
216
+ WorldTree V2 
217
+ 22 is one of the most detailed multi-hop science question answering datasets
218
+ available. Finding the right multiple-choice answers requires a multi-hop inference combining
219
+ between 1 and 16 facts (average: 6). It contains explanations created by experts in the form of
220
+ multiple facts. We concatenated these facts and applied a set of rules to improve style and
221
+ grammaticality to yield reference CoTs that are close to natural language.
222
+ EntailmentBank 
223
+ 23 contains open-domain science exam questions and answers, along with
224
+ systematic explanations that show how the correct answer is reached through a series of steps.
225
+ These steps are organized into a tree structure, known as an entailment tree, which starts with
226
+ known facts and progresses through intermediate conclusions until the final answer is reached.
227
+ These entailment trees are also serialized into text-based proofs by traversing the trees. We
228
+ applied a set of rules to improve style and grammaticality in these proofs to yield reference CoTs
229
+ that are close to natural language.
230
+ OpenBookQA 
231
+ 24 contains questions modeled after open-book exams of elementary-level science.
232
+ They require multi-step reasoning, commonsense knowledge, and a diverse application of core
233
+ science facts to find the correct answer. The dataset provides over 1,300 core science facts and a
234
+ mapping to all of the questions. By design, questions in OpenBookQA are answered incorrectly
235
+ by both retrieval-based and word co-occurrence algorithms. The dataset contains a single-fact
236
+ explanation of the correct answer for each question, which we adopted to create reference CoTs.
237
+ MedQA 
238
+ 25 is a free-form multiple-choice OpenQA dataset containing questions from medical
239
+ board exams in the US (USMLE), Mainland China and Taiwan. We imported the
240
+ English-language USMLE subset. Reference CoTs are not provided.
241
+ MedMCQA 
242
+ 26 is a multiple-choice question answering dataset containing real-world medical
243
+ entrance exam questions from the All India Institute of Medical Sciences (AIIMS PG) and
244
+ National Eligibility cum Entrance Test (NEET PG). Answer rationales authored by human
245
+ experts were integrated as reference CoTs.
246
+ PubmedQA 
247
+ 27 is a question answering dataset containing biomedical questions extracted from
248
+ PubMed abstracts that can be answered with yes/no/maybe answers. In addition to the short
249
+ answer, each question comes with a longer answer, which can be used as reference CoT.
250
+ For MedQA, MedMCQA and PubmedQA we added CoTs generated with the AI systems
251
+ text-davinci-002 
252
+ 3 and code-davinci-002 
253
+ 20.
254
+
255
+ ThoughtSource: a central hub for large language model reasoning data | 6
256
+ General-domain question answering datasets
257
+ CommonsenseQA 
258
+ 28 is a collection of multiple-choice questions that test a wide range of general
259
+ knowledge. We created reference CoTs for the train and validation set derived from the
260
+ crowd-sourced ECQA dataset³. We also added AI-generated reasoning chains generated with
261
+ few-shot
262
+ 4 and zero-shot
263
+ 5 prompting, which are available for the validation split.
264
+ StrategyQA 
265
+ 29 is a question answering dataset that tests the ability to reason through
266
+ open-domain questions and provide Yes/No answers. Each example includes a question, a
267
+ decomposition of the question into reasoning steps, and evidence paragraphs from Wikipedia.
268
+ The dataset was created through a crowdsourcing process to gather creative and diverse
269
+ questions. Human-generated freetext reasoning chains are part of the train split of the original
270
+ dataset and were used as CoTs. The dataset also includes relevant paragraphs from Wikipedia,
271
+ but these were not included in our CoTs. We extended the StrategyQA dataset with AI-generated
272
+ CoTs created through few-shot
273
+ 4 and zero-shot
274
+ 5 prompting, which are available for the train split.
275
+ QED 
276
+ 30 is a collection of expert-annotated structured explanations for answers to questions, built
277
+ upon a subset of the Google Natural Questions dataset. Given a question and a passage from
278
+ Wikipedia, QED uses linguistic information to represent explanations as a series of interpretable
279
+ steps, such as referential equality, sentencehood, and entailment. Structured reasoning chains by
280
+ experts are provided for all examples. To create reference CoTs, we extracted the sentence that
281
+ entails the answer; statements about referential equality in QED were converted to natural
282
+ language and added as additional steps in the CoTs (e.g. "The noun phrase […] in the sentence and
283
+ the noun phrase […] in the question refer to the same thing.").
284
+ Math word problem datasets
285
+ Algebra Question Answering with Rationales (AQUA-RAT) 
286
+ 31 is a large-scale multiple-choice
287
+ dataset containing algebraic word problems. Each problem consists of a question with five
288
+ possible answers and a rationale, a step-by-step natural language explanation of the solution. We
289
+ used natural language explanations as reference CoTs.
290
+ Academia Sinica Diverse (ASDiv) math word problem (MWP) dataset 
291
+ 32 aims to provide more
292
+ diverse language patterns and problem types than previous datasets. It covers most of the math
293
+ topics taught in elementary school. Each MWP is labeled with its grade level (for indicating
294
+ difficulty), the needed math operation (e.g. division) and includes a short explanation of the
295
+ solution. ASDiv contains explanations of answers in the form of nested math expressions using
296
+ common operators such as addition, subtraction, division and multiplication. We generated
297
+ reference CoTs by converting these math expressions into natural language explanation chains
298
+ using a rule-based method.
299
+
300
+ ThoughtSource: a central hub for large language model reasoning data | 7
301
+ Grade School Math 8K (GSM8K) 
302
+ 33 contains grade school math word problems. Despite their
303
+ conceptual simplicity, these problems are more challenging to process than earlier datasets due
304
+ to their linguistic diversity. The creators of GSM8K instructed crowd workers to write solutions
305
+ to problems in free text format, which we used as reference CoTs in ThoughtSource, omitting
306
+ any additional arithmetic specifications.
307
+ Math Word Problems (MAWPS) 
308
+ 34 is an online platform that provides a collection of math word
309
+ problems. The problems have simple one- or two-line explanations for their solutions. MAWPS
310
+ includes datasets from various sources, offers tools for automatically creating datasets with
311
+ specific characteristics as well as the possibility to tune lexical and template overlap. We
312
+ converted explanatory math expressions to reference CoTs with an approach similar to the one
313
+ used for ASDiv.
314
+ Simple Variations on Arithmetic Math Word Problems (SVAMP) 
315
+ 35 was created by applying
316
+ carefully chosen variations to examples from existing datasets, such as ASDiv and MAWPS.
317
+ These variations make it difficult for language models to solve the problems using simple
318
+ heuristics, and instead require a deeper understanding and reasoning ability. We converted math
319
+ expressions to reference CoTs with an approach similar to the one used for ASDiv.
320
+ Dataset schema
321
+ Tables 2–5 provide descriptions and datatypes of the various fields in the ThoughtSource schema.
322
+ Any performed sample task leads to a generated CoT and answer to the question. Annotations
323
+ can be added programmatically or through an annotator tool.
324
+ Table 2: Fields of the ‘sample’ object.
325
+ Field
326
+ Description
327
+ Datatype
328
+ id
329
+ Unique identifier of object
330
+ string
331
+ ref_id
332
+ Identifier of external objects such as documents or other
333
+ resources
334
+ string
335
+ question
336
+ Question of task
337
+ string
338
+ type
339
+ Type of the question answering task, currently one of
340
+ [“multiplechoice”, “text”, “number”, “collection”]
341
+ string
342
+ choices
343
+ Set of multiple options containing the gold answer
344
+ list(string)
345
+ context
346
+ Additional context for answering the question
347
+ string
348
+ cot
349
+ Reference CoT, often human-generated.
350
+ list(string)
351
+ answer
352
+ Gold answer of task. Can contain multiple elements if type is
353
+ collection
354
+ list(string)
355
+ generated_cot
356
+ List of generated_cot objects
357
+ list(generated_cot_object)
358
+
359
+ ThoughtSource: a central hub for large language model reasoning data | 8
360
+ Table 3: Fields of the ‘generated_cot’ object.
361
+ Field
362
+ Description
363
+ Datatype
364
+ id
365
+ Unique identifier of object
366
+ string
367
+ templates_version
368
+ Version of the fragments.json file
369
+ string
370
+ instruction
371
+ Identifier of the cot trigger fragment stored in
372
+ fragments.json
373
+ string
374
+ cot_trigger
375
+ Identifier of the cot trigger fragment stored in
376
+ fragments.json
377
+ string
378
+ cot_trigger_template
379
+ Template to specify structure of prompt text
380
+ string
381
+ prompt_text
382
+ Full text of prompt used for the CoT generation step
383
+ string
384
+ answers
385
+ List of generated answer objects
386
+ list(answer_object)
387
+ cot
388
+ Generated chain-of-thought
389
+ string
390
+ author
391
+ Name of the author
392
+ string
393
+ date
394
+ Date of the chain-of-thought generation
395
+ string
396
+ api_service
397
+ Identification of the used api service
398
+ string
399
+ model
400
+ Identification of the used language model
401
+ string
402
+ comment
403
+ Comment
404
+ string
405
+ annotation
406
+ List of annotation objects
407
+ list(annotation_object)
408
+ Table 4: Fields of the ‘answer’ object.
409
+ Field
410
+ Description
411
+ Datatype
412
+ id
413
+ Unique identifier of object
414
+ string
415
+ answer_extraction
416
+ Identifier of the answer extraction fragment stored in
417
+ fragments.json
418
+ string
419
+ cot_trigger_template
420
+ Template to specify structure of prompt text
421
+ string
422
+ answer_extraction
423
+ _text
424
+ Full text of prompt used for the answer extraction step
425
+ string
426
+ answer
427
+ Extracted answer
428
+ string
429
+ correct_answer
430
+ True if the extracted answer is equal to the gold answer,
431
+ else false
432
+ bool
433
+ Table 5: Fields of the ‘annotation’ object.
434
+ Field
435
+ Description
436
+ Datatype
437
+ author
438
+ Name of the author
439
+ string
440
+ date
441
+ Date of the creation of the annotation
442
+ string
443
+ key
444
+ Specifies the label of the annotation
445
+ string
446
+ value
447
+ Specifies the value of the annotation
448
+ string
449
+
450
+ ThoughtSource: a central hub for large language model reasoning data | 9
451
+ Table 6 shows the example counts, CoT counts and answer types of each dataset. The majority of
452
+ datasets in the current collection are of the multiple choice answer type. The medical dataset
453
+ MedMCQA is the largest among all datasets.
454
+ Table 6: Statistics and answer types for all datasets. Note that generated CoTs are not available for all
455
+ examples, and multiple CoTs might have been generated for any given example.
456
+ [Link: Notebook used for generating stats and graphs]
457
+ Dataset ID
458
+ Examples
459
+ Reference
460
+ CoTs
461
+ Examples w.
462
+ generated
463
+ CoTs
464
+ Generated
465
+ CoTs
466
+ Answer type
467
+ aqua
468
+ 97,975
469
+ 97,975
470
+ 0
471
+ 0
472
+ multiple choice
473
+ asdiv
474
+ 1218
475
+ 1218
476
+ 0
477
+ 0
478
+ number
479
+ commonsense_qa
480
+ 12,102
481
+ 10,962
482
+ 1221
483
+ 2437
484
+ multiple choice
485
+ entailment_bank
486
+ 1840
487
+ 1840
488
+ 0
489
+ 0
490
+ text
491
+ gsm8k
492
+ 8792
493
+ 8792
494
+ 0
495
+ 0
496
+ number
497
+ mawps
498
+ 1921
499
+ 1921
500
+ 0
501
+ 0
502
+ number
503
+ med_qa (USMLE)
504
+ 12,723
505
+ 0
506
+ 1273
507
+ 133,660
508
+ multiple choice
509
+ medmc_qa
510
+ 193,155
511
+ 161,558
512
+ 1000
513
+ 104,987
514
+ multiple choice
515
+ open_book_qa
516
+ 5957
517
+ 5957
518
+ 0
519
+ 0
520
+ multiple choice
521
+ pubmed_qa
522
+ 1000
523
+ 0
524
+ 500
525
+ 2500
526
+ multiple choice
527
+ qed
528
+ 6175
529
+ 6175
530
+ 0
531
+ 0
532
+ collection
533
+ strategy_qa
534
+ 2780
535
+ 2290
536
+ 2289
537
+ 4532
538
+ bool
539
+ svamp
540
+ 1000
541
+ 1000
542
+ 0
543
+ 0
544
+ number
545
+ worldtree
546
+ 4367
547
+ 4365
548
+ 0
549
+ 0
550
+ multiple choice
551
+ We analyzed the distribution of question and CoT field lengths (Fig. 1). MedQA has the longest
552
+ median question length, while PubMedQA has the longest median CoT length. Several datasets
553
+ contain outlier CoTs with extremely long text lengths. Context fields were only filled for the
554
+ PubmedQA and QED datasets, with mean context lengths of 116 and 56 tokens, respectively.
555
+
556
+ ThoughtSource: a central hub for large language model reasoning data | 10
557
+ Figure 1: Distribution of question and CoT field lengths.
558
+ Technical Validation
559
+ The datasets were reviewed by three team members and issues were tracked on the issue tracker
560
+ of the associated GitHub repository.
561
+ To characterize potential overlaps and relations between datasets, we calculated mutual n-gram
562
+ overlap using n=3. (Fig. 2) . To quantify the overlap between two sets of n-grams we use the
563
+ Szymkiewicz–Simpson coefficient (overlap coefficient), which can be interpreted as the
564
+ proportion of n-grams of the smaller dataset that are contained in the bigger dataset.
565
+
566
+ worldtree
567
+ svamp
568
+ 工白
569
+ strategy_qa
570
+ 百T
571
+ qed
572
+ pubmed_qa
573
+ open_book_qa
574
+ H
575
+ Dataset
576
+ medmc_qa
577
+ med_qa
578
+ mawps
579
+ gsm8k
580
+ entailment_bank
581
+ HH
582
+ commonsense_qa
583
+ asdiv
584
+ enbe
585
+ 0
586
+ 50
587
+ 100
588
+ 150
589
+ 200
590
+ 250
591
+ 300
592
+ Numberoftokens inquestionworldtree
593
+ svamp
594
+ strategy_qa
595
+ qed
596
+ pubmed_qa
597
+ open_book_qa
598
+ Dataset
599
+ medmc_qa
600
+ med_qa
601
+ mawps
602
+ gsm8k
603
+ entailment_bank
604
+ commonsense_qa
605
+ asdiv
606
+ enbe
607
+ H
608
+ 0
609
+ 50
610
+ 100
611
+ 150
612
+ 200
613
+ 250
614
+ 300
615
+ 350
616
+ Number of tokens in CoTThoughtSource: a central hub for large language model reasoning data | 11
617
+ There is an overlap of 1.0 between the set of questions in WorldTree v2 and EntailmentBank. The
618
+ QA pairs in EntailmentBank were taken from the WorldTree v2 dataset 
619
+ 23, so all the questions in
620
+ EntailmentBank are a subset of WorldTree v2.
621
+ Furthermore, there is significant overlap between the questions contained in ASDiv and SVAMP
622
+ and those in ASDiv and MAWPS. ASDiv and SVAMP have overlapped questions because a subset
623
+ of examples from ASDiv was used as seed examples for the creation of SVAMP. For MAWPS and
624
+ ASDiv, questions were crawled from web resources. The overlap could be due to examples being
625
+ crawled from the same web resources.
626
+ Besides overlaps in questions, we also identified overlaps in CoTs. WorldTree v2 provided an
627
+ initial pool of atomic facts that the annotators could use to construct an explanation tree in
628
+ EntailmentBank (in addition to creating their own facts). This explains the high overlap of
629
+ n-grams of CoTs in WorldTree v2 and EntailmentBank. Similarly, a subset of WorldTree v2 facts
630
+ was used for the creation of explanations in OpenbookQA.
631
+ Figure 2: n-gram overlap in questions and CoTs. Overlap is measured by mutual n-gram overlap using
632
+ n=3, values <0.01 are omitted.
633
+ Usage Notes
634
+ Python libraries for accessing and working with data can be downloaded from the Github
635
+ repository and installed with the pip tool. Fig. 3 demonstrates how to load a dataset, randomly
636
+ sample from the pre-populated data in the dataset, call an external LLM API to generate novel
637
+ CoTs and answers, automatically evaluate the accuracy of generated answers, and finally save all
638
+ generated data to a JSON file. Fig. 4 depicts an excerpt of the resulting JSON file.
639
+
640
+ Question
641
+ CoT
642
+ aqua1.00
643
+ asdiv 0.02
644
+ 1.00
645
+ asdiv
646
+ 1.00
647
+ commonsense_qa
648
+ 1.00
649
+ commonsense_qa
650
+ 1.00
651
+ 0.8
652
+ entailment_bank
653
+ 1.00
654
+ entailment_bank
655
+ 1.00
656
+ gsm8k0.020.04
657
+ 1.00
658
+ gsm8k0.03
659
+ 1.00
660
+ mawps0.02
661
+ 0.27
662
+ 0.05
663
+ 1.00
664
+ mawps
665
+ 1.00
666
+ 0.6
667
+ med_qa
668
+ 1.00
669
+ med_qa
670
+ 1.00
671
+ medmc_qa
672
+ 0.061.00
673
+ medmc_qa
674
+ 0.01
675
+ 1.00
676
+ 1.00
677
+ 0.55
678
+ open_book_qa
679
+ open_book_ga
680
+ 0.01
681
+ 1.00
682
+ 0.4
683
+ pubmed_qa
684
+ 0.030.04
685
+ 1.00
686
+ pubmed_qa
687
+ 1.00
688
+ qed
689
+ 1.00
690
+ qed
691
+ 1.00
692
+ 0.2
693
+ strategy_qa
694
+ 1.00
695
+ strategy_qa
696
+ 0.02
697
+ 0.011.00
698
+ svamp 0.02
699
+ 0.19
700
+ 0.040.03
701
+ 1.00
702
+ svamp
703
+ 1.00
704
+ worldtree
705
+ 1.00
706
+ 0.01
707
+ 1.00
708
+ worldtree
709
+ 0.41
710
+ 0.01
711
+ 0.84
712
+ 1.00
713
+ 0
714
+ aqua
715
+ asdiv
716
+ com
717
+ gsm8k
718
+ pubn
719
+ qed
720
+ svamp
721
+ worldtree
722
+ aqua
723
+ asdiy
724
+ gsm8k
725
+ ma
726
+ med
727
+ medr
728
+ oper
729
+ svamp
730
+ oper
731
+ bmed.
732
+ 9ed
733
+ worldtree
734
+ tegy..qa
735
+ 9a
736
+ eb"
737
+ 9a
738
+ -9a
739
+ 9a
740
+ 9a
741
+ _qa
742
+ ga
743
+ 9a
744
+ 9a
745
+ 9aThoughtSource: a central hub for large language model reasoning data | 12
746
+ from cot import Collection
747
+ # Load a dataset
748
+ collection_worldtree = Collection(["worldtree"])
749
+ # Randomly sample 10 rows of train split
750
+ collection_worldtree_10 = collection_worldtree.select(split="train",
751
+ number_samples=10)
752
+ # Create a config file for calling OpenAI API to generate new CoTs and answers.
753
+ config={
754
+ "instruction_keys": ["qa-01"], # Determines which instructions are used
755
+ "cot_trigger_keys": ["kojima-01"], # Determines which cot triggers are used
756
+ "answer_extraction_keys": ["kojima-A-D"], # Determines which answer extraction
757
+ # prompts are used
758
+ "author" : "your_name", # Name of the person responsible for generation
759
+ "api_service": "openai", # Name of the API called ("openai", "huggingface_hub"
760
+ # or a mock for testing: "mock_api")
761
+ "engine": "text-davinci-002", # Name of the engine used
762
+ "temperature": 0, # Level of randomness in the generated output
763
+ "max_tokens": 512, # Maximum length of output generated by the model
764
+ "api_time_interval": 1.0, # Pause between two api calls in seconds
765
+ "verbose": False, # Determines whether the progress of the generation is printed
766
+ "warn": True, # Determines whether a warnings that external APIs will be called
767
+ # are printed
768
+ }
769
+ # Generate novel chains-of-thought and answer extractions
770
+ collection_worldtree_10.generate(config=config)
771
+ # Evaluate accuracy of model predictions
772
+ collection_worldtree_10.evaluate()
773
+ # Example output: {'accuracy': {'qa-01_kojima-01_kojima-A-D': 0.86}}
774
+ # Save all data (including evaluation data) to JSON file
775
+ collection_worldtree_10.dump("worldtree_10_evaluate.json")
776
+ Figure 3: Demonstration of the ThoughtSource API. Basic functionalities of the data loader, generator
777
+ and evaluator modules are demonstrated.
778
+
779
+ ThoughtSource: a central hub for large language model reasoning data | 13
780
+ {
781
+ "id": "1242",
782
+ "ref_id": "",
783
+ "question": "Which is a characteristic of a sperm cell but not of an egg cell?",
784
+ "type": "multiplechoice",
785
+ "choices": [
786
+ "round shape",
787
+ "presence of a tail",
788
+ "contains genetic information",
789
+ "involved in sexual reproduction"
790
+ ],
791
+ "context": "",
792
+ "cot": [
793
+ "A part of something means a characteristic of something.",
794
+ "A tail is not part of an egg cell.",
795
+ "A tail is a part of a sperm cell.",
796
+ "A part of something is present in that something."
797
+ ],
798
+ "answer": [
799
+ "presence of a tail"
800
+ ],
801
+ "generated_cot": [
802
+ {
803
+ "id": "738b54ba-9a20-47e6-b8ff-7cb876103b92",
804
+ "fragments_version": "0.01",
805
+ "api_service": "openai",
806
+ "model": "{'name': 'text-davinci-002', 'temperature': 0, 'max_tokens':
807
+ 512}",
808
+ "instruction": "qa-01",
809
+ "cot_trigger": "kojima-01",
810
+ "cot": "We know that both sperm and egg cells are involved in sexual
811
+ reproduction, so we can eliminate (D). \n\nWe also know that both
812
+ sperm and egg cells contain genetic information, so we can
813
+ eliminate (C). \n\nThat leaves us with (A) and (B). \n\nWe know
814
+ that sperm cells have a tail, but egg cells do not. Therefore, the
815
+ correct answer is (B).",
816
+ "answers": [
817
+ {
818
+ "id": "7f7cc26f-a3b3-4b59-9af7-35980514d0c3",
819
+ "answer_extraction": "kojima-A-D",
820
+ "answer": " B.",
821
+ "correct_answer": true
822
+ }
823
+ ],
824
+ "author": "your_name",
825
+ "date": "2023/01/12 14:18:57",
826
+ "comment": "",
827
+ "annotation": []
828
+ }
829
+ ]
830
+ }
831
+ Figure 4: An excerpt of data generated by running the example code. Data for a single question from
832
+ Worldtree V2 are shown, including human-authored reference CoT, gold-standard answer, an
833
+ AI-generated CoT and extracted answer, as well as evaluation results. Some fields were omitted for
834
+ legibility.
835
+
836
+ ThoughtSource: a central hub for large language model reasoning data | 14
837
+ In a zero-shot setup, specific text fragments can be used to prompt question answering and CoT
838
+ reasoning in LLMs. ThoughtSource includes a curated list of text fragments that can be used to
839
+ generate novel CoTs (Fig. 5). Where possible, we also mapped individual CoTs in pre-existing
840
+ CoT datasets to the text fragments that were used in their creation.
841
+ "instructions": {
842
+ "qa-01": "Answer the following question through step-by-step reasoning.",
843
+ "qa-02": "Answer the following question through careful, concise step-by-step
844
+ reasoning.",
845
+ "qa-03": "Answer the following question through careful, concise step-by-step
846
+ reasoning. Avoid making up wrong statements. If the question does not
847
+ make sense or cannot be answered, write \"I cannot answer the
848
+ question\".
849
+ If you do not have a good answer, write \"I do not have a good answer\".
850
+ If you are uncertain, write \"I am uncertain about this\".",
851
+ [...]
852
+ },
853
+ "cot_triggers": {
854
+ "kojima-01": "Answer: Let's think step by step.",
855
+ "kojima-02": "Answer: We should think about this step by step.",
856
+ "kojima-03": "Answer: First,",
857
+ "kojima-04": "Answer: Before we dive into the answer,",
858
+ [...]
859
+ "lievin-01": "Answer: Let's derive the differential diagnosis step by step.",
860
+ "lievin-02": "Answer: Let's use step by step inductive reasoning, given the
861
+ medical nature of the question.",
862
+ [...]
863
+ "lievin-26": "Answer: Let's follow a Bayesian step by step approach.",
864
+ "lievin-27": "Answer: Let's reflect on each option from the least likely to the
865
+ most likely.",
866
+ "lievin-28": "Answer: Let's use step by step Bayesian reasoning, given the
867
+ medical nature of the question."
868
+ },
869
+ "answer_extractions":{
870
+ "kojima-01": "Therefore, the answer is",
871
+ "kojima-02": "Therefore,",
872
+ "kojima-03": "The answer is",
873
+ "kojima-numerals": "Therefore, the answer (arabic numerals) is",
874
+ "kojima-yes-no": "Therefore, the answer (Yes or No) is",
875
+ "kojima-A-C": "Therefore, among A through C, the answer is",
876
+ "kojima-A-D": "Therefore, among A through D, the answer is",
877
+ [...]
878
+ }
879
+ Figure 5: An excerpt of the collection of prompt fragments. These fragments can be used to build
880
+ prompts for interacting with LLMs, allowing for empirical testing of how different prompts affect model
881
+ performance.
882
+ We provide two web-based interfaces for exploring and annotating ThoughtSource data, the
883
+ Dataset Viewer and the Annotator. The Dataset Viewer is a simple interface for exploring dataset
884
+ contents. The Annotator (Fig. 6) allows you to upload specific subsets of a dataset, provides
885
+ convenience functions for highlighting similarities between different generated CoTs and the
886
+ correctness of generated answers, and allows you to annotate individual CoTs interactively. The
887
+ annotator facilitates identifying strengths and weaknesses of different CoTs. Annotations can be
888
+ used for downstream model evaluation and further improving the capabilities of AI models
889
+ through fine-tuning / reinforcement learning.
890
+
891
+ ThoughtSource: a central hub for large language model reasoning data | 15
892
+ Figure 6: The ThoughtSource Annotator. The web-based interface allows for convenient inspection and
893
+ annotation of reasoning chains and answers. Text that is similar between CoTs can be automatically
894
+ highlighted based on an easily adjustable similarity threshold, facilitating a better understanding of
895
+ similarities and differences of different reasoning chains.
896
+ All tools and libraries, as well as more detailed demonstration notebooks, can be found on the
897
+ project Github page.
898
+ We plan to add more datasets and generated CoTs to the ThoughtSource repository, and we
899
+ welcome outside contributions. Novel CoTs for existing core datasets can be generated and
900
+ shared through the ThoughtSource APIs and JSON files. Completely new datasets can also be
901
+ added, as described in the Github repository's contribution guide.
902
+ Code Availability
903
+ All code, data and tools are openly available at https://github.com/OpenBioLink/ThoughtSource.
904
+ Our code and data are licensed under an MIT license, while data adapted from existing datasets
905
+ are available under the licenses of their respective sources.
906
+
907
+ ThoughtSource Annotator
908
+ tfidf Visualisation threshold
909
+ Download current
910
+ Matthias Logout
911
+ Question 1242 (test)
912
+ A 28-year-old woman, gravida 1, para O, at 20 weeks' gestation comes to the physician with her husband for a prenatal visit. Her
913
+ pregnancy has been uncomplicated. They are planning to travel to Ethiopia next month to visit the husband's family
914
+ uterus that is consistent with a 20-week gestation. Which of the following drugs is most suitable for pre-exposure prophylaxis
915
+ against malaria?
916
+ A. Doxycycline
917
+ B. Primaquine
918
+ C. Mefloquine
919
+ D. Chloroquine
920
+ E. Proguanil
921
+ Let's think step by step. The first thing we need to
922
+ Let's think step by step like a medical expert. The
923
+ Let's differentiate using step by step reasoning like
924
+ do is figure out what kind of malaria is common in
925
+ first thing to consider is the safety of the
926
+ a medical expert: 1) Doxycycline is not suitable for
927
+ Ethiopia. According to this website, the most
928
+ medication for the mother and the developing
929
+ pre-exposure prophylaxis against malaria in this
930
+ common type of malaria in Ethiopia is Plasmodium
931
+ fetus. Doxycycline and mefloquine are not
932
+ case because the patient is pregnant. 2)
933
+ falciparum. The next thing we need to do is figure
934
+ recommended for use in pregnant women, so we
935
+ Mefloquine is not suitable for pre-exposure
936
+ out which of the four drugs listed is most effective
937
+ can eliminate those options. The next thing to
938
+ prophylaxis against malaria in this case because
939
+ against Plasmodium falciparum. According to this
940
+ consider is the efficacy of the medication.
941
+ the patient is pregnant. 3) Chloroquine is suitable
942
+ website, chloroquine and proguanil are not
943
+ Chloroquine is not effective in Ethiopia, so we can
944
+ for pre-exposure prophylaxis against malaria in
945
+ effective against Plasmodium falciparum. That
946
+ eliminate that option. That leaves us with
947
+ this case because the patient is not pregnant. 4)
948
+ leaves doxycycline and mefloquine. The next thing
949
+ proguanil, which is safe for use in pregnant
950
+ Proguanil is suitable for pre-exposure prophylaxis
951
+ we need to do is figure out which of the two
952
+ women and is effective in Ethiopia.
953
+ against malaria in this case because the patient is
954
+ remaining drugs has the fewest side effects
955
+ Answer: Proguanil
956
+ not pregnant.
957
+ According to this website, the most common side
958
+
959
+ Answer: Chloroquine
960
+ effects of doxycycline are nausea, vomiting, and
961
+
962
+ Incorrect reasoning
963
+ diarrhea. The most common side effects of
964
+ Insufficient knowledge
965
+ Incorrect reasoning
966
+ mefloquine are nausea, vomiting, diarrhea, and
967
+ headache. Based on the information above, the
968
+ Incorrect reading comprehension
969
+ Insufficient knowledge
970
+ most suitable drug for pre-exposure prophylaxis
971
+ Incorrect reading comprehension
972
+ Too verbose
973
+ against malaria in Ethiopia is mefloquine.
974
+ Too verbose
975
+ O Answer: Mefloquine
976
+ Incorrect reasoning
977
+ Insufficient knowledge
978
+ Incorrect reading comprehension
979
+ Too verboseThoughtSource: a central hub for large language model reasoning data | 16
980
+ Acknowledgements
981
+ We thank primary dataset contributors that assisted with assembling the ThoughtSource
982
+ meta-dataset.
983
+ Author contributions
984
+ S.O. and K.H. wrote the code for accessing, converting, generating and analysing datasets, and
985
+ wrote parts of the manuscript and documentation.
986
+ V.L., C.E. and O.W. generated and analysed CoT data for medical datasets.
987
+ M.Ma. wrote the code of the annotator software.
988
+ M.Mo. wrote a first prototype of code for accessing and converting datasets.
989
+ R.P. contributed to improving code and documentation quality.
990
+ M.S. conceived and supervised the project and wrote parts of the manuscript and documentation.
991
+ All authors have read and approved the final manuscript.
992
+ Competing interests
993
+ The authors declare that there are no conflicts of interest.
994
+ References
995
+ 1. Sanh, V. et al. Multitask Prompted Training Enables Zero-Shot Task Generalization. arXiv
996
+ (2021).
997
+ 2. Brown, T. B. et al. Language Models are Few-Shot Learners. arXiv (2020).
998
+ 3. Ouyang, L. et al. Training language models to follow instructions with human feedback. arXiv
999
+ (2022) doi:10.48550/arxiv.2203.02155.
1000
+ 4. Wei, J. et al. Chain of Thought Prompting Elicits Reasoning in Large Language Models. arXiv
1001
+ (2022) doi:10.48550/arxiv.2201.11903.
1002
+ 5. Kojima, T., Gu, S. S., Reid, M., Matsuo, Y. & Iwasawa, Y. Large Language Models are Zero-Shot
1003
+ Reasoners. arXiv (2022) doi:10.48550/arxiv.2205.11916.
1004
+ 6. Huang, J. & Chang, K. C.-C. Towards Reasoning in Large Language Models: A Survey. Preprint
1005
+ at https://doi.org/10.48550/arXiv.2212.10403 (2022).
1006
+ 7. Zhou, D. et al. Least-to-Most Prompting Enables Complex Reasoning in Large Language
1007
+ Models. arXiv (2022) doi:10.48550/arxiv.2205.10625.
1008
+ 8. Yao, S. et al. ReAct: Synergizing Reasoning and Acting in Language Models. arXiv (2022)
1009
+ doi:10.48550/arxiv.2210.03629.
1010
+ 9. Press, O. et al. Measuring and Narrowing the Compositionality Gap in Language Models.
1011
+ arXiv (2022) doi:10.48550/arxiv.2210.03350.
1012
+
1013
+ ThoughtSource: a central hub for large language model reasoning data | 17
1014
+ 10.Jung, J. et al. Maieutic Prompting: Logically Consistent Reasoning with Recursive
1015
+ Explanations. arXiv (2022) doi:10.48550/arxiv.2205.11822.
1016
+ 11.Dua, D., Gupta, S., Singh, S. & Gardner, M. Successive Prompting for Decomposing Complex
1017
+ Questions. Preprint at https://doi.org/10.48550/arXiv.2212.04092 (2022).
1018
+ 12.Trivedi, H., Balasubramanian, N., Khot, T. & Sabharwal, A. Interleaving Retrieval with
1019
+ Chain-of-Thought Reasoning for Knowledge-Intensive Multi-Step Questions. Preprint at
1020
+ https://doi.org/10.48550/arXiv.2212.10509 (2022).
1021
+ 13.Khattab, O. et al. Demonstrate-Search-Predict: Composing retrieval and language models for
1022
+ knowledge-intensive NLP. Preprint at https://doi.org/10.48550/arXiv.2212.14024 (2023).
1023
+ 14.Wang, A. et al. SuperGLUE: A Stickier Benchmark for General-Purpose Language
1024
+ Understanding Systems. in Advances in Neural Information Processing Systems (eds. Wallach, H.
1025
+ et al.) vol. 32 3266–3280 (Curran Associates, Inc., 2019).
1026
+ 15.Fries, J. A. et al. BigBIO: A Framework for Data-Centric Biomedical Natural Language
1027
+ Processing. in (arXiv, 2022). doi:10.48550/arXiv.2206.15076.
1028
+ 16.Gu, Y. et al. Domain-Specific Language Model Pretraining for Biomedical Natural Language
1029
+ Processing. ACM Trans. Comput. Healthc. 3, 2:1-2:23 (2021).
1030
+ 17.Gao, L. et al. The Pile: An 800GB Dataset of Diverse Text for Language Modeling. arXiv (2020).
1031
+ 18.Laurençon, H. et al. The BigScience ROOTS Corpus: A 1.6TB Composite Multilingual
1032
+ Dataset. in (2022).
1033
+ 19.Lhoest, Q. et al. Datasets: A Community Library for Natural Language Processing. in
1034
+ Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System
1035
+ Demonstrations 175–184 (Association for Computational Linguistics, 2021).
1036
+ doi:10.18653/v1/2021.emnlp-demo.21.
1037
+ 20.Chen, M. et al. Evaluating Large Language Models Trained on Code. Preprint at
1038
+ https://doi.org/10.48550/arXiv.2107.03374 (2021).
1039
+ 21.Liévin, V., Hother, C. E. & Winther, O. Can large language models reason about medical
1040
+ questions? arXiv (2022) doi:10.48550/arxiv.2207.08143.
1041
+ 22.Xie, Z. et al. WorldTree V2: A Corpus of Science-Domain Structured Explanations and
1042
+ Inference Patterns supporting Multi-Hop Inference. in Proceedings of the Twelfth Language
1043
+ Resources and Evaluation Conference 5456–5473 (European Language Resources Association,
1044
+ 2020).
1045
+ 23.Dalvi, B. et al. Explaining Answers with Entailment Trees. Preprint at
1046
+ http://arxiv.org/abs/2104.08661 (2022).
1047
+ 24.Mihaylov, T., Clark, P., Khot, T. & Sabharwal, A. Can a Suit of Armor Conduct Electricity? A
1048
+ New Dataset for Open Book Question Answering. in Proceedings of the 2018 Conference on
1049
+ Empirical Methods in Natural Language Processing 2381–2391 (Association for Computational
1050
+ Linguistics, 2018). doi:10.18653/v1/D18-1260.
1051
+ 25.Jin, D. et al. What Disease Does This Patient Have? A Large-Scale Open Domain Question
1052
+
1053
+ ThoughtSource: a central hub for large language model reasoning data | 18
1054
+ Answering Dataset from Medical Exams. Appl. Sci. 11, 6421 (2021).
1055
+ 26.Pal, A., Umapathi, L. K. & Sankarasubbu, M. MedMCQA: A Large-scale Multi-Subject
1056
+ Multi-Choice Dataset for Medical domain Question Answering. in Proceedings of the
1057
+ Conference on Health, Inference, and Learning 248–260 (PMLR, 2022).
1058
+ 27.Jin, Q., Dhingra, B., Liu, Z., Cohen, W. & Lu, X. PubMedQA: A Dataset for Biomedical
1059
+ Research Question Answering. in Proceedings of the 2019 Conference on Empirical Methods in
1060
+ Natural Language Processing and the 9th International Joint Conference on Natural Language
1061
+ Processing (EMNLP-IJCNLP) 2567–2577 (Association for Computational Linguistics, 2019).
1062
+ doi:10.18653/v1/D19-1259.
1063
+ 28.Talmor, A., Herzig, J., Lourie, N. & Berant, J. CommonsenseQA: A Question Answering
1064
+ Challenge Targeting Commonsense Knowledge. in Proceedings of the 2019 Conference of the
1065
+ North American Chapter of the Association for Computational Linguistics: Human Language
1066
+ Technologies, Volume 1 (Long and Short Papers) 4149–4158 (Association for Computational
1067
+ Linguistics, 2019). doi:10.18653/v1/N19-1421.
1068
+ 29.Geva, M. et al. Did Aristotle Use a Laptop? A Question Answering Benchmark with Implicit
1069
+ Reasoning Strategies. Trans. Assoc. Comput. Linguist. 9, 346–361 (2021).
1070
+ 30.Lamm, M. et al. QED: A Framework and Dataset for Explanations in Question Answering.
1071
+ Trans. Assoc. Comput. Linguist. 9, 790–806 (2021).
1072
+ 31.Ling, W., Yogatama, D., Dyer, C. & Blunsom, P. Program Induction by Rationale Generation:
1073
+ Learning to Solve and Explain Algebraic Word Problems. in Proceedings of the 55th Annual
1074
+ Meeting of the Association for Computational Linguistics (Volume 1: Long Papers) 158–167
1075
+ (Association for Computational Linguistics, 2017). doi:10.18653/v1/P17-1015.
1076
+ 32.Miao, S., Liang, C.-C. & Su, K.-Y. A Diverse Corpus for Evaluating and Developing English
1077
+ Math Word Problem Solvers. in Proceedings of the 58th Annual Meeting of the Association for
1078
+ Computational Linguistics 975–984 (Association for Computational Linguistics, 2020).
1079
+ doi:10.18653/v1/2020.acl-main.92.
1080
+ 33.Cobbe, K. et al. Training Verifiers to Solve Math Word Problems. Preprint at
1081
+ https://doi.org/10.48550/arXiv.2110.14168 (2021).
1082
+ 34.Koncel-Kedziorski, R., Roy, S., Amini, A., Kushman, N. & Hajishirzi, H. MAWPS: A Math
1083
+ Word Problem Repository. in Proceedings of the 2016 Conference of the North American Chapter of
1084
+ the Association for Computational Linguistics: Human Language Technologies 1152–1157
1085
+ (Association for Computational Linguistics, 2016). doi:10.18653/v1/N16-1136.
1086
+ 35.Patel, A., Bhattamishra, S. & Goyal, N. Are NLP Models really able to Solve Simple Math Word
1087
+ Problems? in Proceedings of the 2021 Conference of the North American Chapter of the Association
1088
+ for Computational Linguistics: Human Language Technologies 2080–2094 (Association for
1089
+ Computational Linguistics, 2021). doi:10.18653/v1/2021.naacl-main.168.
1090
+
9dFJT4oBgHgl3EQfoixI/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
A9AyT4oBgHgl3EQfd_iI/content/2301.00313v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28d5767900f39b9fb92a00b68b189f049b60f11b8b1ed6ca685ebb9521a7bb52
3
+ size 851377
A9AyT4oBgHgl3EQfd_iI/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8c600f18fde33c8eeb696f7d32bf1c9179ccf368a7237e707bcb6f8ece52de4
3
+ size 106867
ANAzT4oBgHgl3EQfF_sv/content/tmp_files/2301.01019v1.pdf.txt ADDED
@@ -0,0 +1,1634 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Correlation Loss: Enforcing Correlation between Classification and Localization
2
+ Fehmi Kahraman*,1, Kemal Oksuz*,1, Sinan Kalkan†,1,2, Emre Akbas†,1,2
3
+ 1Dept. of Computer Engineering. 2METU Center for Robotics and Artificial Intelligence (ROMER)
4
+ Middle East Technical University (METU), Ankara, Turkey
5
+ {fehmi.kahraman 01, kemal.oksuz, skalkan, eakbas}@metu.edu.tr
6
+ Abstract
7
+ Object detectors are conventionally trained by a weighted
8
+ sum of classification and localization losses. Recent studies
9
+ (e.g., predicting IoU with an auxiliary head, Generalized Fo-
10
+ cal Loss, Rank & Sort Loss) have shown that forcing these
11
+ two loss terms to interact with each other in non-conventional
12
+ ways creates a useful inductive bias and improves perfor-
13
+ mance. Inspired by these works, we focus on the correlation
14
+ between classification and localization and make two main
15
+ contributions: (i) We provide an analysis about the effects
16
+ of correlation between classification and localization tasks
17
+ in object detectors. We identify why correlation affects the
18
+ performance of various NMS-based and NMS-free detectors,
19
+ and we devise measures to evaluate the effect of correla-
20
+ tion and use them to analyze common detectors. (ii) Moti-
21
+ vated by our observations, e.g., that NMS-free detectors can
22
+ also benefit from correlation, we propose Correlation Loss,
23
+ a novel plug-in loss function that improves the performance
24
+ of various object detectors by directly optimizing correla-
25
+ tion coefficients: E.g., Correlation Loss on Sparse R-CNN,
26
+ an NMS-free method, yields 1.6 AP gain on COCO and 1.8
27
+ AP gain on Cityscapes dataset. Our best model on Sparse
28
+ R-CNN reaches 51.0 AP without test-time augmentation on
29
+ COCO test-dev, reaching state-of-the-art. Code is available
30
+ at: https://github.com/fehmikahraman/CorrLoss.
31
+ 1
32
+ Introduction
33
+ Most object detectors optimize a weighted sum of classifi-
34
+ cation and localization losses during training. Results from
35
+ recent work suggest that performance improves when these
36
+ two loss functions are forced to interact with each other in
37
+ non-conventional ways as illustrated in Fig. 1. For example,
38
+ training an auxiliary (aux.) head to regress the localization
39
+ qualities of the positive examples, e.g. centerness, IoU or
40
+ mask-IoU, has proven useful (Jiang et al. 2018; Kim and Lee
41
+ 2020; Tian et al. 2019; Zhang et al. 2020) (Fig. 1(b)). Other
42
+ methods remove such auxiliary heads and aim directly to
43
+ enforce correlation1 in the classification or localization task
44
+ during training; e.g., Average LRP Loss (Oksuz et al. 2020)
45
+ *These authors contributed equally.
46
+ †Equal contribution for senior authorship.
47
+ Copyright © 2023, Association for the Advancement of Artificial
48
+ Intelligence (www.aaai.org). All rights reserved.
49
+ 1In the rest of the paper, “correlation” will refer to the correla-
50
+ tion between classification scores and IoUs.
51
+ weighs the examples in the localization task by ranking them
52
+ with respect to (wrt.) their classification scores (Fig. 1(c)).
53
+ Using localization quality as an additional supervision sig-
54
+ nal for classification has been more commonly adopted (Fig.
55
+ 1(d)) (Li et al. 2020; Liu et al. 2021; Oksuz et al. 2021a;
56
+ Zhang et al. 2021) in two main ways: (i) Score-based ap-
57
+ proaches aim to regress the localization qualities (Li et al.
58
+ 2019, 2020; Zhang et al. 2021) in the classification score,
59
+ and (ii) ranking-based approaches enforce the classifier to
60
+ rank the confidence scores wrt. the localization qualities (Liu
61
+ et al. 2021; Oksuz et al. 2021a).
62
+ Improving correlation seems to have a positive effect on
63
+ performance of a variety of object detectors, as shown in
64
+ Fig. 2. However, the effect of correlation on object detectors
65
+ has not been thoroughly studied. We fill this gap in this pa-
66
+ per and first identify that correlation affects the performance
67
+ of object detectors at two levels: (i) Image-level correlation,
68
+ the correlation between the classification scores and local-
69
+ ization qualities (i.e., IoU for the rest of the paper) of the de-
70
+ tections in a single image before post-processing, which is
71
+ important to promote NMS performance, and (ii) Class-level
72
+ correlation, the correlation over the entire dataset for each
73
+ class after post-processing, which is related to the COCO-
74
+ style Average Precision (AP). Moreover, we quantitatively
75
+ define correlation at each level to enable analyses on how
76
+ well an object detector captures correlation (e.g., βcls in
77
+ Fig. 2(a)). Then, we provide an analysis on both levels of
78
+ correlation and draw important observations using common
79
+ models. Finally, to better exploit correlation, we introduce a
80
+ more direct mechanism to enforce correlation: Correlation
81
+ Loss, a simple plug-in and detector-independent loss term
82
+ (Fig. 1(e)), improving performance for a wide range of ob-
83
+ ject detectors including NMS-free detectors, aligning with
84
+ our analysis (Fig. 2(b)). Similar to the novel loss functions
85
+ (Li et al. 2020; Oksuz et al. 2021a; Zhang et al. 2021), our
86
+ Correlation Loss boosts the performance without an aux-
87
+ iliary head, but different from them, it is a simple plug-in
88
+ technique that can easily be incorporated into any object de-
89
+ tector, whether NMS-based or NMS-free.
90
+ Our main contributions are: (1) We identify how corre-
91
+ lation affects NMS-based and NMS-free detectors, and de-
92
+ sign quantitative measures to analyze a detector wrt. corre-
93
+ lation. (2) We analyze the effects of correlation at different
94
+ levels on various object detectors. (3) We propose Correla-
95
+ arXiv:2301.01019v1 [cs.CV] 3 Jan 2023
96
+
97
+ ̂𝑠 ← ̂𝑠×%ℓ
98
+ Cls
99
+ Loc
100
+ Aux
101
+ ℒ!"#
102
+ ℒ"$!
103
+ ℒ%&'
104
+ (b) Auxiliary Head
105
+ (d) Novel Cls. Loss
106
+ (a) No correlation
107
+ Cls
108
+ Loc
109
+ ℒ!"#
110
+ ℒ"$!
111
+ ̂𝑠
112
+ %𝐵
113
+ ̂𝑠
114
+ %𝐵
115
+ %ℓ
116
+ Cls
117
+ Loc
118
+ ℒ!"#
119
+ ℒ"$!
120
+ ̂𝑠
121
+ %𝐵
122
+ (c) Novel Loc. Loss
123
+ Cls
124
+ Loc
125
+ ℒ!"#
126
+ ℒ"$!
127
+ ̂𝑠
128
+ %𝐵
129
+ Cls
130
+ Loc
131
+ ℒ!"#
132
+ ℒ"$!
133
+ ̂𝑠
134
+ %𝐵
135
+ ℒ!$((
136
+ (e) Correlation Loss (Ours)
137
+ Legend
138
+ ̂𝑠 : Classification Scores
139
+ )𝐵 : Box Coordinates
140
+ )ℓ : Localization Quality
141
+ (e.g. centerness)
142
+ ℒ : A Loss Function
143
+ Figure 1: Different ways of handling the classification and localization tasks from the perspective of correlation. (a) Conven-
144
+ tional case of optimizing the two tasks independently (e.g., Chen et al. 2020; Sun et al. 2021b). (b) An additional auxiliary head
145
+ predicts centerness (Zhang et al. 2020) or IoU (Jiang et al. 2018; Kim and Lee 2020), which introduces additional learnable
146
+ parameters. (c) Novel loss functions replace the standard localization loss (Oksuz et al. 2020) or (d) novel classification loss
147
+ (Li et al. 2020; Oksuz et al. 2021a) by more complicated ones to leverage correlation. (e) Our Correlation Loss explicitly opti-
148
+ mizes a correlation coefficient. It is a simple, plug-in loss function which does not introduce additional parameters and has the
149
+ flexibility to supervise classification or localisation head as well as both. Black and colored arrows respectively denote the loss
150
+ functions (i.e., during training) & the network outputs (i.e., during inference).
151
+ 38.5
152
+ 39.0
153
+ 39.5
154
+ 40.0 APC
155
+ 40
156
+ 41
157
+ 42
158
+ 43
159
+ 44
160
+ 45
161
+ 46
162
+ 47
163
+ cls
164
+ FL
165
+ Aux.
166
+ QFL
167
+ RS
168
+ FL
169
+ Aux.
170
+ QFL
171
+ RS
172
+ w/o Corr.Loss
173
+ w. Corr.Loss (Ours)
174
+ (a) Detection vs. Correlation
175
+ ATSS
176
+ Sparse RCNN
177
+ YOLACT
178
+ 25
179
+ 30
180
+ 35
181
+ 40
182
+ 45
183
+ 50
184
+ APC
185
+ +1.1 AP
186
+ +2.8% gain
187
+ +1.6 AP
188
+ +4.2% gain
189
+ +0.7 AP
190
+ +2.4% gain
191
+ Baseline
192
+ Ours
193
+ (b) Effect of our Corr. Loss
194
+ Figure 2: (a) Detection performance, measured by COCO-
195
+ style AP (APC) vs. correlation quality, measured by class-
196
+ level correlation (βcls - see Section 3.1 for details). The
197
+ methods proposed to improve the correlation between classi-
198
+ fication and localization tasks also improve APC. Compare
199
+ using aux. head, QFL, RS Loss with the baseline ATSS only
200
+ using Focal Loss (FL – all in red dots) to see the positive cor-
201
+ relation between APC and βcls. Our Correlation Loss as a
202
+ plug-in loss function explicitly optimizes a correlation coef-
203
+ ficient and improves the detection performance (APC) over
204
+ different settings of ATSS (i.e. using FL, aux. head, QFL, RS
205
+ Loss) consistently owing to increasing βcls, validating our
206
+ hypothesis (compare green stars with red dots). (b) Our Cor-
207
+ relation Loss is simple-to-use and improves various meth-
208
+ ods (i) NMS-based ATSS (w/o aux. head) by 1.1APC, (ii)
209
+ NMS-free Sparse R-CNN by 1.6APC and (iii) YOLACT,
210
+ an instance segmentation method by 0.7APC.
211
+ tion Loss as a plug-in loss function to optimize correlation
212
+ explicitly. Thanks to its simplicity, our loss function can be
213
+ easily incorporated into a diverse set of object detectors and
214
+ improves the performance of e.g., Sparse R-CNN up to 1.6
215
+ AP and 2.0AP75, suggesting, for the first time, that NMS-
216
+ free detectors can also benefit from correlation. Our best
217
+ model yields 51.0 AP, reaching state-of-the art.
218
+ 2
219
+ Background and Related Work
220
+ Object Detection Pipeline. We group object detectors wrt.
221
+ their usage of NMS (Fig. 3 presents overview & notation):
222
+ 1. NMS-based Detectors. To detect all objects with dif-
223
+ ferent scales, locations and aspect ratios; most methods
224
+ (He et al. 2017; Kong et al. 2020; Law and Deng 2018;
225
+ Lin et al. 2020; Ren et al. 2017; Tian et al. 2019; Zhang
226
+ et al. 2020) employ a large number of object hypotheses
227
+ (e.g., anchors, points), which are labeled as positive (a.k.a.
228
+ foreground) or negative (a.k.a. background) during training,
229
+ based on whether/how they match GT boxes (Zhang et al.
230
+ 2020, 2019). In this setting, there is no restriction for an ob-
231
+ ject to be predicted by multiple object hypotheses, causing
232
+ duplicates. Accordingly, during inference, NMS picks the
233
+ detection with the largest confidence score among the detec-
234
+ tions that overlap more than a predetermined IoU threshold
235
+ to avoid duplicate detections.
236
+ 2. NMS-free Detectors. An emerging research direction is
237
+ to remove the need for doing NMS, simplifying the detec-
238
+ tion pipeline (Carion et al. 2020; Dai et al. 2021; Roh et al.
239
+ 2022; Sun et al. 2021b,a; Zhu et al. 2021). This is achieved
240
+ by ensuring a one-to-one matching between the GTs and de-
241
+ tections, which supervises the detector to avoid duplicates in
242
+ the first place.
243
+ Methods Enforcing Correlation. One common way to
244
+ ensure correlation is to use an additional auxiliary head, su-
245
+ pervised by the localization quality of a detection such as
246
+ centerness (Tian et al. 2019; Zhang et al. 2020), IoU (Jiang
247
+ et al. 2018), mask IoU (Huang et al. 2019) or uncertainty
248
+ (He et al. 2019), during training. During inference, the pre-
249
+ dictions of the auxiliary head are then combined with those
250
+ of the classifier to improve detection performance. Recent
251
+ methods show that the auxiliary head can be removed, and
252
+ either (i) the regressor can prioritize the positive examples
253
+ (Oksuz et al. 2020) or (ii) the classifier can be supervised to
254
+ prioritize detections with confidence scores. The latter is en-
255
+ sured either by regressing the IoUs by the classifier (Li et al.
256
+ 2020; Zhang et al. 2021) or by training the classifier to rank
257
+ confidence scores (Liu et al. 2021; Oksuz et al. 2021a) wrt.
258
+ IoUs. Unlike these methods, TOOD (Feng et al. 2021) takes
259
+ correlation into account mainly while designing the model,
260
+ particularly the detection head, i.e., not the loss function.
261
+ Correlation Coefficients. Correlation coefficients mea-
262
+ sure the strength and direction of the “relation” between two
263
+
264
+ Ƹ𝑠𝑝𝑟𝑒
265
+ 𝐼
266
+ (score)
267
+ ෠𝐵𝑝𝑟𝑒
268
+ 𝐼
269
+ (box)
270
+ Image, I
271
+ Post-processing
272
+ Object
273
+ Detector
274
+ Remove
275
+ background
276
+ NMS
277
+ Top-k
278
+ For each class c
279
+ in image I
280
+ For each image I
281
+ Collect
282
+ Ƹ𝑠𝑝𝑜𝑠𝑡
283
+ 𝑐
284
+ & ෠𝐵𝑝𝑜𝑠𝑡
285
+ 𝑐
286
+ over all
287
+ images
288
+ Ƹ𝑠𝑝𝑜𝑠𝑡
289
+ 𝐼,𝑐 (score)
290
+ ෠𝐵𝑝𝑜𝑠𝑡
291
+ 𝐼,𝑐
292
+ (box)
293
+ Figure 3: Object detection pipeline and notation. Given an input image, I, NMS-based detectors yield raw detections before
294
+ post-processing, each of which has a predicted bounding box (BB) and an array of confidence scores over GT classes. We
295
+ denote the confidence scores and the predicted BBs pertaining to the positive detections, i.e., the detections matching with GT
296
+ objects during training, by ˆsI
297
+ pre and ˆBI
298
+ pre, respectively. To obtain final detections, raw detections are post-processed in three
299
+ steps: (i) Detections with low confidence scores, i.e., background, are removed, (ii) duplicates are eliminated by NMS, and
300
+ (iii) top-k scoring detections are kept. As for these final detections, we denote the confidence scores and BBs of true positive
301
+ detections for class c in a single image I by ˆsI,c
302
+ post and ˆBI,c
303
+ post respectively, and over the entire dataset by ˆsc
304
+ post and ˆBc
305
+ post. As for
306
+ NMS-free detectors; NMS, dashed gray box in post-processing, is excluded, hence post-processing is lighter.
307
+ sets, X = {x1, ..., xN} and Y = {y1, ..., yN}. Different re-
308
+ lations are evaluated by different correlation coefficients: (i)
309
+ Pearson correlation coefficient, denoted by α(·, ·), measures
310
+ the linear relationship between the sets, (ii) Spearman corre-
311
+ lation coefficient, β(·, ·), corresponds to the ranking relation-
312
+ ship and (iii) Concordance correlation coefficient, γ(·, ·), is
313
+ more strict, measuring the similarity of the values and max-
314
+ imized when xi = yi for all i ∈ 1, ..., N. All correlation
315
+ coefficients have a range of [−1, +1] where positive/neg-
316
+ ative correlation corresponds to increasing/decreasing rela-
317
+ tion, while 0 implies no correlation between X and Y .
318
+ Comparative Summary. In this paper, we comprehen-
319
+ sively identify and analyze the effect of explicitly correlat-
320
+ ing classification and localization in object detectors. Unlike
321
+ other methods that also enforce correlation, some of which
322
+ are tested only on a single architecture (Huang et al. 2019;
323
+ Jiang et al. 2018; Tian et al. 2019), we propose a simple solu-
324
+ tion by directly optimizing the correlation coefficient, which
325
+ is auxiliary-head free and easily applicable to all object de-
326
+ tectors, whether NMS-based or NMS-free. Also, ours is the
327
+ first to work on NMS-free detectors in this context.
328
+ 3
329
+ Effects of Correlation on Object Detectors
330
+ This section presents why maximizing correlation is impor-
331
+ tant for object detectors, introduces measures to evaluate ob-
332
+ ject detectors wrt. correlation and provides an analysis on
333
+ methods designed for improving correlation.
334
+ 3.1
335
+ How Correlation Affects Object Detectors
336
+ Detectors are affected by correlation at two levels (Fig. 4):
337
+ Image-level Correlation. This level of correlation corre-
338
+ sponds to the correlation between the classification scores
339
+ and IoUs of the detections in a single image before post-
340
+ processing, and accordingly, we measure it with the Spear-
341
+ man correlation coefficient2, β(·, ·), averaged over images.
342
+ 2While analyzing object detectors in terms of correlation, we
343
+ employ Spearman correlation coefficient, β(·, ·), to measure the
344
+ relation between the ranks of the values (i.e., scores and IoUs) in-
345
+ stead of the values themselves, and aim to isolate the correlation
346
+ quality from the localization and classification performances.
347
+ Denoting the set of images to be evaluated by I and IoUs
348
+ between the BBs of the positive detections ( ˆBI
349
+ pre, Fig. 3)
350
+ and their associated GTs by IoUI
351
+ pre, image-level correlation
352
+ is measured as follows:
353
+ βimg = 1
354
+ |I|
355
+
356
+ I∈I
357
+ β(IoUI
358
+ pre, ˆsI
359
+ pre).
360
+ (1)
361
+ Maximizing image-level correlation is important for
362
+ NMS-based detectors since NMS aims to suppress dupli-
363
+ cates, i.e., to keep only a single detection for each GT when
364
+ there is more than one. More particularly among overlap-
365
+ ping detections (e.g., dark and light green detections in the
366
+ detector output image in Fig. 4(a)), NMS picks the one with
367
+ the larger score, and hence, if there is positive correlation
368
+ between the confidence scores and IoUs of those overlap-
369
+ ping detections, then the one with the best IoU (e.g., dark
370
+ green detection in Fig. 4(a)) will survive and detection per-
371
+ formance will increase.
372
+ Class-level Correlation. This level of correlation indi-
373
+ cates the correlation between the classification scores and
374
+ IoUs of the detections obtained after post-processing for
375
+ each class. Since class-level correlation is related to COCO-
376
+ style AP, APC, we average β(·, ·) over classes to be consis-
377
+ tent with the computation of APC:
378
+ βcls = 1
379
+ |C|
380
+
381
+ c∈C
382
+ β(IoUc
383
+ post, ˆsc
384
+ post),
385
+ (2)
386
+ where C is the set of classes in the dataset and IoUc
387
+ post is the
388
+ set IoUs of BBs of true positives for class c ( ˆBc
389
+ post, Fig. 3).
390
+ Class-level correlation affects the performance of all de-
391
+ tectors since it is directly related to APC, the performance
392
+ measure itself. To be more specific, APC for a single class
393
+ is defined as the average of APs computed over 10 differ-
394
+ ent IoU thresholds, IoU ∈ {0.50, 0.55, ..., 0.95}, validating
395
+ the true positives. For a specific threshold IoU, the detec-
396
+ tions are first sorted with respect to the classification scores,
397
+ and then precision and recall pairs are calculated on each
398
+ detection. Using these pairs, a precision-recall (PR) curve is
399
+ obtained, and the area under the PR curve corresponds to
400
+
401
+ Positively correlated
402
+ NMS
403
+ Solid BBs
404
+ : Ground truths
405
+ Dashed BBs
406
+ : Detections before
407
+ post-processing ( Ƹ𝑠𝑝𝑟𝑒
408
+ 𝐼
409
+ , ෠𝐵𝑝𝑟𝑒
410
+ 𝐼
411
+ in Fig. 3)
412
+ Positively correlated - High APC
413
+ (b) Class-level Correlation for better AP
414
+ AP Calculation
415
+ 𝐼𝑜𝑈
416
+ 0.80
417
+ N/A
418
+ 0.60
419
+ N/A
420
+ 0.50
421
+ Ƹ𝑠
422
+ 0.80
423
+ 0.70
424
+ 0.60
425
+ 0.55
426
+ 0.50
427
+ 𝐼𝑜𝑈
428
+ 0.80
429
+ N/A
430
+ 0.60
431
+ N/A
432
+ 0.50
433
+ Ƹ𝑠
434
+ 0.50
435
+ 0.55
436
+ 0.60
437
+ 0.70
438
+ 0.80
439
+ Negatively correlated - Low APC
440
+ Precision
441
+ Pos. Detections
442
+ APIoU
443
+ P50
444
+ 1.00
445
+ 0.67
446
+ 0.60
447
+ 0.76
448
+ P75
449
+ 1.00
450
+ 0.00
451
+ 0.00
452
+ 0.33
453
+ Precision
454
+ Pos. Detections
455
+ APIoU
456
+ P50
457
+ 1.00
458
+ 0.67
459
+ 0.60
460
+ 0.76
461
+ P75
462
+ 0.20
463
+ 0.00
464
+ 0.00
465
+ 0.07
466
+ Negatively correlated
467
+ Solid BBs
468
+ : Ground truths
469
+ Dashed BBs
470
+ : Detections after
471
+ post-processing ( Ƹ𝑠𝑝𝑜𝑠𝑡
472
+ 𝐼,𝑐 , ෠𝐵𝑝𝑜𝑠𝑡
473
+ 𝐼,𝑐 in Fig. 3)
474
+ (a) Image-level Correlation for better NMS
475
+ High
476
+ IoU
477
+ Low
478
+ IoU
479
+ Figure 4: How correlation affects detection performance. (a) Image-level correlation: Given detections before post-processing,
480
+ NMS benefits from image-level correlation, thereby yielding detections with better IoU. Compare IoUs of detections in “posi-
481
+ tively correlated” (i.e., when the dark-colored ones have larger score) and “negatively correlated” (i.e., when the light-colored
482
+ ones have larger score) outputs after NMS. (b) Class-level correlation: Given detections after post-processing, APs with larger
483
+ IoUs and COCO-style AP benefit from positive class-level correlation (compare APIoU columns in “positively correlated” and
484
+ “negatively correlated” outputs after AP Calculation to see lower AP75 for the “negatively correlated” output in the red cell).
485
+ PIoU: Precision computed on a detection using the threshold IoU, True positives are color-coded in tables and input, white
486
+ cells: false positives, and hence their IoU is not available, N/A.
487
+ the single AP value, APIoU. When the correlation between
488
+ classification and localization is maximized among true pos-
489
+ itives, larger precision values are obtained on the same de-
490
+ tections in larger IoU values (e.g. P75 of orange detection is
491
+ 1.00 and 0.20 with positive and negative correlation respec-
492
+ tively in Fig. 4(b)).
493
+ 3.2
494
+ Analyses of Object Detectors wrt. Correlation
495
+ Dataset and Implementation Details. Unless otherwise
496
+ specified; we (i) employ the widely-used COCO dataset (Lin
497
+ et al. 2014) by training the models on trainval35K (115K
498
+ images), testing on minival (5k images), comparing with
499
+ SOTA on test-dev (20k images), (ii) build upon the mmde-
500
+ tection framework (Chen et al. 2019), (iii) rely on AP-based
501
+ measures and also use Optimal LRP (oLRP) (Oksuz et al.
502
+ 2021b), βimg (Eq. 1) and βcls (Eq. 2) to provide more in-
503
+ sights, (iv) keep the standard configuration of the models,
504
+ (v) use a ResNet-50 backbone with FPN (Lin et al. 2017),
505
+ (vi) train models on 4 GPUs (A100 or V100 type GPUs)
506
+ with 4 images on each GPU (16 batch size).
507
+ Analysis Setup. We conduct experiments to analyze the
508
+ effects of the image-level (βimg – Table 1) and class-level
509
+ (βcls – Table 2) correlations. For both analyses, we com-
510
+ pare three sets of methods, all of which are incorporated
511
+ into the common ATSS baseline (Zhang et al. 2020) (see
512
+ Sec. 2 for a discussion of these methods): (i) AP Loss and
513
+ Focal Loss as methods not enforcing correlation, (ii) using
514
+ an auxiliary head to enforce correlation, and (iii) Quality Fo-
515
+ cal Loss (QFL), aLRP Loss and Rank & Sort Loss as recent
516
+ loss functions enforcing correlation. In our class-level anal-
517
+ ysis, we also employ NMS-free methods to demonstrate the
518
+ effects of correlation on that approach.
519
+ We compare the methods based on (i) their AP-based per-
520
+ formance, (ii) our proposed measures for correlation (Eqs.
521
+ 1 and 2), and finally (iii) lower/upper bounds, AP+1
522
+ C /AP−1
523
+ C ,
524
+ obtained by modifying the ranking of the confidence scores
525
+ pertaining to the GT classes of the positive detections to
526
+ minimize/maximize Eq. 1 in Table 1 and Eq. 2 in Table 2.
527
+ More particularly, in Table 1, given ˆsI
528
+ pre and ˆBI
529
+ pre (Fig. 3),
530
+ we collect the GT class probabilities of positive detections
531
+ and change their ranking in ˆsI
532
+ pre within an image follow-
533
+ ing the ranking order of IoUs (computed using ˆBI
534
+ pre), and in
535
+ Table 2, we do the same operation class-wise for true posi-
536
+ tives given ˆsc
537
+ post and ˆBc
538
+ post (Fig. 3). To decouple other types
539
+ of errors as much as possible; in Table 1, we do not modify
540
+ the scores of the negative detections, the predicted BBs and
541
+ the scores of non-GT classes of the positive detections, and
542
+ in Table 2, we do not modify the scores of the false positives
543
+ and the predicted BBs of the true positives. Note that achiev-
544
+ ing the upper bound in (iii) for image-level correlation also
545
+ corresponds to perfectly minimizing RS Loss.
546
+ Observations. We observe in Tables 1 and 2 that:
547
+ (1) Our proposed measures in Eqs. 1 and 2 can measure
548
+ the improvements in correlation consistently. In Tables 1 and
549
+ 2, (i) aLRP Loss and RS Loss are proposed to improve AP
550
+ Loss and (ii) aux. head and QFL are proposed to improve Fo-
551
+ cal Loss. In both tables, the proposed methods are shown to
552
+ improve their baselines in terms of βimg and βcls, suggest-
553
+ ing that our measures can consistently evaluate image-level
554
+ and class-level correlations respectively.
555
+ (2) NMS-free detectors can also potentially benefit from
556
+ correlation. All detectors, including NMS-free ones, can ex-
557
+ ploit class-level correlation (compare APC and AP+1
558
+ C to see
559
+ ∼ 10 points gap in Table 2). Still, existing methods do not
560
+ enforce this correlation on NMS-free detectors.
561
+ (3) Existing methods enforcing correlation have still a
562
+ large room for improvement. Considering that βimg
563
+
564
+ [27.2%, 33.8%] (Table 1) and βcls ∈ [37.5%, 47.0%] (Table
565
+ 2), there is still room for improvement wrt. correlation.
566
+
567
+ Performance
568
+ Modify ranking of scores
569
+ Method
570
+ APC AP50 AP75 βimg
571
+ AP−1
572
+ C
573
+ AP−1
574
+ 50
575
+ AP−1
576
+ 75
577
+ AP+1
578
+ C
579
+ AP+1
580
+ 50
581
+ AP+1
582
+ 75
583
+ Not Enforcing Correlation
584
+ ATSS w. AP Loss (Chen et al. 2020)
585
+ 38.1
586
+ 58.2
587
+ 41.0
588
+ 27.2
589
+ 24.9
590
+ 53.2
591
+ 19.2
592
+ 57.0
593
+ 72.4
594
+ 62.2
595
+ ATSS w. Focal Loss (Lin et al. 2020)
596
+ 38.7
597
+ 57.6
598
+ 41.5
599
+ 27.3
600
+ 25.6
601
+ 51.8
602
+ 21.1
603
+ 55.8
604
+ 70.6
605
+ 60.5
606
+ Using Aux. Head
607
+ ATSS w. ctr. head (Zhang et al. 2020)
608
+ 39.3
609
+ 57.5
610
+ 42.6
611
+ 28.7
612
+ 16.8
613
+ 32.4
614
+ 15.3
615
+ 49.8
616
+ 64.8
617
+ 54.2
618
+ Using Novel Loss
619
+ ATSS w. aLRP Loss (Oksuz et al. 2020)
620
+ 37.7
621
+ 57.4
622
+ 39.9
623
+ 33.8
624
+ 22.7
625
+ 48.8
626
+ 17.5
627
+ 54.2
628
+ 70.4
629
+ 58.7
630
+ ATSS w. QFL (Li et al. 2020)
631
+ 39.7
632
+ 58.1
633
+ 42.7
634
+ 33.2
635
+ 25.7
636
+ 51.1
637
+ 21.9
638
+ 55.8
639
+ 70.9
640
+ 60.6
641
+ ATSS w. RS Loss (Oksuz et al. 2021a)
642
+ 39.9
643
+ 58.9
644
+ 42.6
645
+ 30.9
646
+ 26.2
647
+ 53.9
648
+ 21.3
649
+ 57.1
650
+ 71.8
651
+ 62.1
652
+ Table 1: Evaluation of NMS-based detectors in terms of image-level correlation. See Eq. 1 for βimg. AP+1
653
+ IoU and AP−1
654
+ IoU refer to
655
+ the upper & lower bound APs (see analysis setup for details). The values are in %. Our βimg captures correlation consistently,
656
+ e.g. that (i) Focal Loss is improved by ctr. head and QFL and (ii) AP Loss is improved by aLRP Loss and RS Loss wrt. βimg.
657
+ Also, there is still room for improvement for object detectors wrt. βimg with a range between 27.2% and 33.8%.
658
+ Performance
659
+ Modify ranking of scores
660
+ Method
661
+ APC AP50 AP75
662
+ βcls
663
+ AP−1
664
+ C
665
+ AP−1
666
+ 50
667
+ AP−1
668
+ 75
669
+ AP+1
670
+ C
671
+ AP+1
672
+ 50
673
+ AP+1
674
+ 75
675
+ Not Enforcing Correlation
676
+ - NMS-free Detectors
677
+ Sparse R-CNN (Sun et al. 2021b)
678
+ 37.7
679
+ 55.8
680
+ 40.5
681
+ 37.5
682
+ 30.1
683
+ 55.8
684
+ 28.9
685
+ 48.6
686
+ 55.8
687
+ 52.7
688
+ DETR (Carion et al. 2020)
689
+ 40.1
690
+ 60.6
691
+ 42.0
692
+ 47.0
693
+ 32.9
694
+ 60.6
695
+ 30.6
696
+ 51.9
697
+ 60.6
698
+ 55.8
699
+ - NMS-based Detectors
700
+ ATSS w. AP Loss (Chen et al. 2020)
701
+ 38.1
702
+ 58.2
703
+ 41.0
704
+ 39.4
705
+ 30.0
706
+ 58.2
707
+ 26.6
708
+ 48.5
709
+ 58.2
710
+ 54.0
711
+ ATSS w. Focal Loss (Lin et al. 2020)
712
+ 38.7
713
+ 57.6
714
+ 41.5
715
+ 40.3
716
+ 30.2
717
+ 57.6
718
+ 27.3
719
+ 48.7
720
+ 57.6
721
+ 53.6
722
+ Using Aux. Head
723
+ ATSS w. ctr. head (Zhang et al. 2020)
724
+ 39.3
725
+ 57.4
726
+ 42.5
727
+ 42.5
728
+ 30.2
729
+ 57.4
730
+ 27.6
731
+ 48.7
732
+ 57.4
733
+ 53.5
734
+ Using Novel Loss
735
+ ATSS w. aLRP Loss (Oksuz et al. 2020)
736
+ 37.7
737
+ 57.4
738
+ 39.9
739
+ 42.0
740
+ 29.1
741
+ 57.4
742
+ 25.0
743
+ 47.8
744
+ 57.4
745
+ 52.7
746
+ ATSS w. QFL (Li et al. 2020)
747
+ 39.7
748
+ 58.1
749
+ 42.7
750
+ 45.7
751
+ 30.6
752
+ 58.1
753
+ 27.7
754
+ 49.1
755
+ 58.1
756
+ 53.9
757
+ ATSS w. RS Loss (Oksuz et al. 2021a)
758
+ 39.9
759
+ 58.9
760
+ 42.6
761
+ 43.2
762
+ 31.1
763
+ 58.9
764
+ 28.1
765
+ 49.8
766
+ 58.9
767
+ 54.8
768
+ Table 2: Evaluation of detectors wrt. class-level correlation. See Eq. 2 for βcls. AP+1
769
+ IoU & AP−1
770
+ IoU denote upper & lower bound
771
+ APs (analysis setup for details). Values are in %. NMS-free detectors can also benefit from class-level correlation (compare
772
+ AP+1
773
+ C with APC for Sparse R-CNN), and as in βimg (c.f. Table 1 and its caption), βcls measures the correlation consistently.
774
+ AP+1
775
+ 50 = AP−1
776
+ 50 = AP50 since only modifying TPs validated from IoU=0.50 does not effect AP50 (see Fig. 4(b) for an example).
777
+ (4) While significantly important, improving correlation
778
+ may not always imply performance improvement. For exam-
779
+ ple, aLRP Loss in Table 1 has the largest correlation but the
780
+ lowest APC. Such a situation can arise, for example, when a
781
+ method does not have good localization performance. In the
782
+ extreme case, assume a detector yields perfect βimg, image-
783
+ level ranking correlation, but the IoUs of all positive exam-
784
+ ples are less than 0.50 implying no TP at all. Hence, boost-
785
+ ing the correlation, while simultaneously preserving a good
786
+ performance in each branch, is critical.
787
+ 4
788
+ Correlation Loss: A Novel Loss Function
789
+ for Object Detection
790
+ Correlation (Corr.) Loss is a simple plug-in loss function to
791
+ improve correlation of classification and localization tasks.
792
+ Correlation Loss is unique in that it can be easily incorpo-
793
+ rated into any object detector, whether NMS-based or NMS-
794
+ free (see Observation (2) - Sec. 3.2), and improves perfor-
795
+ mance without affecting the model size, inference time and
796
+ with negligible effect on training time (Sec. 5.4). Further-
797
+ more, from a fundamental perspective, Corr. Loss can su-
798
+ pervise both of the classification and localisation heads for a
799
+ better correlation while existing methods generally focus on
800
+ a single head such as classification (Fig. 1).
801
+ Definition. Given an object detector with loss function
802
+ LOD, our Correlation Loss (Lcorr) is simply added using a
803
+ weighting hyper-parameter λcorr:
804
+ LOD + λcorrLcorr.
805
+ (3)
806
+ Lcorr is the Correlation Loss defined as:
807
+ Lcorr = 1 − ρ( ˆ
808
+ IoU,ˆs),
809
+ (4)
810
+ where ρ(·, ·) is a correlation coefficient; ˆs and
811
+ ˆ
812
+ IoU are the
813
+ confidence scores of the GT class and IoUs of the predicted
814
+ BBs pertaining to the positive examples in the batch.
815
+ Practical Usage. To avoid promoting trivial cases with
816
+ high correlation but low performance (Observation (4) -
817
+ Sec. 3.2), similar to QFL (Li et al. 2020) and RS Loss
818
+ (Oksuz et al. 2021a), we only use the gradients of Lcorr
819
+ wrt. classification score, i.e., we backpropagate the gradi-
820
+ ents through only the classifier. We mainly adopt two dif-
821
+ ferent correlation coefficients for ρ(·, ·) and obtain two ver-
822
+ sions of Correlation Loss: (i) Concordance Loss, defined as
823
+ the Correlation Loss when Concordance correlation coeffi-
824
+ cient is optimized (ρ(·, ·) = γ(·, ·)), which aims to match
825
+
826
+ Method
827
+ APC ↑AP50 ↑AP75 ↑ oLRP ↓
828
+ NMS-based
829
+ Retina Net (Lin et al. 2020)
830
+ 36.5
831
+ 55.4
832
+ 39.1
833
+ 70.7
834
+ w. Conc.Corr (Ours)
835
+ 37.0
836
+ 55.7
837
+ 39.7
838
+ 70.2
839
+ w. Spear.Corr (Ours)
840
+ 37.5
841
+ 55.4
842
+ 40.5
843
+ 69.7
844
+ Fovea Box (Kong et al. 2020)
845
+ 36.4
846
+ 56.5
847
+ 38.6
848
+ 70.2
849
+ w. Conc.Corr (Ours)
850
+ 37.1
851
+ 56.4
852
+ 39.6
853
+ 69.7
854
+ w. Spear.Corr (Ours)
855
+ 37.0
856
+ 55.6
857
+ 39.3
858
+ 70.0
859
+ ATSS (Zhang et al. 2020)
860
+ 38.7
861
+ 57.6
862
+ 41.5
863
+ 69.0
864
+ w. Conc.Corr (Ours)
865
+ 39.8
866
+ 57.9
867
+ 43.2
868
+ 68.2
869
+ w. Spear.Corr (Ours)
870
+ 39.3
871
+ 56.6
872
+ 42.5
873
+ 68.7
874
+ PAA (Kim and Lee 2020)
875
+ 39.9
876
+ 57.3
877
+ 43.4
878
+ 68.6
879
+ w. Conc.Corr (Ours)
880
+ 40.7
881
+ 58.8
882
+ 44.3
883
+ 67.7
884
+ w. Spear.Corr (Ours)
885
+ 40.4
886
+ 58.0
887
+ 43.7
888
+ 67.8
889
+ NMS-free
890
+ Sparse R-CNN (Sun et al. 2021b) 37.7
891
+ 55.8
892
+ 40.5
893
+ 69.5
894
+ w. Conc.Corr (Ours)
895
+ 38.9
896
+ 57.2
897
+ 41.8
898
+ 68.1
899
+ w. Spear.Corr (Ours)
900
+ 39.3
901
+ 56.7
902
+ 42.5
903
+ 68.3
904
+ Table 3: Comparison on detectors not considering correla-
905
+ tion. Accordingly, we remove aux. heads from ATSS (Zhang
906
+ et al. 2020) and PAA (Kim and Lee 2020) for fair compari-
907
+ son (see Table 6 for comparison with aux. heads and novel
908
+ loss functions). We use ResNet-50 and train the models for
909
+ 12 epochs. Simply incorporating our Corr. Loss provides
910
+ (i) ∼ 1APC improvement for NMS-based detectors consis-
911
+ tently and (ii) ∼ 1.5APC on the NMS-free Sparse R-CNN.
912
+ the confidence scores with IoUs. (ii) Spearman Loss as Cor-
913
+ relation Loss when Spearman correlation coefficient is op-
914
+ timized (ρ(·, ·) = β(·, ·)), thereby enforcing the ranking
915
+ of the classification scores considering IoUs. To tackle the
916
+ non-differentiability of ranking operation while computing
917
+ Spearman Loss, we leverage the differentiable sorting oper-
918
+ ation from Blondel et al. (Blondel et al. 2020). When apply-
919
+ ing our Correlation Loss to NMS-free methods, which use
920
+ an iterative multi-stage loss function, we incorporate Lcorr
921
+ to every stage.
922
+ 5
923
+ Experimental Evaluation
924
+ We evaluate Corr. Loss on (i) the COCO dataset with five
925
+ different object detectors of various types (Sparse R-CNN as
926
+ NMS-free, FoveaBox as anchor-free, RetinaNet as anchor-
927
+ based, ATSS and PAA using auxiliary head), and one in-
928
+ stance segmentation method, YOLACT; and (ii) an addi-
929
+ tional dataset (Cityscapes) for the method with the largest
930
+ gain, i.e., Sparse R-CNN.
931
+ 5.1
932
+ Comparison with Methods Not Considering
933
+ Correlation
934
+ We train these five object detectors and the instance segmen-
935
+ tation method (Tables 3 and 5) with and without our Corr.
936
+ Loss (as Concordance Loss or Spearman Loss).
937
+ NMS-based Detectors. Table 3 suggests ∼ 1.0APC gain
938
+ on NMS-based detectors: (i) Spearman Loss (λcorr = 0.1)
939
+ improves RetinaNet by 1.0APC and oLRP, (ii) Concor-
940
+ dance Loss (λcorr = 0.2) enhances anchor-free FoveaBox
941
+ by 0.7APC, and (iii) Concordance Loss (λcorr = 0.3) im-
942
+ proves ATSS and PAA by ∼ 1APC and ∼ 1oLRP.
943
+ NMS-free Detectors. Our results in Table 3 suggest that
944
+ Sparse R-CNN, an NMS-free method, can also benefit from
945
+ Method
946
+ AP
947
+ AP50
948
+ AP75
949
+ Sparse R-CNN
950
+ 39.0
951
+ 63.1
952
+ 37.6
953
+ w. Spear.Corr (Ours)
954
+ 40.8
955
+ 64.4
956
+ 40.8
957
+ Table 4: Results on Cityscapes dataset.
958
+ Method
959
+ APmask
960
+ C
961
+ APmask
962
+ 50
963
+ APmask
964
+ 75
965
+ YOLACT (Bolya et al. 2019)
966
+ 28.3
967
+ 47.8
968
+ 28.8
969
+ w. Conc.Corr (Ours)
970
+ 28.8
971
+ 48.3
972
+ 29.6
973
+ w. Spear.Corr (Ours)
974
+ 29.0
975
+ 48.3
976
+ 30.0
977
+ Table 5: Comparison with YOLACT.
978
+ our Corr. Loss: (i) Both Concordance (λcorr = 0.3) and
979
+ Spearman Losses (λcorr
980
+ = 0.2) improve baseline; (ii)
981
+ Spearman Loss improves APC significantly by up to 1.6;
982
+ (iii) as hypothesized, the gains are owing to APs with larger
983
+ IoUs, e.g., AP75 improves by up to 2.0, and (iv) gains persist
984
+ in a stronger setting of Sparse R-CNN (Appendix).
985
+ Cityscapes dataset. To see the effect of Corr. Loss over
986
+ different scenarios, we train Sparse R-CNN with Spear-
987
+ man Loss (the model that has the best gain over baseline
988
+ in Table 3), on the Cityscapes dataset (Cordts et al. 2016)
989
+ (λcorr = 0.6), a dataset for autonomous driving object de-
990
+ tection. Table 4 presents that (i) Spearman Loss also im-
991
+ proves baseline Sparse R-CNN on Cityscapes by 1.8 AP and
992
+ (ii) our gain mainly originates from APs with larger IoUs,
993
+ i.e. AP75 improves by more than 3 points, from 37.6 to 40.8.
994
+ Instance Segmentation. We train YOLACT (Bolya et al.
995
+ 2019) as an instance segmentation method with Corr. Loss
996
+ and observed 0.7 mask AP gain using Spearman Loss
997
+ (λcorr = 0.5 - Table 5), implying 1.7% relative gain.
998
+ 5.2
999
+ Comparison with Methods Enforcing
1000
+ Correlation
1001
+ Table 6 compares Corr. Loss. with using an aux. head
1002
+ (Zhang et al. 2020), QFL (Li et al. 2020) and RS Loss (Ok-
1003
+ suz et al. 2021a) on the common ATSS baseline (Zhang et al.
1004
+ 2020) wrt. detection and correlation:
1005
+ Detection Performance. Reaching 39.8APC without an
1006
+ aux. head, Concordance Loss (Table 6) outperforms using an
1007
+ aux. head, which introduces additional learnable parameters
1008
+ (39.8 vs 39.3APC), and reaches on-par performance with
1009
+ the recently proposed, relatively complicated loss functions,
1010
+ Aux.QFLRS LossOurs APC AP50 AP75 oLRP ↓ βimg ↑βcls ↑
1011
+ 38.7 57.6 41.5
1012
+ 68.9
1013
+ 27.3
1014
+ 40.3
1015
+
1016
+ 39.3 57.5 42.6
1017
+ 68.6
1018
+ 28.7
1019
+ 42.5
1020
+
1021
+ 39.7 58.1 42.7
1022
+ 68.0
1023
+ 33.2
1024
+ 45.7
1025
+
1026
+ 39.9 58.9 42.6
1027
+ 67.9
1028
+ 30.9
1029
+ 43.2
1030
+
1031
+ 39.8 57.6 43.1
1032
+ 68.2
1033
+ 31.6
1034
+ 45.2
1035
+
1036
+
1037
+ 40.0 58.0 43.3
1038
+ 68.0
1039
+ 31.1
1040
+ 44.8
1041
+
1042
+
1043
+ 39.9 58.2 43.2
1044
+ 67.7
1045
+ 34.6
1046
+ 45.6
1047
+
1048
+
1049
+ 40.2 58.6 43.5
1050
+ 67.9
1051
+ 33.6
1052
+ 46.1
1053
+ Table 6: Comparison with methods enforcing correlation.
1054
+ Corr. Loss (i) reaches similar results with existing methods
1055
+ on ATSS, (ii) is complementary to those methods thanks to
1056
+ its simple design and (iii) once combined with RS Loss, out-
1057
+ performs compared methods.
1058
+
1059
+ Method
1060
+ Backbone
1061
+ Epochs APC AP50 AP75 APS APM APL
1062
+ Venue
1063
+ NMS-based
1064
+ ATSS (Zhang et al. 2020)
1065
+ ResNet-101-DCN
1066
+ 24
1067
+ 46.3 64.7
1068
+ 50.4 27.7 49.8 58.4
1069
+ CVPR 2020
1070
+ GFLv2 (Li et al. 2019)
1071
+ ResNet-101-DCN
1072
+ 24
1073
+ 48.3 66.5
1074
+ 52.8 28.8 51.9 60.7
1075
+ CVPR 2021
1076
+ aLRP Loss (Oksuz et al. 2020)
1077
+ ResNeXt-101-DCN
1078
+ 100
1079
+ 48.9 69.3
1080
+ 52.5 30.8 51.5 62.1 NeurIPS 2020
1081
+ VFNet (Zhang et al. 2021)
1082
+ ResNet-101-DCN
1083
+ 24
1084
+ 49.2 67.5
1085
+ 53.7 29.7 52.6 62.4
1086
+ CVPR 2021
1087
+ DW (Li et al. 2022)
1088
+ ResNet-101-DCN
1089
+ 24
1090
+ 49.3 67.6
1091
+ 53.3 29.2 52.2 63.5
1092
+ CVPR 2022
1093
+ TOOD (Feng et al. 2021)
1094
+ ResNet-101-DCN
1095
+ 24
1096
+ 49.6 67.4
1097
+ 54.1 30.5 52.7 62.4
1098
+ ICCV 2021
1099
+ RS-Mask R-CNN+ (Oksuz et al. 2021a) ResNeXt-101-DCN
1100
+ 36
1101
+ 50.2 70.3 54.8 31.5 53.5 63.9
1102
+ ICCV 2021
1103
+ NMS-free
1104
+ TSP R-CNN (Sun et al. 2021c)
1105
+ ResNet-101-DCN
1106
+ 96
1107
+ 47.4 66.7
1108
+ 51.9 29.0 49.7 59.1
1109
+ ICCV 2021
1110
+ Sparse R-CNN (Sun et al. 2021b)
1111
+ ResNeXt-101-DCN
1112
+ 36
1113
+ 48.9 68.3
1114
+ 53.4 29.9 50.9 62.4
1115
+ CVPR 2021
1116
+ Dynamic DETR (Dai et al. 2021)
1117
+ ResNeXt-101-DCN
1118
+ 36
1119
+ 49.3 68.4
1120
+ 53.6 30.3 51.6 62.5
1121
+ ICCV 2021
1122
+ Deformable DETR (Zhu et al. 2021)
1123
+ ResNeXt-101-DCN
1124
+ 50
1125
+ 50.1 69.7
1126
+ 54.6 30.6 52.8 64.7
1127
+ ICLR 2021
1128
+ Ours
1129
+ Corr-Sparse R-CNN
1130
+ ResNet-101-DCN
1131
+ 36
1132
+ 49.6 67.8
1133
+ 54.1 29.2 52.3 64.9
1134
+ Corr-Sparse R-CNN
1135
+ ResNeXt-101-DCN
1136
+ 36
1137
+ 51.0 69.2 55.7 31.1 53.7 66.3
1138
+ Table 7: SOTA comparison on COCO test-dev. Our Corr-Sparse R-CNN (i) performs on-par or better compared to recent NMS-
1139
+ based methods, all of which also enforce correlation, and (ii) outperforms NMS-free methods by a notable margin. Results are
1140
+ obtained from papers.
1141
+ QFL (Li et al. 2020) and RS Loss (Oksuz et al. 2021a). Be-
1142
+ sides, owing to its simple usage, Concordance Loss is com-
1143
+ plementary to existing methods: It yields 40.0APC with an
1144
+ aux. head (+0.7 APC) and 40.2APC with RS Loss (+0.3
1145
+ APC) without introducing additional learnable parameters.
1146
+ Correlation Analysis. To provide insight, we report βimg
1147
+ (Eq. 1) and βcls (Eq. 2) in Table 6: Our Concordance Loss (i)
1148
+ improves baseline correlation significantly, enhancing βimg
1149
+ (from 27.3% to 31.6%) and βcls (from 40.3% to 45.2%)
1150
+ both by ∼ 5%, and (ii) results in better correlation than all
1151
+ methods wrt. βimg and βcls once combined with QFL and
1152
+ RS Loss respectively. This set of results confirms that Con-
1153
+ cordance Loss improves correlation between classification
1154
+ and localization tasks in both image-level and class-level.
1155
+ 5.3
1156
+ Comparison with SOTA
1157
+ Here, we prefer Sparse R-CNN owing to its competitive de-
1158
+ tection performance and our large gains. We train our “Corr-
1159
+ Sparse R-CNN” for 36 epochs with DCNv2 (Zhu et al. 2019)
1160
+ and multiscale training by randomly resizing the shorter side
1161
+ within [480, 960] similar to common practice (Oksuz et al.
1162
+ 2021a; Zhang et al. 2021; Sun et al. 2021b). Table 7 presents
1163
+ the results on COCO test-dev (Lin et al. 2014):
1164
+ NMS-based Methods. On the common ResNet-101-
1165
+ DCN backbone and with similar data augmentation, our
1166
+ Corr-Sparse R-CNN yields 49.6APC at 13.7 fps (on a V100
1167
+ GPU) outperforming recent NMS-based methods, all of
1168
+ which also enforce correlation, e.g., (i) RS-R-CNN (Ok-
1169
+ suz et al. 2021a) by 1.8APC, (ii) GFLv2 (Li et al. 2019)
1170
+ by more than 1APC, and (iii) VFNet (Zhang et al. 2021) in
1171
+ terms of not only APC but also efficiency (with 12.6 fps on
1172
+ a V100 GPU). On ResNeXt-101-DCN, our Corr-Sparse R-
1173
+ CNN provides 51.0APC at 6.8 fps, surpassing all methods
1174
+ including RS-Mask R-CNN+ (50.2APC at 6.4 fps), addi-
1175
+ tionally using masks and Carafe FPN (Wang et al. 2019).
1176
+ NMS-free Methods. Our Corr-Sparse R-CNN outper-
1177
+ forms (i) TSP R-CNN (Sun et al. 2021c) by more than
1178
+ 2APC on ResNet-101-DCN with significantly less training,
1179
+ (ii) Sparse R-CNN (Sun et al. 2021b) by ∼ 2APC and De-
1180
+ formable DETR (Zhu et al. 2021), a recent strong NMS-free
1181
+ method, by ∼ 1APC on ResNeXt-101-DCN.
1182
+ 5.4
1183
+ Ablation & Hyper-parameter Analyses
1184
+ Optimizing Different Correlation Coefficients. Spearman
1185
+ Loss yields better localization performance, i.e. the lowest
1186
+ localization error wrt. oLRPLoc in all experiments while
1187
+ it rarely yields the best oLRPFP or oLRPFN, implying its
1188
+ contribution to classification to be weaker than Concordance
1189
+ Loss (see Appendix for components of oLRP). We also
1190
+ tried Pearson Correlation Coefficient on ATSS and Sparse
1191
+ R-CNN but it performed worse compared to either using
1192
+ Spearman or Concordance (Appendix).
1193
+ Backpropagating Through Different Heads. On Sparse
1194
+ R-CNN, we observed that the performance degrades when
1195
+ we backpropagate either only localization head (37.5 AP) or
1196
+ both heads (38.9 AP). Hence, we preferred backpropagating
1197
+ the gradients only through the classification head (39.3 AP).
1198
+ Effect on Training Time. Using Spearman or Concor-
1199
+ dance Loss to train Sparse R-CNN, computing the loss for 6
1200
+ times each iteration, increases iteration time 0.50 sec to 0.51
1201
+ sec on V100 GPUs, suggesting a negligible overhead.
1202
+ Sensitivity to λcorr. We found it sufficient to search over
1203
+ {0.1, 0.2, 0.3.0.4, 0.5, 0.6} to tune λcorr. Appendix presents
1204
+ empirical results for grid search.
1205
+ 5.5
1206
+ Additional Material
1207
+ This paper is accompanied by an Appendix containing (i) the
1208
+ effect of Corr.Loss on Sparse R-CNN using its stronger set-
1209
+ ting, (ii) components of oLRP for detectors in Table 3, (iii)
1210
+ results when Pearson Correlation Coefficient is optimized,
1211
+ (iv) our grid search to tune λcorr.
1212
+ 6
1213
+ Conclusion
1214
+ In this paper, we defined measures to evaluate object detec-
1215
+ tors wrt. correlation, provided analyses on several methods
1216
+ and proposed Correlation Loss as an auxiliary loss function
1217
+ to enforce correlation for object detectors. Our extensive
1218
+ experiments on six detectors show that Correlation Loss.
1219
+ consistently improves the detection and correlation perfor-
1220
+ mances, and reaches SOTA results.
1221
+
1222
+ Acknowledgments
1223
+ This work was supported by the Scientific and Technolog-
1224
+ ical Research Council of Turkey (T ¨UB˙ITAK) (under grant
1225
+ 120E494). We also gratefully acknowledge the computa-
1226
+ tional resources kindly provided by T ¨UB˙ITAK ULAKBIM
1227
+ High Performance and Grid Computing Center (TRUBA)
1228
+ and METU Robotics and Artificial Intelligence Center
1229
+ (ROMER). Dr. Akbas is supported by the “Young Scientist
1230
+ Awards Program (BAGEP)” of Science Academy, Turkey.
1231
+ References
1232
+ Blondel, M.; Teboul, O.; Berthet, Q.; and Djolonga, J. 2020.
1233
+ Fast differentiable sorting and ranking.
1234
+ In International
1235
+ Conference on Machine Learning (ICML).
1236
+ Bolya, D.; Zhou, C.; Xiao, F.; and Lee, Y. J. 2019. YOLACT:
1237
+ Real-time Instance Segmentation.
1238
+ In IEEE/CVF Interna-
1239
+ tional Conference on Computer Vision (ICCV).
1240
+ Carion, N.; Massa, F.; Synnaeve, G.; Usunier, N.; Kirillov,
1241
+ A.; and Zagoruyko, S. 2020. End-to-End Object Detection
1242
+ with Transformers. In European Conference on Computer
1243
+ Vision (ECCV).
1244
+ Chen, K.; Lin, W.; li, J.; See, J.; Wang, J.; and Zou, J. 2020.
1245
+ AP-Loss for Accurate One-Stage Object Detection. IEEE
1246
+ Transactions on Pattern Analysis and Machine Intelligence
1247
+ (TPAMI), 1–1.
1248
+ Chen, K.; Wang, J.; Pang, J.; Cao, Y.; Xiong, Y.; Li, X.; Sun,
1249
+ S.; Feng, W.; Liu, Z.; Xu, J.; Zhang, Z.; Cheng, D.; Zhu, C.;
1250
+ Cheng, T.; Zhao, Q.; Li, B.; Lu, X.; Zhu, R.; Wu, Y.; Dai,
1251
+ J.; Wang, J.; Shi, J.; Ouyang, W.; Loy, C. C.; and Lin, D.
1252
+ 2019. MMDetection: Open MMLab Detection Toolbox and
1253
+ Benchmark. arXiv, 1906.07155.
1254
+ Cordts, M.; Omran, M.; Ramos, S.; Rehfeld, T.; Enzweiler,
1255
+ M.; Benenson, R.; Franke, U.; Roth, S.; and Schiele, B.
1256
+ 2016. The Cityscapes Dataset for Semantic Urban Scene
1257
+ Understanding.
1258
+ In IEEE Conference on Computer Vision
1259
+ and Pattern Recognition (CVPR).
1260
+ Dai, X.; Chen, Y.; Yang, J.; Zhang, P.; Yuan, L.; and Zhang,
1261
+ L. 2021.
1262
+ Dynamic DETR: End-to-End Object Detection
1263
+ With Dynamic Attention. In IEEE/CVF International Con-
1264
+ ference on Computer Vision (ICCV).
1265
+ Feng, C.; Zhong, Y.; Gao, Y.; Scott, M. R.; and Huang, W.
1266
+ 2021. TOOD: Task-aligned One-stage Object Detection. In
1267
+ The International Conference on Computer Vision (ICCV).
1268
+ He, K.; Gkioxari, G.; Dollar, P.; and Girshick, R. 2017. Mask
1269
+ R-CNN. In IEEE/CVF International Conference on Com-
1270
+ puter Vision (ICCV).
1271
+ He, Y.; Zhu, C.; Wang, J.; Savvides, M.; and Zhang, X. 2019.
1272
+ Bounding Box Regression With Uncertainty for Accurate
1273
+ Object Detection. In IEEE/CVF Conference on Computer
1274
+ Vision and Pattern Recognition (CVPR).
1275
+ Huang, Z.; Huang, L.; Gong, Y.; Huang, C.; and Wang, X.
1276
+ 2019. Mask Scoring R-CNN. In IEEE/CVF Conference on
1277
+ Computer Vision and Pattern Recognition (CVPR).
1278
+ Jiang, B.; Luo, R.; Mao, J.; Xiao, T.; and Jiang, Y. 2018.
1279
+ Acquisition of Localization Confidence for Accurate Object
1280
+ Detection. In The European Conference on Computer Vision
1281
+ (ECCV).
1282
+ Kim, K.; and Lee, H. S. 2020.
1283
+ Probabilistic Anchor As-
1284
+ signment with IoU Prediction for Object Detection. In The
1285
+ European Conference on Computer Vision (ECCV).
1286
+ Kong, T.; Sun, F.; Liu, H.; Jiang, Y.; Li, L.; and Shi, J. 2020.
1287
+ FoveaBox: Beyound Anchor-Based Object Detection. IEEE
1288
+ Transactions on Image Processing, 29: 7389–7398.
1289
+ Law, H.; and Deng, J. 2018. CornerNet: Detecting Objects
1290
+ as Paired Keypoints. In The European Conference on Com-
1291
+ puter Vision (ECCV).
1292
+ Li, S.; He, C.; Li, R.; and Zhang, L. 2022. A Dual Weight-
1293
+ ing Label Assignment Scheme for Object Detection.
1294
+ In
1295
+ IEEE/CVF Conference on Computer Vision and Pattern
1296
+ Recognition (CVPR).
1297
+ Li, X.; Wang, W.; Hu, X.; Li, J.; Tang, J.; and Yang, J.
1298
+ 2019. Generalized Focal Loss V2: Learning Reliable Lo-
1299
+ calization Quality Estimation for Dense Object Detection.
1300
+ In IEEE/CVF Conference on Computer Vision and Pattern
1301
+ Recognition (CVPR).
1302
+ Li, X.; Wang, W.; Wu, L.; Chen, S.; Hu, X.; Li, J.; Tang,
1303
+ J.; and Yang, J. 2020.
1304
+ Generalized Focal Loss: Learning
1305
+ Qualified and Distributed Bounding Boxes for Dense Object
1306
+ Detection. In Advances in Neural Information Processing
1307
+ Systems (NeurIPS).
1308
+ Lin, T.; Doll´ar, P.; Girshick, R. B.; He, K.; Hariharan, B.;
1309
+ and Belongie, S. J. 2017. Feature Pyramid Networks for
1310
+ Object Detection. In IEEE/CVF Conference on Computer
1311
+ Vision and Pattern Recognition (CVPR).
1312
+ Lin, T.-Y.; Goyal, P.; Girshick, R.; He, K.; and Doll´ar, P.
1313
+ 2020.
1314
+ Focal Loss for Dense Object Detection.
1315
+ IEEE
1316
+ Transactions on Pattern Analysis and Machine Intelligence
1317
+ (TPAMI), 42(2): 318–327.
1318
+ Lin, T.-Y.; Maire, M.; Belongie, S.; Hays, J.; Perona, P.; Ra-
1319
+ manan, D.; Doll´ar, P.; and Zitnick, C. L. 2014. Microsoft
1320
+ COCO: Common Objects in Context. In The European Con-
1321
+ ference on Computer Vision (ECCV).
1322
+ Liu, J.; Li, D.; Zheng, R.; Tian, L.; and Shan, Y. 2021.
1323
+ RankDetNet: Delving Into Ranking Constraints for Object
1324
+ Detection. In IEEE/CVF Conference on Computer Vision
1325
+ and Pattern Recognition (CVPR), 264–273.
1326
+ Oksuz, K.; Cam, B. C.; Akbas, E.; and Kalkan, S. 2020. A
1327
+ Ranking-based, Balanced Loss Function Unifying Classifi-
1328
+ cation and Localisation in Object Detection. In Advances in
1329
+ Neural Information Processing Systems (NeurIPS).
1330
+ Oksuz, K.; Cam, B. C.; Akbas, E.; and Kalkan, S. 2021a.
1331
+ Rank & Sort Loss for Object Detection and Instance Seg-
1332
+ mentation. In The International Conference on Computer
1333
+ Vision (ICCV).
1334
+ Oksuz, K.; Cam, B. C.; Kalkan, S.; and Akbas, E. 2021b.
1335
+ One Metric to Measure them All: Localisation Recall Pre-
1336
+ cision (LRP) for Evaluating Visual Detection Tasks. IEEE
1337
+ Transactions on Pattern Analysis and Machine Intelligence,
1338
+ 1–1.
1339
+ Ren, S.; He, K.; Girshick, R.; and Sun, J. 2017.
1340
+ Faster
1341
+ R-CNN: Towards Real-Time Object Detection with Region
1342
+ Proposal Networks. IEEE Transactions on Pattern Analysis
1343
+ and Machine Intelligence (TPAMI), 39(6): 1137–1149.
1344
+
1345
+ Roh, B.; Shin, J.; Shin, W.; and Kim, S. 2022. Sparse DETR:
1346
+ Efficient End-to-End Object Detection with Learnable Spar-
1347
+ sity. In The International Conference on Learning Repre-
1348
+ sentations (ICLR).
1349
+ Sun, P.; Jiang, Y.; Xie, E.; Shao, W.; Yuan, Z.; Wang, C.;
1350
+ and Luo, P. 2021a. What Makes for End-to-End Object De-
1351
+ tection? In International Conference on Machine Learning
1352
+ (ICML).
1353
+ Sun, P.; Zhang, R.; Jiang, Y.; Kong, T.; Xu, C.; Zhan, W.;
1354
+ Tomizuka, M.; Li, L.; Yuan, Z.; Wang, C.; and Luo, P.
1355
+ 2021b. SparseR-CNN: End-to-End Object Detection with
1356
+ Learnable Proposals.
1357
+ In IEEE/CVF Conference on Com-
1358
+ puter Vision and Pattern Recognition (CVPR).
1359
+ Sun, Z.; Cao, S.; Yang, Y.; and Kitani, K. M. 2021c. Re-
1360
+ thinking Transformer-Based Set Prediction for Object De-
1361
+ tection. In IEEE/CVF International Conference on Com-
1362
+ puter Vision (ICCV).
1363
+ Tian, Z.; Shen, C.; Chen, H.; and He, T. 2019. FCOS: Fully
1364
+ Convolutional One-Stage Object Detection. In IEEE/CVF
1365
+ International Conference on Computer Vision (ICCV).
1366
+ Wang, J.; Chen, K.; Xu, R.; Liu, Z.; Loy, C. C.; and Lin, D.
1367
+ 2019. CARAFE: Content-Aware ReAssembly of FEatures.
1368
+ In IEEE/CVF International Conference on Computer Vision
1369
+ (ICCV).
1370
+ Zhang, H.; Wang, Y.; Dayoub, F.; and S¨underhauf, N. 2021.
1371
+ VarifocalNet: An IoU-aware Dense Object Detector.
1372
+ In
1373
+ IEEE/CVF Conference on Computer Vision and Pattern
1374
+ Recognition (CVPR).
1375
+ Zhang, S.; Chi, C.; Yao, Y.; Lei, Z.; and Li, S. Z. 2020.
1376
+ Bridging the Gap Between Anchor-Based and Anchor-Free
1377
+ Detection via Adaptive Training Sample Selection.
1378
+ In
1379
+ IEEE/CVF Conference on Computer Vision and Pattern
1380
+ Recognition (CVPR).
1381
+ Zhang, X.; Wan, F.; Liu, C.; Ji, R.; and Ye, Q. 2019. FreeAn-
1382
+ chor: Learning to Match Anchors for Visual Object Detec-
1383
+ tion. In Advances in Neural Information Processing Systems
1384
+ (NeurIPS).
1385
+ Zhu, X.; Hu, H.; Lin, S.; and Dai, J. 2019. Deformable Con-
1386
+ vNets V2: More Deformable, Better Results. In IEEE/CVF
1387
+ Conference on Computer Vision and Pattern Recognition
1388
+ (CVPR).
1389
+ Zhu, X.; Su, W.; Lu, L.; Li, B.; Wang, X.; and Dai, J. 2021.
1390
+ Deformable {DETR}: Deformable Transformers for End-
1391
+ to-End Object Detection. In International Conference on
1392
+ Learning Representations (ICLR).
1393
+ APPENDIX
1394
+ Sensitivity to λcorr. In Table A.8, we see that (i) λcorr =
1395
+ 0.2 provides the best performance overall, (ii) the perfor-
1396
+ mance is not very sensitive to λcorr and (iii) a grid search
1397
+ over {0.1, 0.2, 0.3.0.4, 0.5, 0.6} is sufficient (outside of this
1398
+ range, performance drops).
1399
+ The effect of Corr.Loss on Sparse R-CNN using its
1400
+ stronger setting. Following Sun et al. (Sun et al. 2021b)
1401
+ Method
1402
+ Dataset
1403
+ 0.0
1404
+ 0.1
1405
+ 0.2
1406
+ 0.3
1407
+ 0.4
1408
+ 0.5
1409
+ 0.6
1410
+ ATSS
1411
+ COCO
1412
+ 38.7 38.8 39.3 39.8 39.7 39.7 39.6
1413
+ YOLACT
1414
+ COCO
1415
+ 28.3 28.6 28.8 28.8 29.0 28.8 28.6
1416
+ Sparse R-CNN
1417
+ COCO
1418
+ 37.7 38.7 39.3 39.1 39.0 38.1 38.0
1419
+ Sparse R-CNN Cityscapes 39.0 39.0 38.3 39.9 40.0 40.0 40.8
1420
+ Table A.8: Grid search to tune λcorr on different models.
1421
+ We present the results for concordance correlation coeffi-
1422
+ cient for ATSS and YOLACT, and spearman correlation co-
1423
+ efficient for Sparse R-CNN models. 0.0 corresponds to not
1424
+ including our Correlation Loss.
1425
+ Method
1426
+ AP
1427
+ AP50
1428
+ AP75
1429
+ Sparse R-CNN (Sun et al. 2021b)
1430
+ 45.0
1431
+ 64.1
1432
+ 48.9
1433
+ w. Conc.Corr (Ours)
1434
+ 45.5
1435
+ 64.4
1436
+ 49.7
1437
+ w. Spear.Corr (Ours)
1438
+ 46.1
1439
+ 64.0
1440
+ 50.4
1441
+ Table A.9: Comparison with stronger Sparse R-CNN.
1442
+ (Table A.9), we train Sparse R-CNN with 36 epochs train-
1443
+ ing, 300 proposals, multi-scale training and random crop-
1444
+ ping. Table A.9 presents that the improvement of our Spear-
1445
+ man Loss on this strong baseline is ∼ 1 AP points.
1446
+ Using Pearson Correlation Coefficient. We tried opti-
1447
+ mizing pearson correlation coefficient as well and observed
1448
+ that while it has similar performance with concordance cor-
1449
+ relation coefficient on ATSS and spearman correlation coef-
1450
+ ficient on Sparse R-CNN, it does not outperform the other
1451
+ two in both of the cases (Table A.10). Considering the sim-
1452
+ ilarities of spearman and concordance correlation coeffi-
1453
+ cients in terms of scoring the relation of the values, we
1454
+ preferred concordance correlation coefficient over spearman
1455
+ correlation coefficient due to the fact that concordance corre-
1456
+ lation coefficient enforces the scores to be equal to the IoUs
1457
+ imposing a tighter constraint than pearson correlation coef-
1458
+ ficient.
1459
+ The components of oLRP. Table A.11 shows the the
1460
+ components of oLRP for different detectors corresponding
1461
+ to Table 3 in the paper. As discussed in the paper, Spearman
1462
+ Loss yields better localization performance, i.e. the lowest
1463
+ localization error wrt. oLRPLoc in all experiments while
1464
+ it rarely yields the best oLRPFP or oLRPFN, implying its
1465
+ contribution to classification to be weaker than Concordance
1466
+ Loss.
1467
+ Method
1468
+ APC
1469
+ AP50
1470
+ AP75
1471
+ ATSS w/o aux head
1472
+ 38.7
1473
+ 57.6
1474
+ 41.5
1475
+ w. Pearson Corr
1476
+ 39.4
1477
+ 56.6
1478
+ 42.7
1479
+ w. Conc.Corr
1480
+ 39.8
1481
+ 57.9
1482
+ 43.2
1483
+ w. Spear.Corr
1484
+ 39.3
1485
+ 56.6
1486
+ 42.5
1487
+ Sparse-RCNN
1488
+ 37.7
1489
+ 55.9
1490
+ 40.5
1491
+ w. Pearson Corr
1492
+ 39.3
1493
+ 56.6
1494
+ 42.2
1495
+ w. Conc.Corr
1496
+ 38.9
1497
+ 57.2
1498
+ 41.8
1499
+ w. Spear.Corr
1500
+ 39.3
1501
+ 56.7
1502
+ 42.5
1503
+ Table A.10: Effect of using Pearson correlation coefficient.
1504
+
1505
+ Method
1506
+ APC ↑
1507
+ AP50 ↑
1508
+ AP75 ↑
1509
+ oLRP ↓
1510
+ oLRPLoc ↓
1511
+ oLRPFP ↓
1512
+ oLRPFN ↓
1513
+ Retina Net (Lin et al. 2020)
1514
+ 36.5
1515
+ 55.4
1516
+ 39.1
1517
+ 70.7
1518
+ 16.8
1519
+ 32.0
1520
+ 48.1
1521
+ w. Conc.Corr (Ours)
1522
+ 37.0
1523
+ 55.7
1524
+ 39.7
1525
+ 70.2
1526
+ 16.3
1527
+ 30.8
1528
+ 49.3
1529
+ w. Spear.Corr (Ours)
1530
+ 37.5
1531
+ 55.4
1532
+ 40.5
1533
+ 69.7
1534
+ 16.0
1535
+ 31.3
1536
+ 48.4
1537
+ Fovea Box (Kong et al. 2020)
1538
+ 36.4
1539
+ 56.5
1540
+ 38.6
1541
+ 70.2
1542
+ 17.0
1543
+ 30.2
1544
+ 47.2
1545
+ w. Conc.Corr (Ours)
1546
+ 37.1
1547
+ 56.4
1548
+ 39.6
1549
+ 69.7
1550
+ 16.6
1551
+ 28.6
1552
+ 48.1
1553
+ w. Spear.Corr (Ours)
1554
+ 37.0
1555
+ 55.6
1556
+ 39.3
1557
+ 70.0
1558
+ 16.3
1559
+ 31.0
1560
+ 47.9
1561
+ ATSS (Zhang et al. 2020)
1562
+ 38.7
1563
+ 57.6
1564
+ 41.5
1565
+ 69.0
1566
+ 16.0
1567
+ 29.1
1568
+ 47.0
1569
+ w. Conc.Corr (Ours)
1570
+ 39.8
1571
+ 57.9
1572
+ 43.2
1573
+ 68.2
1574
+ 15.4
1575
+ 29.1
1576
+ 46.9
1577
+ w. Spear.Corr (Ours)
1578
+ 39.3
1579
+ 56.6
1580
+ 42.5
1581
+ 68.7
1582
+ 15.2
1583
+ 31.2
1584
+ 46.7
1585
+ PAA (Kim and Lee 2020)
1586
+ 39.9
1587
+ 57.3
1588
+ 43.4
1589
+ 68.6
1590
+ 15.0
1591
+ 30.4
1592
+ 47.0
1593
+ w. Conc.Corr (Ours)
1594
+ 40.7
1595
+ 58.8
1596
+ 44.3
1597
+ 67.7
1598
+ 15.2
1599
+ 28.5
1600
+ 46.3
1601
+ w. Spear.Corr (Ours)
1602
+ 40.4
1603
+ 58.0
1604
+ 43.7
1605
+ 67.8
1606
+ 14.9
1607
+ 29.5
1608
+ 46.6
1609
+ Sparse R-CNN (Sun et al. 2021b)
1610
+ 37.7
1611
+ 55.8
1612
+ 40.5
1613
+ 69.5
1614
+ 16.0
1615
+ 28.7
1616
+ 48.6
1617
+ w. Conc.Corr (Ours)
1618
+ 38.9
1619
+ 57.2
1620
+ 41.8
1621
+ 68.1
1622
+ 15.7
1623
+ 27.7
1624
+ 47.2
1625
+ w. Spear.Corr (Ours)
1626
+ 39.3
1627
+ 56.7
1628
+ 42.5
1629
+ 68.3
1630
+ 15.3
1631
+ 27.1
1632
+ 48.4
1633
+ Table A.11: Components of oLRP for Table 3 in the paper.
1634
+
ANAzT4oBgHgl3EQfF_sv/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
C9E4T4oBgHgl3EQfeg2i/content/2301.05100v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2315b830b28b5c47116d756a306214101dba73696760abae3ef68f6fd3e5584a
3
+ size 5940510
DNAzT4oBgHgl3EQfiP3j/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:802ea24c40de8f9517d4c89c3bb8656995a34bcafe3d568bde8e54fb5abe6ef4
3
+ size 8126509
DNFKT4oBgHgl3EQfYy5L/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:600f2717cbaed8b2ebfaf2913de01454fa6d5cdafacd722aa7ac86267cc94060
3
+ size 3932205
DdAzT4oBgHgl3EQfT_w7/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd648100557961d83ab50843c3e3aa31a8c26cd7d8c74767e813791734773d05
3
+ size 77544
DdE4T4oBgHgl3EQfew2P/content/2301.05102v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f245a9035aa712174223557591f0de9c53ae3a80d4d0026f467feeb7eb05d9bf
3
+ size 1211441
DdE4T4oBgHgl3EQfew2P/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89a664f7bc35d11f4f84b28542c1db60c649dcec0256770b7cd741d117cad5ce
3
+ size 4194349
DdE4T4oBgHgl3EQfew2P/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5568114cd292df5de8aee2dee91aa78c764bdaa186e75752da0b934228986945
3
+ size 150097
FNFQT4oBgHgl3EQfRTbc/content/tmp_files/2301.13286v1.pdf.txt ADDED
@@ -0,0 +1,2283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.13286v1 [math.RT] 30 Jan 2023
2
+ THE SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
3
+ WITH TWO SINGULAR POINTS
4
+ GIORGIA FORTUNA, DAVIDE LOMBARDO,
5
+ ANDREA MAFFEI, VALERIO MELANI
6
+ Abstract. In their study of spherical representations of an affine Lie algebra
7
+ at the critical level and of unramified opers, Frenkel and Gaitsgory introduced
8
+ what they called the Weyl module Vλ corresponding to a dominant weight
9
+ λ. This object plays an important role in the theory. In [4], we introduced a
10
+ possible analogue Vλ,µ
11
+ 2
12
+ of the Weyl module in the setting of opers with two
13
+ singular points, and in the case of sl(2) we proved that it has the ‘correct’
14
+ endomorphism ring. In this paper, we compute the semi-infinite cohomology
15
+ of Vλ,µ
16
+ 2
17
+ and we show that it does not share some of the properties of the semi-
18
+ infinite cohomology of the Weyl module of Frenkel and Gaitsgory. For this
19
+ reason, we introduce a new module ˜Vλ,µ
20
+ 2
21
+ which, in the case of sl(2), enjoys all
22
+ the expected properties of a Weyl module.
23
+ 1. Introduction
24
+ Let g be a complex simple Lie algebra and let ˆg be its affinization. Choose a
25
+ Borel subalgebra and a maximal toral subalgebra, and let G be a simply connected
26
+ algebraic group with Lie algebra equal to g. As a particular case of a more general
27
+ conjecture, Frenkel and Gaitsgory proved in [6] that the semi-infinite cohomology
28
+ gives an isomorphism between the category ˆgcrit-modJG of spherical representations
29
+ of ˆg at the critical level (that is, representations of ˆg at the critical level with a
30
+ compatible action of JG = G(C[[t]])) and the category of quasi-coherent sheaves
31
+ on the space of unramified opers Opunr
32
+ 1
33
+ over gL, the Langlands dual of g.
34
+ As
35
+ they explain, the space of unramified opers is the disjoint union of its connected
36
+ components Opλ,unr
37
+ 1
38
+ , and the category of spherical representations is the product of
39
+ certain subcategories ˆgcrit-modJG,λ, where in both cases λ ranges over all dominant
40
+ weights of G. The equivalence given by semi-infinite cohomology specialises to an
41
+ equivalence between ˆgcrit-modJG,λ and the category of quasi-coherent sheaves over
42
+ Opλ,unr
43
+ 1
44
+ . The space Opλ,unr
45
+ 1
46
+ is a non-reduced indscheme, and its reduced version,
47
+ denoted by Opλ
48
+ 1, is an affine scheme.
49
+ In this paper we will denote by Zλ
50
+ 1 its
51
+ coordinate ring.
52
+ In this theory, an important role is played by the Weyl module Vλ
53
+ 1. This module
54
+ enjoys the following fundamental properties:
55
+ Endˆg(Vλ
56
+ 1) ≃ Zλ
57
+ 1
58
+ and
59
+ Ψ0(Vλ
60
+ 1) ≃ Zλ
61
+ 1 ,
62
+ where Ψn is the n-th semi-infinite cohomology group. Moreover the semi-infinite
63
+ cohomology groups Ψn(Vλ
64
+ 1) are trivial for n ̸= 0.
65
+ Dennis Gaitsgory suggested to Giorgia Fortuna to study the space of unramified
66
+ opers and spherical representations in a more general context, see [3]; in fact, the
67
+ definition of unramified opers as well as the definition of spherical representations
68
+ can be generalized in the presence of more than one singularity, raising the question
69
+ on whether or not certain statements remain true and what happens when these
70
+ singularities collide.
71
+ 1
72
+
73
+ 2
74
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
75
+ In [4] we took some steps in this direction, by studying the case of sl(2). In
76
+ particular, we introduced a version of the Weyl module Vλ,µ
77
+ 2
78
+ of critical level of the
79
+ affine Lie algebra with two singularities ˆg2. Thinking of t as a coordinate near the
80
+ first singularity and s as a coordinate near the second singularity, this is the version
81
+ of the affine Lie algebra over the ring A = C[[a]], where a = (t−s). As an A module
82
+ is equal to K2 ⊗C g ⊕ A C2 where K2 = C[[a, t]][1/t(t− a)] and C2 is a central element
83
+ (see [4], Section 3.3 for the complete definition).
84
+ We also introduced reduced scheme over A of unramified opers Opλ,µ
85
+ 2
86
+ which gener-
87
+ alize the schemes Opλ
88
+ 1. Both objects depend on two integral dominant weights λ,
89
+ µ of G, and we proved that
90
+ Endˆg2(Vλ,µ
91
+ 2
92
+ ) ≃ Zλ,µ
93
+ 2
94
+ ,
95
+ where Zλ,µ
96
+ 2
97
+ is the coordinate ring of Opλ,µ
98
+ 2
99
+ .
100
+ In this article we study the semi-infinite cohomology of Vλ,µ
101
+ 2
102
+ in order to un-
103
+ derstand what relation it has with the ring Zλ,µ
104
+ 2
105
+ in order to understand how the
106
+ equivalence Ψ0(Vλ
107
+ 1) ≃ Zλ
108
+ 1 generalizes.
109
+ This is done in Section 4, where we compute the cohomology of Vλ,µ
110
+ 2
111
+ ; in Section 5
112
+ we study the action of Z2, the center of a completion ˆU2 of the enveloping algebra
113
+ of ˆg2 at the critical level on this module (see Section 2.2).
114
+ In particular, we prove that the specialisation at a = 0 and the localization at
115
+ a ̸= 0 of the semi-infinite cohomology of Vλ,µ
116
+ 2
117
+ are isomorphic to the specialisation
118
+ and localization of Zλ,µ
119
+ 2
120
+ , respectively. However, in contrast to our intuition, we also
121
+ show the following result which says that Ψ0(Vλ,µ
122
+ 2
123
+ ) doesn’t exactly generalize the
124
+ equivalence Ψ0(Vλ
125
+ 1) ≃ Zλ
126
+ 1 as expected:
127
+ Theorem A (Theorem 4.9 and Proposition 5.3). We have Ψn(Vλ,µ
128
+ 2
129
+ ) = 0 for n ̸= 0.
130
+ Moreover, Ψ0(Vλ,µ
131
+ 2
132
+ ) is not isomorphic to Zλ,µ
133
+ 2
134
+ as a Z2-module.
135
+ For this computation, we rely on the formalism introduced by Casarin in [1],
136
+ which makes it possible to use vertex algebras also in the context of opers with
137
+ two singularities. Once this formalism is in place, for the computation of the semi-
138
+ infinite cohomology we can follow closely the approach taken by Frenkel and Ben
139
+ Zvi in [5, Chapter 15] for the case of one singularity.
140
+ In the last section, we restrict our attention to the Lie algebra sl(2) and introduce
141
+ a submodule �Vλ,µ
142
+ 2
143
+ of Vλ,µ
144
+ 2
145
+ , which is generated by the highest weight vector. We
146
+ prove that this module is the correct one to consider, in the sense that it has the
147
+ expected cohomology groups and endomorphism ring, as the following result shows.
148
+ Theorem B (Proposition 6.3, Theorem 6.5 and Proposition 6.6). If g = sl(2) then
149
+ we have Ψn(�Vλ,µ
150
+ 2
151
+ ) = 0 for n ̸= 0. Moreover, we have
152
+ Endˆg2(�Vλ,µ
153
+ 2
154
+ ) ≃ Zλ,µ
155
+ 2
156
+ and
157
+ Ψ0(�Vλ,µ
158
+ 2
159
+ ) ≃ Zλ,µ
160
+ 2
161
+ .
162
+ We now briefly explain the connection between these results and Conjecture 3.6.1
163
+ in Fortuna’s Thesis [3]. As a particular case the conjecture predicts an equivalence
164
+ between quasi-coherent sheaves over the space of unramified opers with two singu-
165
+ larities and the category of spherical representations over ˆg2: that is the space of
166
+ smooth representations of ˆg2 with a compatible action of J2G = G(C[[a, t]]).
167
+ The conjecture stated in [3] predicts an equivalence of similar categories not only
168
+ in the presence of two singularities but in the presence of n-possible singularities.
169
+ In particular for any finite set with n elements I we can define the space of opers
170
+ on the formal disc with n-singularities OpI and the subspace of unramified opers
171
+ Opunr
172
+ I
173
+ (see Section 3.5 in [3]). These are spaces over the product of n-copies of
174
+ the formal disc. These are easily seen to be factorization spaces, which means that
175
+
176
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
177
+ 3
178
+ these spaces specialise nicely when restricted along or outside the diagonals of this
179
+ product (see Section 3.1.5 in [3]). There are not substantial differences between
180
+ the treatment we do here or in [4] of Op2 and the general case. The only minor
181
+ difference is that we fix a singularity to be 0. These spaces are indschemes, and
182
+ so we can define the categories QCoh(OpI), and QCoh(Opunr
183
+ I
184
+ ) of quasi-coherent
185
+ sheaves on OpI and Opur
186
+ I
187
+ (see Section 3.5.3 in [3] for the actual definition), and the
188
+ nice factorization properties which make them factorization categories (see Section
189
+ 3.1.2 in [3]).
190
+ Similarly, for a finite set I we can define a Lie algebra ˆgI and study its smooth
191
+ representations at the critical level. The objects constructed in this way live also
192
+ on the product of n copies of the formal disc, and they also have nice factorization
193
+ properties, in particular the collection of (completions of the) enveloping algebras
194
+ specialized at the critical level ˆUI of the algebras ˆgI, is what is called a factorization
195
+ algebra (see Section 3.1.3 in [3]). As a consequence the collection of the categories of
196
+ smooth representations at the critical of the Lie algebras ˆgI, denoted by ˆgI,crit-mod
197
+ and their subcategories of spherical representations ˆgI,crit-modJG can be organized
198
+ also in a factorization category. The semi-infinite cohomology can be defined also
199
+ in this generality and defines a functor
200
+ ΨI : ˆgI,crit-mod −→ D(QCoh(OpI))
201
+ compatible with the factorization properties. While in Fortuna’s thesis all these
202
+ constructions are obtained somehow for free using the language of chiral algebras
203
+ (see Section 3.1.6 in [3]), in this paper we use the language of vertex algebras and
204
+ the formalism introduced by Casarin [1]. Let us notice that, from this point of
205
+ view, there are no differences in treating the case with two singular points and the
206
+ case with an arbitrary finite number of singular points. For example, the proof of
207
+ Theorem A above can be repeated verbatim in the case of n singular points. More
208
+ generally we believe that all the technical difficulties in the study of this problem
209
+ already appear in the case of two singularities.
210
+ It is easy to see from the factorization properties and the analogous statement
211
+ for the case of one singularity by Frenkel and Gaitsgory (see [7]) that the semi-
212
+ infinite cohomology of a ˆgI-spherical module is supported on Opunr
213
+ I
214
+ . Hence semi-
215
+ infinite cohomology restricts to a functor ΨI : ˆgI,crit-modJG −→ D(QCoh(Opunr
216
+ I
217
+ )).
218
+ Conjecture 3.6.1 in [3] states that this functor is exact and
219
+ Ψ0
220
+ I : ˆgI,crit-modJG −→ QCoh(Opunr
221
+ I
222
+ )
223
+ is an equivalence of categories. In Appendix A.2 of [3], one possible strategy to
224
+ prove this conjecture is sketched, using the factorization structure and the result
225
+ proved in the case of one singularity to deduce the general case. In particular,
226
+ thanks to Proposition A.2.2 and Proposition A.2.3 of [3] and Theorems A and B
227
+ above, a more careful study of the modules Vλ,µ
228
+ 2
229
+ or �Vλ,µ
230
+ 2
231
+ might help in finding a
232
+ proof of [3, Conjecture 3.6.1], in the case of g = sl(2).
233
+ The paper is organized as follows. In the first section we recall some definitions
234
+ from [4]. In Section 3 we recall the formalism introduced by Casarin [1] and we
235
+ use it to define semi-infinite cohomology and prove some of its basic properties. In
236
+ Sections 3 and 4 we compute the semi-infinite cohomology of Vλ,µ
237
+ 2
238
+ and in Section
239
+ 5 we compute the semi-infinite cohomology of �Vλ,µ
240
+ 2
241
+ .
242
+ We thank Luca Casarin for many useful discussions and in particular for explain-
243
+ ing to us the formalism introduced in [1]. It seems to us that Casarin’s approach
244
+ provides a natural framework to treat questions concerning opers with several singu-
245
+ larities, making the theory much more transparent than it was in [4]. In particular,
246
+
247
+ 4
248
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
249
+ the results of [1] allowed us to streamline several arguments and calculations which
250
+ would have been quite hard to carry out using the direct approach of [4].
251
+ 2. Basic constructions
252
+ In this section we recall some basic constructions from [4], to which we refer
253
+ for further details, and we introduce the notion of semi-infinite cohomology in the
254
+ context of affine Lie algebras with more than one singular point.
255
+ 2.1. Rings. We follow [4, Section 1], to which the reader is referred for more details.
256
+ We introduce the rings
257
+ A = C[[a]],
258
+ Q = C((a)),
259
+ R2 = C[[t, s]],
260
+ K2 = C[[t, s]][1/ts],
261
+ where a = t − s. Recall that we have expansion maps (given by suitable natural
262
+ inclusions) and a specialisation map (which sends a to 0 and t, s to t, see Section
263
+ 1.1 in [4])
264
+ Et : K2[a−1] −→ Q((t)),
265
+ Es : K2[a−1] −→ Q((s)),
266
+ Sp : K2 −→ C((t)).
267
+ We also write E = Et × Es : K2[a−1] −→ Q((t)) × Q((s)). Recall from [4, Section
268
+ 1.1] that Sp induces an isomorphism K2/(a) ≃ C((t)). These rings have natural
269
+ topologies: with respect to these, the image of E is dense, and E(R2[a−1]) is dense
270
+ in Q((t)) × Q((s)).
271
+ These rings are also equipped with residue maps
272
+ Res2 : K2 → A
273
+ Res1 : C((t)) → C,
274
+ Rest : Q((t)) → Q,
275
+ Ress : Q((s)) → Q,
276
+ which behave nicely with respect to specialisation and expansion (see [4, Section
277
+ 1.2]). Finally, we recall Lemma 1.10 in [4].
278
+ Lemma 2.1 ([4], Lemma 1.10). Let M, N be two A-modules and ϕ : M −→ N be
279
+ a morphism of A-modules. Then
280
+ a) if M is flat and ϕa : M[a−1] −→ N[a−1] is injective, then ϕ is injective.
281
+ b) if N is flat, ϕa : M[a−1] −→ N[a−1] is surjective, and ϕ : M/aM −→ N/aN is
282
+ injective, then ϕ is surjective.
283
+ In particular, if M and N are flat, ϕa : M[a−1] −→ N[a−1] is an isomorphism,
284
+ and ϕ : M/aM −→ N/aN is injective, then ϕ is an isomorphism.
285
+ 2.2. Affine Lie algebras and completion of the enveloping algebra. We
286
+ follow [4, Section 3]. Let g be a finite-dimensional Lie algebra over the complex
287
+ numbers and denote by κ the Killing form of g. Recall from [4, Sections 3.1 and 3.3]
288
+ that for each of the rings of the previous section we introduce an affine Lie algebra:
289
+ ˆg1 is the usual affine Lie algebra (we take for convenience the version defined by
290
+ Laurent polynomial and not Laurent series), ˆgt and ˆgs are also versions of the
291
+ usual affine Lie algebra, while ˆg2 is an A-Lie algebra having as underlying A-module
292
+ the space
293
+ ˆg2 = C[t, s][1/ts] ⊗C g ⊕ A C2.
294
+ We also introduce the Lie algebra ˆgt,s = ˆgt ⊕ ˆgs/(Ct − Cs) (see [4, Section 3.3]).
295
+ For each of these Lie algebras, we introduce the corresponding universal envel-
296
+ oping algebra, which we suitably complete and then specialize at the critical level
297
+ by imposing that the central element acts as −1/2 (see Sections 3.1 and 3.3 in [4]).
298
+ In particular
299
+ ˆU2 = lim
300
+ ←−
301
+ n
302
+ U(ˆg2)
303
+ (C2 = −1/2, tnsnC[t, s] ⊗ g)left.id.
304
+ Recall from [4, Section 3.4] that the expansion maps and the specialisation maps
305
+ induce morphisms at the level of Lie algebras.
306
+ In particular, the specialisation
307
+ map Sp : ˆU2 −→ ˆU1 induces an isomorphism between ˆU2/a ˆU2 and ˆU1, while the
308
+
309
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
310
+ 5
311
+ expansion map induces a morphism E : ˆU2[a−1] −→ ˆUt,s which is injective and has
312
+ dense image.
313
+ Moreover, the natural inclusions ˆgt ֒→ ˆgt,s and ˆgs ֒→ ˆgt,s induce a morphism
314
+ ˆUt ⊗ ˆUs −→ ˆUt,s
315
+ which is also injective and with dense image (see [4, Section 3.3]).
316
+ 2.3. Weyl modules. We follow [4, Section 6]. We choose a Borel subalgebra and
317
+ a maximal toral subalgebra of g, which we denote by b and t respectively. This
318
+ data induces a choice of weights, integral weights and dominant weights. For every
319
+ integral dominant weight λ, [7] introduced the Weyl module Vλ
320
+ 1 over the affine Lie
321
+ algebra ˆg1. The representation V = V0
322
+ 1, which has a structure of vertex algebra, will
323
+ play a particularly important role for us. This vertex algebra enjoys the following
324
+ universal property.
325
+ Lemma 2.2. Let U be a vertex algebra such that there exists a linear map x �→ ux
326
+ from g to U such that
327
+ (ux)(0)(uy) = u[x,y]
328
+ (ux)(1)(uy) = −1
329
+ 2κ(x, y)|0⟩U
330
+ (ux)(n)(uy) = 0
331
+ for all n ⩾ 2. There exists a unique morphism of vertex algebras α : V → U such
332
+ that α(xt−1|0⟩V) = ux for all x ∈ g.
333
+ Weyl modules Vλ
334
+ t and Vλ
335
+ s can also be defined for the Lie algebras ˆgt and ˆgs,
336
+ without any significant change from [7]. In [4], we introduced a generalization of
337
+ these modules. Given two dominant weights λ, µ, we consider the irreducible repres-
338
+ entations V λ and V µ of the Lie algebra g having highest weights λ, µ, respectively.
339
+ In [4, Definition 6.2], given two dominant integral weights λ, µ we introduced the
340
+ module
341
+ Vλ,µ
342
+ 2
343
+ = Indˆg2
344
+ ˆg+
345
+ 2
346
+
347
+ A ⊗C V λ ⊗C V µ�
348
+ ,
349
+ where ˆg+
350
+ 2 = C[t, s] ⊗ g ⊕ A C2 acts on A ⊗C V λ ⊗C V µ as
351
+ f(t, s)x · (p(a) ⊗ u ⊗ v) = f(0, −a)p(a) ⊗ xu ⊗ v + f(a, 0)p(a) ⊗ u ⊗ xv,
352
+ while C2 acts as −1/2. In [4] we called this object the Weyl module of weights
353
+ (λ, µ), although, as we will see, it does not have the same properties as its
354
+ 1-singularity analogue.
355
+ We also define
356
+ Wλ,µ
357
+ 1
358
+ = Indˆg1
359
+ ˆg+
360
+ 1
361
+
362
+ V λ ⊗C V µ�
363
+ ,
364
+ where ˆg+
365
+ 1 = C[t] ⊗ g ⊕ C C1 acts on V λ ⊗C V µ as f(t)x · (u ⊗ v) = f(0)x · (u ⊗ v)
366
+ and C1 acts as −1/2.
367
+ The specialisation and expansion maps are defined also for Weyl modules, and
368
+ induce the following isomorphisms [4, Lemma 6.3]:
369
+ Vλ,µ
370
+ 2
371
+ aVλ,µ
372
+ 2
373
+ ≃ Wλ,µ
374
+ 1
375
+ ,
376
+ Vλ,µ
377
+ 2
378
+ [a−1] ≃ Vλ
379
+ t ⊗Q Vµ
380
+ s .
381
+ (2.1)
382
+ 2.4. Clifford algebra. We now define the Clifford algebra with two singularities,
383
+ generalizing the construction of the classical case (see for example [5, Chapter 15]).
384
+ Let n+ be the nilpotent radical of b and set
385
+ X2 = K2 ⊗C n+ ⊕ K2 ⊗C n∗
386
+ +.
387
+ We equip X2 with the unique A-bilinear form such that K2 ⊗C n+ and K2 ⊗C n∗
388
+ +
389
+ are isotropic subspaces and
390
+ (f ⊗ x; g ⊗ ϕ) = Res2(fg) ϕ(x)
391
+
392
+ 6
393
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
394
+ for all f, g ∈ K2, x ∈ n+ and ϕ ∈ n∗
395
+ +. We denote by Cℓ2 the associated Clifford
396
+ algebra over A.
397
+ There are obvious variants of the same construction where we replace K2 with the
398
+ ring C[t±1] or one of the rings Q[t±1], Q[s±1], Q[t±1] × Q[s±1]. We obtain Clifford
399
+ algebras that we denote by Cℓ1, Cℓt, Cℓs, Cℓt,s. The algebra CℓU in [5, Section 15.1.1]
400
+ is a completion of Cℓ1.
401
+ These Clifford algebras have a natural grading called the charge and denoted by
402
+ ch. It can be defined as follows: the elements of the base ring have charge 0, while
403
+ for ψ ∈ n and ψ∗ ∈ n∗ we have
404
+ ch ψ = −1,
405
+ ch ψ∗ = 1.
406
+ (2.2)
407
+ The relations defining each Clifford algebra are homogeneous, hence the charge
408
+ induces a well-defined grading on the Clifford algebra.
409
+ We now introduce completions of the tensor product ˆU2 ⊗A Cℓ2. We define
410
+ ˆU2 ˆ⊗ACℓ2 = lim
411
+ ←−
412
+ n
413
+ ˆU2 ⊗A Cℓ2
414
+
415
+ (ts)nR2g ⊗ 1, 1 ⊗ (ts)nR2n+, 1 ⊗ (ts)nR2n∗
416
+ +
417
+
418
+ left ideal
419
+ and we notice that, as in the case of the algebra ˆU2, this A-module has a nat-
420
+ ural structure of A-algebra. We introduce the completed Clifford algebras ˆU1 ˆ⊗Cℓ1,
421
+ ˆUt ˆ⊗QCℓt, ˆUs ˆ⊗QCℓs, and ˆUt,s ˆ⊗QCℓt,s. The specialisation and expansion map de-
422
+ termine morphisms
423
+ Sp : ˆU2 ˆ⊗ACℓ2 −→ ˆU1 ˆ⊗Cℓ1
424
+ and
425
+ E : ( ˆU2 ˆ⊗ACℓ2)[a−1] −→ ˆUt,s ˆ⊗QCℓt,s.
426
+ Arguing exactly as in [4, Lemmas 3.7 and 3.9] we see that E is injective with dense
427
+ image, while the specialisation map induces an isomorphism ˆU2 ˆ⊗ACℓ2/a( ˆU2 ˆ⊗ACℓ2) ≃
428
+ ˆU1 ˆ⊗Cℓ1. Finally, we have an injective map I : ˆUt ˆ⊗QCℓt → ˆUt,s ˆ⊗QCℓt,s induced by
429
+ the natural inclusion Kt → Kt,s = Kt × Ks given by f �→ (f, 0). Similarly, we have
430
+ an injective map J : ˆUs ˆ⊗QCℓs → ˆUt,s ˆ⊗QCℓt,s. As in Section 3.3 of [4], the product
431
+ of these maps I ⊗ J : ( ˆUt ˆ⊗QCℓt) ⊗Q ( ˆUs ˆ⊗QCℓs) → ˆUt,s ˆ⊗QCℓt,s is injective with
432
+ dense image.
433
+ 2.5. Fock module. We now describe the “fermionic” Fock spaces corresponding to
434
+ the Clifford algebras defined in the previous section. As above, for the construction
435
+ in the case of one singularity we refer to [5, Section 15.1.4]: here we mimic this
436
+ definition in the case of two singularities. We define Cℓ+
437
+ 2 as the A-subalgebra of Cℓ2
438
+ generated by R2 ⊗ n+ and R2 ⊗ n∗
439
+ + and we define the Fock module
440
+ Λ2 = Cℓ2 ⊗Cℓ+
441
+ 2 A |0⟩Λ2
442
+ where R2 ⊗ n+ and R2 ⊗ n∗
443
+ + acts trivially on |0⟩Λ2. The charge (see equation (2.2))
444
+ induces a grading on the Fock space by setting
445
+ ch |0⟩Λ2 = 0.
446
+ We denote by Λn
447
+ 2 the subspace of elements of degree n. Similar constructions can be
448
+ given for all the other Clifford algebras Cℓ1, Cℓt, Cℓs, and Cℓt,s, giving Fock modules
449
+ Λ1, Λt, Λs, and Λt,s.
450
+ Specialisation and expansion, induce maps also at the level of the Fock spaces.
451
+ Arguing as in [4, Section 6] (where we considered the module Vλ,µ
452
+ 2
453
+ ), it is easy to
454
+ prove the following Lemma:
455
+ Lemma 2.3.
456
+ a) The specialisation map Sp : Λ•
457
+ 2 −→ Λ•
458
+ 1 is homogeneous of degree zero and
459
+ induces an isomorphism Λ•
460
+ 2/aΛ•
461
+ 2 ≃ Λ•
462
+ 1.
463
+ b) We have a homogeneous isomorphism of degree zero Λ•
464
+ t,s ≃ Λ•
465
+ t ⊗Q Λ•
466
+ s.
467
+
468
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
469
+ 7
470
+ c) The expansion map E : Λ•
471
+ 2[a−1] −→ Λ•
472
+ t ⊗Q Λ•
473
+ s is a homogeneous isomorphism
474
+ of degree zero.
475
+ Recall also that the Fock space Λ = Λ1 has a natural structure of vertex super-
476
+ algebra with the following universal property.
477
+ Lemma 2.4. Let U be a vertex superalgebra such that there exists a linear map
478
+ x �→ ux from n∗
479
+ + ⊕ n∗
480
+ + to the space of odd elements of U such that
481
+ (1) for all ϕ, ψ ∈ n and for all ϕ∗, ψ∗ ∈ n∗
482
+ +
483
+ (uψ)(n)(uϕ) = (uψ∗)(n)(uϕ∗) = (uψ)(m)(uψ∗) = (uψ∗)(m)(uψ) = 0
484
+ for all n ⩾ 0 and for all m ⩾ 1;
485
+ (2) (uψ)(0)(uψ∗) = (uψ∗)(0)(uψ) = ⟨ψ, ψ∗⟩|0⟩U for all ψ ∈ n and ψ∗ ∈ n∗
486
+ +.
487
+ Then there exists a unique morphism of vertex superalgebras α : Λ → U such that
488
+ α(ψt−1|0⟩Λ) = uψ and α(ψ∗t−1|0⟩Λ) = uψ∗.
489
+ 2.6. Bases. For each of the objects introduced above – base rings, enveloping al-
490
+ gebras, Clifford algebras, and Fock spaces – it is not hard to construct explicit
491
+ bases (or topological bases). We give the details in the case of two singularities.
492
+ The construction of a basis depends on the choice of a basis of C[t, s][1/ts] as an
493
+ A-module. Following [4], Section 1.1 and Equation (4.1) we introduce the following
494
+ bases, indexed by 1
495
+ 2Z: for n ∈ Z we define
496
+
497
+ zn = tnsn
498
+ zn+ 1
499
+ 2 = tn+1sn
500
+
501
+ wn = tnsn
502
+ wn+ 1
503
+ 2 = tnsn+1
504
+ The elements zm for m ∈
505
+ 1
506
+ 2Z form a basis of C[t, s][1/ts] as an A-module, and
507
+ the elements wn are the dual basis with respect to the residue bilinear form: more
508
+ precisely, one has
509
+ Res2(znw−m− 1
510
+ 2
511
+ ) = δn,m.
512
+ This specific choice of basis is not particularly important, and several others would
513
+ be possible. However, some properties need to be satisfied for our approach to
514
+ work. In particular with our choice, the elements zm (or wm) with m ⩾ 0 form an
515
+ A-basis of C[t, s].
516
+ Since K2 is an A-free module, we deduce that the enveloping algebras of g2 and
517
+ Cℓ2 are A-free modules. Moreover, as R2 is a direct summand of K2, we also deduce
518
+ that Vλ,µ
519
+ 2
520
+ and Λ2 are also A-free modules. Explicit bases of these modules, as well
521
+ as an explicit topological basis of the algebra ˆU2 ˆ⊗ACℓ2, can be obtained using the
522
+ Poincar´e-Birkhoff-Witt theorem and its analogue for Clifford algebras.
523
+ 3. Vertex algebras and semi-infinite cohomology
524
+ In this section, we recall some results obtained by Casarin [1] which allow us to
525
+ use the formalism of vertex algebras also in the context of several singularities. In
526
+ particular, using this formalism we develop a notion of semi-infinite cohomology for
527
+ ˆU2-modules.
528
+ 3.1. Distributions and vertex algebra morphisms. Let R be a complete topo-
529
+ logical associative A-algebra. Following [1, Definition 3.0.4], we denote by FA(K2, R)
530
+ the space of continuous A-linear morphisms from K2 to R and call it the space of
531
+ 2-fields.
532
+ We refer to [1] for the definitions of mutually local 2-fields (Definition
533
+ 3.1.1), of the n-products X(n)Y of two 2-fields (Definitions 3.1.2 and 3.1.7) and
534
+ of the derivative ∂(X) of a 2-field (before Definition 3.0.2). The definition in [1]
535
+ applies also to the other ring we are considering: K1, Kt, Ks, Kt,s.
536
+
537
+ 8
538
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
539
+ In particular to define n products it is necessary to choose what in [1] is called
540
+ a global coordinate (see definition ). We choose always t as a global coordinate.
541
+ More explicitly for the rings K2, K1, Kt and Ks we choose t = s + a as a global
542
+ coordinate, and for the ring Kt,s = Kt × Ks we choose (t, t) = (t, s + a).
543
+ We also use some foundational results proved in this context in [1]. In particular,
544
+ the following result will be crucial for us.
545
+ Theorem 3.1 ([1], Theorem 3.2.3). Let F be a C-linear subspace of FA(K2, R) of
546
+ mutually local 2-fields closed under derivation and n-products. Let
547
+ 1 be a field such
548
+ that
549
+ 1(f) is central for every f ∈ K2, that ∂
550
+ 1 = 0 and such that
551
+ 1(n)X = δn,−1X for
552
+ all X ∈ F. Then the vector dpace F + C1, endowed with n-products and derivation
553
+ T = ∂, is a C-vertex algebra with
554
+ 1 as vacuum vector.
555
+ It is straightforward to generalize the constructions and results in [1] to the case
556
+ of superalgebras R.
557
+ We are interested in the case where R is the superalgebra ˆU2 ˆ⊗ACℓ2. For x ∈ g,
558
+ ψ ∈ n+ and ψ∗ ∈ n∗
559
+ + we define the 2-fields
560
+ x(2)(g) = (x⊗g)⊗1Cℓ2,
561
+ ψ[2](g) = 1 ˆU2⊗(ψ⊗g),
562
+ (ψ∗)[2](g) = 1 ˆU2⊗(ψ∗⊗g) (3.1)
563
+ for all g ∈ K2. The first of these fields has even parity with respect to the superal-
564
+ gebra structure, while the second and third ones are odd. These fields are mutually
565
+ local. We consider the minimal C-linear subspace F(2) of ˆU2 ˆ⊗ACℓ2 closed under
566
+ n-products and derivation and containing the fields (3.1). Moreover, we define
567
+ 12(f) = Res2(f)
568
+
569
+ 1 ˆU2 ⊗ 1Cℓ2
570
+
571
+ .
572
+ It is easy to check that this data satisfies the hypothesis of Theorem 3.1. Therefore,
573
+ V(2) = F(2) + C12 has a structure of vertex superalgebra, and by the universal
574
+ properties of the vertex algebra V (Lemma 2.2) and of the vertex superalgebra Λ•
575
+ (Lemma 2.4) it follows that there exists a morphism of vertex superalgebras
576
+ Φ(2) : V ⊗C Λ• −→ V(2).
577
+ (3.2)
578
+ This homomorphism will allow us to easily introduce many elements in V(2), hence
579
+ also in ˆU2 ˆ⊗ACℓ2.
580
+ Similar constructions apply if the algebra ˆU2 ˆ⊗ACℓ2 is replaced by the algebras
581
+ ˆU1 ˆ⊗Cℓ1, ˆUt ˆ⊗QCℓt, etc. Hence, we construct the fields x(1), ψ[1], x(t), ψ[t], the vertex
582
+ superalgebras V(1), V(t), and homomorphisms of vertex algebras Φ(1) : V⊗C Λ• −→
583
+ V(1), Φ(t) : V ⊗C Λ• −→ V(t), etc.
584
+ Notice that we have a specialisation morphism SpF : FA(K2, ˆU2 ˆ⊗Cℓ2) −→
585
+ FC(K1, ˆU1 ˆ⊗Cℓ1) and an expansion map EF : FA(K2, ˆU2 ˆ⊗ACℓ2) −→ FQ(Kt,s, ˆUt,s ˆ⊗QCℓt,s),
586
+ determined by the conditions
587
+
588
+ SpF(X)
589
+
590
+ (Sp(f)) = Sp(X(f))
591
+ and
592
+
593
+ EF(X)
594
+
595
+ (E(f)) = E(X(f)).
596
+ These maps commute with n-products and derivations and satisfy SpF(12) =
597
+ 11
598
+ and EF(12) =
599
+ 1t,s. Moreover, by construction they satisfy
600
+ SpF(x(2)) = x(1)
601
+ and
602
+ EF(x(2)) = x(t,s)
603
+ for x ∈ g. Similar relations hold for ψ[2] and (ψ∗)[2]. This implies in particular that
604
+ the homomorphisms SpF and EF restrict to homomorphisms of vertex algebras
605
+ Sp : V(2) −→ V(1) and E : V(2) −→ V(t,s) such that
606
+ Sp ◦Φ(2) = Φ(1)
607
+ E ◦ Φ(2) = Φ(t,s).
608
+ We can also describe the morphism Φ(2) through the morphisms Φ(t) and Φ(s).
609
+ Recall from the end of Section 2.4 the maps I, J from ˆUt ˆ⊗QCℓt and ˆUs ˆ⊗QCℓs to
610
+ ˆUt,s ˆ⊗QCℓt,s. These maps induce maps at the level of fields IF : FQ(Kt, ˆUt ˆ⊗QCℓt) →
611
+
612
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
613
+ 9
614
+ FQ(Kt,s, ˆUt,s ˆ⊗QCℓt,s) and JF : FQ(Ks, ˆUs ˆ⊗QCℓs) → FQ(Kt,s, ˆUt,s ˆ⊗QCℓt,s), given
615
+ by
616
+ IF(X)(f, g) = I(X(f))
617
+ and
618
+ JF(X)(f, g) = J(X(g))
619
+ for all (f, g) ∈ Kt×Ks = Kt,s. The maps IF and JF preserve n-products, commute
620
+ with derivations, and satisfy IF(1t) + JF(1s) =
621
+ 1t,s. Moreover we notice that I(u)
622
+ and J(v) commute for all u ∈ ˆUt ˆ⊗QCℓt and v ∈ ˆUs ˆ⊗QCℓs. By the discussion in [1,
623
+ Section 7.2], this implies
624
+ IF ◦ Φ(t) + JF ◦ Φ(s) = Φ(t,s).
625
+ This is the only statement where the choice of the global coordinate
626
+ made in Section 3.1 is relevant.
627
+ 3.2. Semi-infinite cohomology. We now define a notion of semi-infinite cohomo-
628
+ logy for ˆU2-modules, in analogy with the corresponding notion for ˆU1-modules described
629
+ for example in [5, Chapter 15]. To this end, we introduce some notation for elements
630
+ in the vertex superalgebra V ⊗ Λ•. As in the case of ˆU1, to describe these ele-
631
+ ments we choose a basis Ja of g compatible with the decomposition g = n−⊕t⊕n+,
632
+ where n+ is the nilpotent radical of b and n− is the nilpotent radical of the opposite
633
+ Borel subalgebra. We denote by cb,d
634
+ e
635
+ the structure coefficients of the Lie bracket
636
+ with respect to this basis. We denote by Φ ⊔ Γ the indexing set of the basis Ja, so
637
+ that, if α ∈ Φ, then Jα = eα = f−α is a root vector of weight α and, if α ∈ Γ, then
638
+ Jα ∈ t. We also denote by ψ∗
639
+ α for α ∈ Φ+ the basis of n∗
640
+ + dual to the basis eα of
641
+ n+.
642
+ With each element in n+ ⊗ · · · ⊗ n+ ⊗ n∗
643
+ + ⊗ · · · ⊗ n∗
644
+ + we associate an element in
645
+ the vertex superalgebra Λ as follows:
646
+ N(ψ1 ⊗ . . . ψℓ ⊗ ψ∗
647
+ 1 ⊗ · · · ⊗ ψ∗
648
+ m) = (ψ1t−1) · · · (ψℓt−1) · (ψ∗
649
+ 1t−1) · · · (ψ∗
650
+ mt−1) · |0⟩Λ.
651
+ Similarly, with an element in g ⊗ n∗
652
+ + we associate an element in the vertex
653
+ superalgebra V ⊗ Λ∗ by setting
654
+ M(x ⊗ ψ∗) = (xt−1) · |0⟩V ⊗ (ψ∗t−1) · |0⟩Λ.
655
+ Following [5, Chapter 15] we define
656
+ q =M(I) − 1
657
+ 2|0⟩V ⊗ N(B) =
658
+
659
+ α∈Φ+
660
+ (eαt−1) · |0⟩V ⊗ (ψ∗
661
+ αt−1) · |0⟩Λ
662
+ − 1
663
+ 2
664
+
665
+ α,β∈Φ+
666
+ cα,β
667
+ α+β |0⟩V ⊗ (eα+βt−1) · (ψ∗
668
+ αt−1) · (ψ∗
669
+ βt−1) · |0⟩Λ,
670
+ where I ∈ g ⊗ n∗
671
+ + represents the inclusion of n+ in g and B ∈ n+ ⊗ n∗
672
+ + ⊗ n∗
673
+ + is the
674
+ Lie bracket. We now define the boundary operator d(2)
675
+ std ∈ ˆU2 ˆ⊗ACℓ2 as follows:
676
+ d(2)
677
+ std :=
678
+
679
+ Φ(2)(q)
680
+
681
+ (1).
682
+ The boundary operator that we will use to define the semi-infinite cohomology
683
+ is a deformation of d(2)
684
+ std. Let ψ∗
685
+ pr = �
686
+ α simple ψ∗
687
+ α ∈ n∗
688
+ +, and define
689
+ χ(2) = 1 ˆU2 ⊗ ψ∗
690
+ pr = Φ(2)(N(ψ∗
691
+ pr))(1) ∈ ˆU2 ⊗A Cℓ2.
692
+ Similar constructions yield χ(1), χ(t), χ(s), and χ(t,s). Finally set
693
+ d(2) = d(2)
694
+ std + χ(2).
695
+ As we will check in Section 3.3, this is an element that squares to zero, and therefore,
696
+ it can be used to define the semi-infinite cohomology of a ˆU2-module.
697
+
698
+ 10
699
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
700
+ Similarly we can define d(1)
701
+ std, χ(1), d(1), d(t)
702
+ std, χ(t), d(t), and so on, as elements of
703
+ the corresponding superalgebras. By the discussion at the end of Section 3.1 we
704
+ have
705
+ Sp(d(2)) = d(1),
706
+ E(d(2)) = d(t,s),
707
+ and
708
+ I(d(t)) + J(d(s)) = d(t,s).
709
+ Definition 3.2. Let M be an ˆU2 module. Consider the ˆU2 ˆ⊗ACℓ2-graded module
710
+ M ⊗AΛ•
711
+ 2, where the grading is given by charge on Λ•
712
+ 2. The element d(2) acts on this
713
+ module as a boundary operator of degree one. Define Ψn(M) as the corresponding
714
+ cohomology of degree n.
715
+ Similar constructions apply to modules over the algebras ˆU1, ˆUt, ˆUs or ˆUt,s.
716
+ Let Z2 be the center of the algebra ˆU2, and similarly introduce the center Z1 of
717
+ ˆU1 and the centers Zt and Zs of ˆUt and ˆUs. If M is an ˆU2-module, the action of Z2
718
+ on M ⊗A Λ•
719
+ 2 commutes with the differential d(2) and preserves the charge, hence
720
+ induces an action of Z2 on the semi-infinite cohomology groups of M. A similar
721
+ action is defined in the case of ˆU1-modules or ˆUt-modules.
722
+ Recall that a module M over a topological algebra R is said to be smooth if the
723
+ action of R on M is continuous with respect to the discrete topology on M. Notice
724
+ that, if M is a smooth ˆU2-module, then, since the map E has dense image, the
725
+ action of ˆU2 on M extends to a smooth action of ˆUt,s on M[a−1]. Similarly, if Mt
726
+ is a smooth ˆUt-module and Ms is a smooth ˆUs-module, then there is an induced
727
+ action of ˆUt,s on Mt ⊗Q Ms. In the next section we will use the following properties
728
+ of the semi-infinite cohomology.
729
+ Lemma 3.3.
730
+ a) Given a short exact sequence of ˆU2-modules, there is an induced long exact
731
+ sequence in semi-infinite cohomology.
732
+ b) Let M be an ˆU1-module. The semi-infinite cohomology of M as an ˆU1-module
733
+ is isomorphic to the semi-infinite cohomology of M considered as an ˆU2-
734
+ module through the map Sp.
735
+ c) Let M be an ˆUt,s-module. The semi-infinite cohomology of M as an ˆUt,s-
736
+ module is isomorphic to the semi-infinite cohomology of M considered as an
737
+ ˆU2-module through the map E. In particular, this applies to the case where
738
+ M = N[a−1] is the localization of a smooth ˆU2-module N.
739
+ d) Let Mt be a smooth ˆUt-module, Ms be a smooth ˆUs-module, and let M :=
740
+ Mt ⊗Q Ms, regarded as a ˆUt,s-module.
741
+ The complex computing the semi-
742
+ infinite cohomology of M is the total complex associated with the double com-
743
+ plex given by the tensor product of the complex computing the semi-infinite
744
+ cohomology of Mt and that of Ms. In particular, being the base ring Q a
745
+ field, if Mt and Ms have non zero semi-infinite cohomology only in degree
746
+ zero, then M considered as an ˆUt,s-module has semi-infinite cohomology only
747
+ in degree zero and the cohomology in degree zero is isomorphic to the product
748
+ of the tensor product of Ψ0(Mt) and Ψ0(Ms).
749
+ Proof. Part a) follows from the fact that Λ2 is a free module over A.
750
+ Part b) follows from the fact that, since a ∈ A acts trivially on M, by Lemma
751
+ 2.3 a) we have
752
+ M ⊗A Λ•
753
+ 2 ≃ M ⊗C
754
+ Λ•
755
+ 2
756
+ aΛ•
757
+ 2
758
+ ≃ M ⊗C Λ•
759
+ 1
760
+ and moreover, by construction, d(1) = Sp(d(2)).
761
+ Part c) follows from the fact that, since the action of a on M is invertible, by
762
+ Lemma 2.3 c) we have
763
+ M ⊗A Λ•
764
+ 2 = M ⊗A Λ•
765
+ 2[a−1] = M ⊗A Λ•
766
+ t,s
767
+
768
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
769
+ 11
770
+ and, moreover, by construction, d(t,s) = E(d(2)).
771
+ Finally, from Lemma 2.3 c) we have
772
+ (Mt ⊗Q Λ•
773
+ t) ⊗Q (Ms ⊗Q Λ•
774
+ s) ≃ M ⊗Q Λ•
775
+ t,s.
776
+ Part d) then follows from the equality d(t,s) = I(d(t)) + J(d(s)).
777
+
778
+ 3.3. Commutation relations. For their computation of the semi-infinite cohomo-
779
+ logy of V, Frenkel and Ben Zvi (see [5] Chapter 15) relied on the choice of a clever
780
+ basis of V ⊗ Λ. For all x ∈ g, they define
781
+ ˆx = xt−1 · |0⟩V ⊗ |0⟩Λ + N(αx),
782
+ where αx ∈ n∗
783
+ +⊗n∗
784
+ + represents the linear map n+ → n+ obtained as the composition
785
+ of adx : n+ −→ n+, the natural projection π : g −→ g/b−, and the inverse of the
786
+ isomorphism n+ ∼= g/b− induced by π. Using the map Φ(2) from Equation (3.2) we
787
+ define
788
+ ˆx(2) = Φ(2)(ˆx).
789
+ To compute the semi-infinite cohomology of Vλ,µ
790
+ 2
791
+ we will need some information
792
+ about the commutation relations among the elements ˆx(2), ψ[2], and (ψ∗)[2], and
793
+ the boundary operators. These are easy to compute because all these objects are
794
+ constructed through the map Φ(2). Let us make this remark precise. Given an
795
+ element x in V⊗Λ, denote by x(z) the corresponding field in the vertex superalgebra
796
+ and by x(2) : K2 −→ ˆU2 ˆ⊗Cℓ2 the 2-field Φ(2)(x).
797
+ For any choice of elements
798
+ x, y ∈ V ⊗ Λ, the commutator of the corresponding fields is given by
799
+ [x(z), y(w)] =
800
+
801
+ n⩾0
802
+ 1
803
+ n!(x(n)y)(w)∂n
804
+ wδ(z − w).
805
+ We have a similar Operator Product Expansion formula for 2-fields (see [1], Pro-
806
+ position 3.1.4)
807
+ [x(2)(f), y(2)(g)] =
808
+
809
+ n⩾0
810
+ 1
811
+ n!
812
+
813
+ (x(2))(n)(y(2))
814
+
815
+ (g ∂nf),
816
+ where the product (x(2))(n)(y(2))) is the product of 2-fields defined in [1]. However,
817
+ since Φ(2) is a map of vertex algebras we get (x(2))(n)(y(2)) = (x(n)y)(2). Hence, if
818
+ we know the commutator of x(z), y(w), we immediately deduce that of x(2) and
819
+ y(2).
820
+ Similar considerations apply when we want to compute [x(2)(1), y(2)(g)] assuming
821
+ we know the commutator of x(0) and y(w). In this case, the usual OPE formula
822
+ gives [x(0), y(w)] = (x(0)y)(w), while the OPE formula for 2-fields gives
823
+ [x(2)(1), y(2)(g)] =
824
+
825
+ (x(2))(0)(y(2))
826
+
827
+ (g).
828
+ Using again the fact that Φ(2) is a map of vertex algebras, we get
829
+ [x(2)(1), y(2)] = Φ(2)
830
+ ��
831
+ [x(0), y(w)](|0⟩V ⊗ |0⟩Λ)
832
+
833
+ |w=0
834
+
835
+ .
836
+ These formulas are enough to determine all commutation relations among the ele-
837
+ ments ˆx(2), ψ[2], (ψ∗)[2] and the boundary operators from those obtained by Frenkel
838
+ and Ben Zvi in [5, Chapter 15], without the need of any further computation. We
839
+ summarise these results in Proposition 3.4 below, which (in light of the above)
840
+ follows from Sections 15.2.4 and 15.2.9 of [5].
841
+ In the statement, we denote by
842
+ epr, hpr, fpr the sl(2)-triple such that fpr = �
843
+ α simple λαfα, κ(fpr, eα) = 1 for all
844
+ simple roots α and hpr ∈ t.
845
+
846
+ 12
847
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
848
+ Proposition 3.4. For all x ∈ g, y ∈ b, z ∈ n+, w ∈ b−, ψ ∈ n+ and ψ∗ ∈ n∗
849
+ + we
850
+ have:
851
+ a)
852
+ (d(2)
853
+ std)2 = 0,
854
+ [d(2)
855
+ std, χ(2)]+ = 0,
856
+ b)
857
+ (χ(2))2 = 0,
858
+ (d(2))2 = 0,
859
+ c) [χ(2), ψ[2]]+ = ⟨ψ∗
860
+ pr, ψ⟩
861
+ 1,
862
+ [χ(2), (ψ∗)[2]]+ = 0,
863
+ d)
864
+ [χ(2), ˆz(2)] = 0,
865
+ [χ(2), ˆw(2)] =
866
+
867
+ α ∈Φ+
868
+ κ([fpr, z], eα)ψ∗
869
+ α,
870
+ e) [d(2)
871
+ std, ψ[2]]+ = ˆψ(2),
872
+ [d(2)
873
+ std, (ψ∗)[2]]+ = −1
874
+
875
+
876
+ 1 ˆU2 ⊗ N(ψ∗ ◦ B)
877
+
878
+ ,
879
+ f) [d(2)
880
+ std, ˆy(2)]+ = 0
881
+ where in the second formula of e) the element ψ∗ ◦ B ∈ n∗
882
+ + ⊗ n∗
883
+ + represents the
884
+ composition of the bracket with the map ψ∗. Moreover, if we choose a basis Ja as
885
+ at the beginning of Section 3.2, for all γ ∈ Φ+ we have
886
+ [d(2)
887
+ std, ˆf (2)
888
+ γ ]+ =
889
+
890
+ α∈Φ+,a∈Φ−⊔Γ
891
+ cα,−γ
892
+ a
893
+ ( ˆJa)(2)
894
+ (−1)(ψ∗
895
+ α)[2]
896
+ − 1
897
+ 2 κ(e−γ, fγ) ∂(ψ∗
898
+ −γ)[2] −
899
+
900
+ α,β∈Φ+, a∈Φ⊔Γ
901
+ cα,a
902
+ β cβ,−γ
903
+ a
904
+ ∂(ψ∗
905
+ α)[2]
906
+ By specialisation and localization we obtain that similar formulas hold also in
907
+ the case of our various other superalgebras ˆUt ˆ⊗Cℓt, ˆUt,s ⊗ Cℓt,s, . . .
908
+ 4. The semi-infinite cohomology of Vλ,µ
909
+ 2
910
+ In this section we compute the semi-infinite cohomology of Vλ,µ
911
+ 2
912
+ . We denote by
913
+ C•
914
+ 2 = C•
915
+ 2(λ, µ) the complex Vλ,µ
916
+ 2
917
+ ⊗A Λ•
918
+ 2 and similarly we introduce the complexes
919
+ C•
920
+ t = C•
921
+ t (λ) = Vλ
922
+ t ⊗Q Λ•
923
+ t and C•
924
+ s = C•
925
+ s (µ) = Vµ
926
+ s ⊗Q Λ•
927
+ s. We further introduce
928
+ the complexes C•
929
+ 1(ν) = Vν
930
+ 1 ⊗C Λ•
931
+ 1 and C•
932
+ 1(λ, µ) = Wλ,µ
933
+ 1
934
+ ⊗C Λ•
935
+ 1. Hence, we have
936
+ C•
937
+ 1(λ, µ) ≃ ⊕C•
938
+ 1(ν), where the sum ranges over the irreducible factors of V λ ⊗ V µ
939
+ counted with multiplicity.
940
+ We denote by Op1 the indscheme of opers on the punctured disc and, for every
941
+ integral dominant weight ν, we write Opν
942
+ 1 for the associated connected component
943
+ of the space of unramified opers without monodromy, equipped with its reduced
944
+ structure (see, for example, [7] for a more complete definition). We also denote by
945
+ vν a highest weight vector in the g-module V ν. Feigin and Frenkel [2] constructed
946
+ an isomorphism F1 : Funct(Op1) −→ Z1 between the space of functions over Op1
947
+ and the center Z1 of ˆU1. Recall the following result, which combines Theorem 1,
948
+ Theorem 2 and the proof of Proposition 1 in [7].
949
+ Theorem 4.1 (Frenkel and Gaitsgory [7]). The action of Z1 on Vν
950
+ 1 and the Feigin-
951
+ Frenkel isomorphism induce an isomorphism
952
+ G1 : Funct(Opν
953
+ 1) −→ Endˆg1(Vν
954
+ 1).
955
+ Moreover, the element vν ⊗|0⟩Λ is a cocycle in C•
956
+ 1(ν) and the map z ↦ [z · vν ⊗ |0⟩Λ]
957
+ from Z1 to Ψ0(Vν
958
+ 1) induces isomorphisms of Z1-modules
959
+ Funct(Opν
960
+ 1) ≃ Endˆg1(Vν
961
+ 1) ≃ Ψ0(Vν
962
+ 1).
963
+ Finally, Ψn(Vν
964
+ 1) vanishes for all n ̸= 0.
965
+ The result of Frenkel and Gaitsgory generalises easily to the case of the modules
966
+
967
+ t and Vµ
968
+ s .
969
+
970
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
971
+ 13
972
+ By Lemma 2.1 and Lemma 2.3, as in the proof of Lemma 3.3, by the compatibility
973
+ of boundary operators we get homomorphisms of complexes Sp : C•
974
+ 2 → C•
975
+ 1(λ, µ)
976
+ and E : C•
977
+ 2 → C•
978
+ t (λ) ⊗Q C•
979
+ s (µ). These induce isomorphisms
980
+ C•
981
+ 2[a−1] ≃ C•
982
+ t (λ) ⊗Q C•
983
+ s (µ)
984
+ and
985
+ C•
986
+ 2
987
+ aC•
988
+ 2
989
+ ≃ C•
990
+ 1(λ, µ).
991
+ (4.1)
992
+ From these isomorphisms and Theorem 4.1 it follows easily that Ψn(Vλ,µ
993
+ 2
994
+ ) is zero
995
+ for n ̸= 0, 1, and we could also get information on the cohomology in degrees zero
996
+ and one.
997
+ However, it is easier to compute these cohomology groups directly by adapting
998
+ the strategy employed by Frenkel and Ben Zvi in [5, Chapter 15]. In order to do
999
+ this, we now introduce certain subcomplexes of C•
1000
+ 2. We denote by 1V0,0
1001
+ 2
1002
+ the element
1003
+ 1 ∈ A ⊗C C ⊗C C ⊂ V0,0
1004
+ 2 .
1005
+ Definition 4.2. We denote by E•
1006
+ 2 the subcomplex of C•
1007
+ 2(0, 0) spanned by elements
1008
+ of the form
1009
+ ˆx(2)
1010
+ 1 (g1) · · · ˆx(2)
1011
+ a (ga) · 1V0,0
1012
+ 2
1013
+ ⊗ ψ(2)
1014
+ 1 (ℓ1) · · · ψ(2)
1015
+ b (ℓb) · |0⟩Λ2
1016
+ (4.2)
1017
+ where xi, ψi ∈ n+ and g1, . . . , ga, ℓ1, . . . ℓb ∈ K2. By the commutation relations of
1018
+ Section 3.3 we see that E•
1019
+ 2 is a subcomplex of C•
1020
+ 2(0, 0).
1021
+ We define also analogous complexes E•
1022
+ t , E•
1023
+ s and E•
1024
+ 1. These complexes were de-
1025
+ noted by C′ in [5] and by C0 in [7]. By construction, these subcomplexes are compat-
1026
+ ible with specialisation and localization, and there are isomorphisms E•
1027
+ 2/aE•
1028
+ 2 ≃ E•
1029
+ 1
1030
+ and E•
1031
+ 2[a−1] ≃ E•
1032
+ t ⊗Q E•
1033
+ s.
1034
+ Definition 4.3. We denote by D•
1035
+ 2 = D•
1036
+ 2(λ, µ) the subcomplex of C•
1037
+ 2(λ, µ) spanned
1038
+ by elements of the form
1039
+ ˆy(2)
1040
+ 1 (h1) · · · ˆy(2)
1041
+ c (hc) · w ⊗ (ψ∗
1042
+ 1)(2)(k1) · · · (ψ∗
1043
+ d)(2)(kd) · |0⟩Λ2
1044
+ (4.3)
1045
+ where w ∈ V λ ⊗ V µ, yi ∈ b− = n− + t, ψ∗
1046
+ i ∈ n∗
1047
+ + and h1, . . . , hc, k1, . . . , kd ∈ K2.
1048
+ By the commutation relations of Section 3.3 we see that D•
1049
+ 2 is a subcomplex of
1050
+ C•
1051
+ 2(λ, µ).
1052
+ We define also analogous complexes D•
1053
+ t (λ), D•
1054
+ s(µ) and D•
1055
+ 1(ν). These complexes
1056
+ were denoted by C0 in [5] and by C′ in [7]. Finally, we denote by D•
1057
+ 1(λ, µ) the
1058
+ analogous subcomplex of C•
1059
+ 1(λ, µ). By construction, these subcomplexes are com-
1060
+ patible with specialisation and localization, and there are isomorphisms D•
1061
+ 2/aD•
1062
+ 2 ≃
1063
+ D•
1064
+ 1(λ, µ) and D•
1065
+ 2[a−1] ≃ D•
1066
+ t (λ) ⊗Q D•
1067
+ s(µ).
1068
+ There is an isomorphism of complexes E•
1069
+ 2 ⊗ D•
1070
+ 2 −→ C•
1071
+ 2 defined by
1072
+
1073
+ x · 1V0,0
1074
+ 2
1075
+ ⊗ ψ · |0⟩Λ2
1076
+
1077
+
1078
+
1079
+ y · w ⊗ ψ∗ · |0⟩Λ2
1080
+
1081
+ �−→ x · y · w ⊗ ψ · ψ∗ · |0⟩Λ2,
1082
+ where x = ˆx(2)
1083
+ 1 (g1) · · · ˆx(2)
1084
+ a (ga) and ψ = ψ(2)
1085
+ 1 (ℓ1) · · · ψ(2)
1086
+ b (ℓb) are as in Equation (4.2),
1087
+ y = ˆy(2)
1088
+ 1 (h1) · · · ˆy(2)
1089
+ c (hc) and ψ∗ = (ψ∗)(2)(k1) · · · (ψ∗)(2)(kd) are as in Equation
1090
+ (4.3), and w is an element of V λ ⊗ V µ.
1091
+ We now compute the cohomology of the complex E•
1092
+ 2. We will need the following
1093
+ result by Frenkel and Ben Zvi.
1094
+ Lemma 4.4 ([5, Section 15.2.6]). Hn(E•
1095
+ 1) = 0 for n ̸= 0 and Ψ0(E•
1096
+ 1) = C[|0⟩V ⊗
1097
+ |0⟩Λ].
1098
+ This result generalizes easily to the case of E•
1099
+ t and E•
1100
+ s. Localizing and special-
1101
+ izing, we deduce the following lemma.
1102
+ Lemma 4.5. Hn(E•
1103
+ 2) = 0 for n ̸= 0 and H0(E•
1104
+ 2) = A[1V0,0
1105
+ 2
1106
+ ⊗ |0⟩Λ2].
1107
+
1108
+ 14
1109
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
1110
+ Proof. By definition, the complex E•
1111
+ 2 is concentrated in non-positive degrees. Hence,
1112
+ the long exact sequence induced by
1113
+ 0
1114
+ � E•
1115
+ 2
1116
+
1117
+ � E•
1118
+ 2
1119
+ � E•
1120
+ 1
1121
+ � 0
1122
+ implies that Hn(E•
1123
+ 2) is torsion free for every n, and that the specialisation of
1124
+ H0(E•
1125
+ 2) is isomorphic to H0(E•
1126
+ 1). Since semi-infinite cohomology commutes with
1127
+ localization (Lemma 3.3), using Lemma 2.1 and Lemma 4.4 we get the desired
1128
+ result.
1129
+
1130
+ We now compute the cohomology of D•
1131
+ 2. The strategy is similar, but the ar-
1132
+ gument is less straightforward since we do not have an explicit representative for
1133
+ H0(D•
1134
+ 1). Following the strategy in [5], we introduce the following bigraded struc-
1135
+ ture on D•
1136
+ 2. Recall that the height ht(α) of a root α is equal to the sum of the
1137
+ coefficients of α when written as a sum of simple roots. Let also epr, hpr, fpr be an
1138
+ sl(2)-triple such that fpr = �
1139
+ α simple fα and hpr belongs to t.
1140
+ Definition 4.6. We define a bidegree, with values in 1
1141
+ 2Z × 1
1142
+ 2Z and denoted by
1143
+ bideg, as follows. On elements of ˆg2, we set
1144
+ bideg(x ⊗ g) = (−n, n)
1145
+ if x ∈ g is such that [hpr, x] = 2 n x and g ∈ K2. We set also the bidegree of the
1146
+ central element C2 ∈ ˆg2 to be (0, 0). This induces a bidegree on U(ˆg2). On the
1147
+ space X2 = K2 ⊗ n+ ⊕ K2 ⊗ n∗
1148
+ + (see Section 2.4) we define
1149
+ bideg eα ⊗ g = (− ht(α), −1 + ht(α))
1150
+ bideg ψ∗
1151
+ α ⊗ g = (ht(α), 1 − ht(α))
1152
+ for α a positive root and g any element of K2. This induces a bidegree on the
1153
+ Clifford algebra Cℓ2. Moreover, if W is any finite-dimensional representation of g,
1154
+ then we set
1155
+ bideg w = (−n, n)
1156
+ if w ∈ W is such that hpr · w = 2 n w. These choices induce a bidegree on the
1157
+ module C•
1158
+ 2(λ, µ), and the element ˆx(2)(g) is homogeneous of bidegree (−n, n) if
1159
+ [hpr, x] = 2 n x. Finally, notice that if an element has bidegree (p, q), then it has
1160
+ charge p + q. In particular, we introduce the submodule Dp,q
1161
+ 2
1162
+ of elements of Dp+q
1163
+ 2
1164
+ of bidegree (p, q).
1165
+ We notice also that bideg d(2)
1166
+ std = (0, 1) and that bideg χ(2) = (1, 0). In particu-
1167
+ lar, D•,•
1168
+ 2
1169
+ is a double complex and D•
1170
+ 2 is the associated total complex. Following
1171
+ Frenkel and Ben Zvi [5, Chapter 15], the cohomology of the rows of this double
1172
+ complex is easy to describe. Let a be the centralizer of fpr in g. Recall from [5,
1173
+ Lemma 15.1.3 and Section 15.2.9] that the space spanned by monomials of the form
1174
+ (ˆp1)n1 · · · (ˆpk)nk ·|0⟩V ⊗|0⟩Λ with pi ∈ a generates a commutative vertex subalgebra
1175
+ F1 of V ⊗ Λ• isomorphic to S•(a ⊗ t−1C[t−1]). As in Section 3.3, it follows that for
1176
+ x, y ∈ a the fields ˆx(2) and ˆy(2) commute.
1177
+ We define F2(λ, µ) as the span of elements of the form
1178
+ ˆx(2)
1179
+ 1 (g1) · · · ˆx(2)
1180
+ k (gk) · (v ⊗ |0⟩Λ2) ∈ Vλ,µ
1181
+ 2
1182
+ ⊗A Λ•
1183
+ 2
1184
+ with x1, . . . , xk ∈ a and v ∈ V λ ⊗ V µ.
1185
+ Notice that all these elements have
1186
+ charge equal to zero, and that the space F2(λ, µ) splits as a direct sum F2(λ, µ) =
1187
+
1188
+ q F −q,q
1189
+ 2
1190
+ (λ, µ) according to the bidegree introduced above. Moreover, by Propos-
1191
+ ition 3.4 d), these elements are annihilated by the action of χ(2).
1192
+ Similarly we construct subspaces F −q,q
1193
+ 1
1194
+ (ν) ⊂ Vν
1195
+ 1 ⊗C Λ•
1196
+ 1, F −q,q
1197
+ t
1198
+ (λ) ⊂ Vλ
1199
+ t ⊗Q Λ•
1200
+ t ,
1201
+ F −q,q
1202
+ s
1203
+ (µ) ⊂ Vµ
1204
+ s ⊗Q Λ•
1205
+ s, and F −q,q
1206
+ 1
1207
+ (λ, µ) ⊂ Wλ,µ
1208
+ 1
1209
+ ⊗C Λ•
1210
+ 1. In particular, F −q,q
1211
+ 1
1212
+ (λ, µ) =
1213
+
1214
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
1215
+ 15
1216
+
1217
+ ν F −q,q
1218
+ 1
1219
+ (ν) where the sum is over all irreducible factors of V λ ⊗C V µ. By con-
1220
+ struction, the specialisation and localization maps induce isomorphisms
1221
+ F −q,q
1222
+ 2
1223
+ (λ, µ)
1224
+ aF −q,q
1225
+ 2
1226
+ (λ, µ)
1227
+ ≃ F −q,q
1228
+ 1
1229
+ (λ, µ)
1230
+ and
1231
+ F −q,q
1232
+ 2
1233
+ (λ, µ)[a−1] ≃
1234
+
1235
+ b+c=q
1236
+ F −b,b
1237
+ t
1238
+ (λ) ⊗Q F −c,c
1239
+ s
1240
+ (µ).
1241
+ Recall the following result on the cohomology of D•,q
1242
+ 1
1243
+ with respect to the bound-
1244
+ ary χ(1).
1245
+ Lemma 4.7 ([5, Lemma 15.2.10] and [7]). Let 2pν = ⟨ν, hpr⟩.
1246
+ a) Dp,q
1247
+ 1 (ν) = 0 for q > pν and for p < −q. In particular, Dp,q
1248
+ 1
1249
+ = 0 for q > pλ+µ
1250
+ and for p < −q;
1251
+ b) Hn(D•,q
1252
+ 1 (ν)) = 0 for n ̸= −q. In particular, Hn(D•,q
1253
+ 1 (λ, µ)) = 0 for n ̸= −q;
1254
+ c) The map v �→ [v] from F −q,q
1255
+ 1
1256
+ (ν) to H−q(D•,q
1257
+ 1 (ν)) is an isomorphism.
1258
+ Finally, it follows from c) that the map v �→ [v] from F −q,q
1259
+ 1
1260
+ (λ, µ) to H−q(D•,q
1261
+ 1 (λ, µ))
1262
+ is also an isomorphism.
1263
+ Similar results hold for the complexes D•,q
1264
+ t
1265
+ (λ) and D•,q
1266
+ s (µ). From this result we
1267
+ deduce the cohomology of the complex D•,q
1268
+ 2
1269
+ with respect to the boundary operator
1270
+ χ(2).
1271
+ Lemma 4.8. Let 2p0 = ⟨λ + µ, hpr⟩ as above.
1272
+ a) Dp,q
1273
+ 2
1274
+ = 0 for q > p0 and for p < −q;
1275
+ b) Hn(D•,q
1276
+ 2 ) = 0 for n ̸= −q;
1277
+ c) The map v �→ [v] from F −q,q
1278
+ 2
1279
+ (λ, µ) to H−q(D•,q
1280
+ 2 (λ, µ)) is an isomorphism of
1281
+ A-modules.
1282
+ Proof. Part a) is clear from the definition of Dp,q
1283
+ 2
1284
+ . For parts b) and c), we start
1285
+ by studying the localization of the cohomology groups of D•,q
1286
+ 2 . Equivalently, we aim
1287
+ to compute the cohomology of the localization of the row D•,q
1288
+ 2 . This localization
1289
+ can be rewritten as
1290
+
1291
+ b+c=q
1292
+ D•,b
1293
+ t (λ) ⊗ D•,c
1294
+ s (µ).
1295
+ In particular, it follows from Lemma 4.7 that its cohomology is concentrated in
1296
+ degree −q, and that its cohomology in this degree is given by
1297
+
1298
+ b+c=q
1299
+ F −b,b
1300
+ t
1301
+ (λ) ⊗ F −c,c
1302
+ s
1303
+ (µ),
1304
+ which is the localization of F −q,q
1305
+ 2
1306
+ (λ, µ).
1307
+ Since specialisation is compatible with
1308
+ bideg, we have an isomorphism D•,q
1309
+ 2 /aD•,q
1310
+ 2
1311
+ ≃ D•,q
1312
+ 1 (λ, µ). Using Lemma 4.7, the
1313
+ associated long exact sequence shows that Hn(D•,q
1314
+ 2 ) is torsion-free for n ̸= −q + 1,
1315
+ and that the map
1316
+ ι : H−q(D•,q
1317
+ 2 )/aH−q(D•,q
1318
+ 2 ) → H−q(D•,q
1319
+ 1 (λ, µ))
1320
+ is injective.
1321
+ We now prove c). Notice that both F −q,q
1322
+ 2
1323
+ (λ, µ) and H−q(D•,q
1324
+ 2 (λ, µ)) are torsion-
1325
+ free. We have already shown that the localization of the natural maps between them
1326
+ is an isomorphism. To study its specialisation, we compose it with the injection ι.
1327
+ This composition is the isomorphism of the last remark of Lemma 4.7. We conclude
1328
+ by applying Lemma 2.1.
1329
+ In order to prove b), it is enough to notice that from the above discussion we
1330
+ know that, for n ̸= −q, the module Hn(D•,q
1331
+ 2 ) = 0 is torsion-free, and that its
1332
+ localization is trivial.
1333
+
1334
+
1335
+ 16
1336
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
1337
+ Let now ϕ(q)
1338
+ i
1339
+ be an A-basis of F −q,q
1340
+ 2
1341
+ (λ, µ). Since the cohomology in degree
1342
+ −q of the complex D•,q+1
1343
+ 2
1344
+ is zero, there exists an element ϕ(q)
1345
+ i,1 ∈ D−q−1,q+1
1346
+ 2
1347
+ such
1348
+ that χ(2)(ϕ(q)
1349
+ i,1 ) = −d(2)
1350
+ std(ϕ(q)
1351
+ i ). By induction, we can construct elements ϕ(q)
1352
+ i,0 = ϕ(q)
1353
+ i
1354
+ and ϕ(q)
1355
+ i,ℓ ∈ D−q−ℓ,q+ℓ
1356
+ 2
1357
+ such that their sum
1358
+ ˜ϕ(q)
1359
+ i
1360
+ =
1361
+ p0−q
1362
+
1363
+ ℓ=0
1364
+ ϕ(q)
1365
+ i,ℓ
1366
+ satisfies d(2)( ˜ϕ(q)
1367
+ i ) = 0. We now prove the main result of this section.
1368
+ Theorem 4.9. The following hold.
1369
+ a) Ψn(Vλ,µ
1370
+ 2
1371
+ ) = 0 for n ̸= 0.
1372
+ b) We have an isomorphism
1373
+ Ψ0(Vλ,µ
1374
+ 2
1375
+ )
1376
+ aΨ0(Vλ,µ
1377
+ 2
1378
+ )
1379
+ ≃ Ψ0(Wλ,µ
1380
+ 1
1381
+ ) ≃
1382
+
1383
+ ν
1384
+ Ψ0(Vν
1385
+ 1)
1386
+ where the sum ranges over all irreducible components V ν of V λ ⊗V µ, counted
1387
+ with multiplicity.
1388
+ c) The elements
1389
+
1390
+ ˜ϕ(q)
1391
+ i
1392
+
1393
+ are an A-basis of Ψ0(Vλ,µ).
1394
+ Proof. From Lemma 4.8 we deduce that the classes of the elements ˜ϕ(q)
1395
+ i
1396
+ form an
1397
+ A-basis of H0(D•
1398
+ 2), and that Hn(D•
1399
+ 2) = 0 for n ̸= 0. As the complex D•
1400
+ 2 is con-
1401
+ centrated in non-negative degrees, by a standard homological argument we deduce
1402
+ that Hn(Vλ,µ
1403
+ 2
1404
+ ) is isomorphic to the n-th cohomology of the complex H0(D•
1405
+ 2)⊗AE•
1406
+ 2.
1407
+ Using Lemma 4.5, we immediately obtain parts a) and c).
1408
+ The second isomorphism appearing in part b) is clear, while the first follows from
1409
+ a) and the long exact sequence associated with the isomorphism
1410
+ C•
1411
+ 2
1412
+ aC•
1413
+ 2
1414
+ ≃ C•
1415
+ 1(λ, µ).
1416
+
1417
+ We will use the following Corollary in the next Section.
1418
+ Corollary 4.10. The element [vλ ⊗ vµ] ∈ Ψ0(Vλ,µ) is indivisible.
1419
+ Proof. By the previous theorem we can choose [vλ ⊗ vµ] as an element of a basis of
1420
+ the free A module Ψ0(Vλ,µ).
1421
+
1422
+ 5. The action of the center
1423
+ In this section we study the action of the center Z2 on the semi-infinite cohomo-
1424
+ logy of the module Vλ,µ
1425
+ 2
1426
+ .
1427
+ In this section we show that Vλ,µ
1428
+ 2
1429
+ is not a perfect analogue of the Weyl module
1430
+
1431
+ 1. Indeed, we show that, as a Z2-module, the semi-infinite cohomology of Vλ,µ
1432
+ 2
1433
+ is
1434
+ not isomorphic to Endˆg2(Vλ,µ
1435
+ 2
1436
+ ) or to Funct(Opλ,µ
1437
+ 2
1438
+ ).
1439
+ We begin by observing that the module Ψ0(Vν
1440
+ 1) has no non-trivial Z1-equivariant
1441
+ automorphisms.
1442
+ First we notice, that by construction, the action of Z2 commutes with localization
1443
+ and specialisation, as introduced before Equation (4.1). Concretely, we have:
1444
+ Et(z · x) = Et(z) · Et(x),
1445
+ Es(z · x) = Es(z) · Es(x),
1446
+ Sp(z · x) = Sp(z) · Sp(x)
1447
+ for all z ∈ Z2 and for all x ∈ Ψ0(Vλ,µ
1448
+ 2
1449
+ ).
1450
+ Lemma 5.1. If K : Endˆgt(Vλ
1451
+ t )⊗QEndˆgs(Vµ
1452
+ s ) −→ Ψ0(Vλ
1453
+ t )⊗QΨ0(Vµ
1454
+ s ) is a (Zt⊗Zs)-
1455
+ equivariant isomorphism, then K(IdVλ
1456
+ t ⊗ IdVµ
1457
+ s ) = q[vλ] ⊗ [vµ] for some q ∈ Q ∖ {0}.
1458
+
1459
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
1460
+ 17
1461
+ Proof. It follows from Theorem 4.1 that Endˆgt׈gs(Vλt ⊗Q Vµs) is isomorphic to
1462
+ Funct(Opλ
1463
+ t ×Spec Q Opµ
1464
+ s ) = Funct(Opλ
1465
+ t ) ⊗Q Funct(Opµ
1466
+ s ) and this is a polynomial
1467
+ ring in infinitely many variables over the field Q. In particular, its only invertible
1468
+ elements are the non-zero scalars in Q.
1469
+ Moreover, Theorem 4.1 also implies that Funct(Opλ
1470
+ t ) is isomorphic as a Zt-
1471
+ module to Ψ0(Vλ
1472
+ t ), with an isomorphism given by z −→ Gt(z) · [vλ]. The claim
1473
+ follows.
1474
+
1475
+ Before proving that Vλ,µ
1476
+ 2
1477
+ does not have the “right” semi-infinite cohomology
1478
+ we recall some properties of the modules Vν
1479
+ 1 that will be needed also in the next
1480
+ section.
1481
+ Remark 5.2. We denote by Zν
1482
+ 1 the coordinate ring of the scheme Opν
1483
+ 1. Recall
1484
+ that the schemes Opν
1485
+ 1 for different values of ν are disjoint, so that the map Z1 −→
1486
+ Zν1
1487
+ 1 × · · · × Zνk
1488
+ 1
1489
+ is surjective if the weights νi are distinct. Recall also that the ring
1490
+
1491
+ 1 is a polynomial ring in infinitely many variables. This implies that
1492
+ (1) There are no nontrivial ˆg1-morphisms between the ˆU1-modules Vν
1493
+ 1 and Vν′
1494
+ 1
1495
+ if ν ̸= ν′.
1496
+ (2) There are no nontrivial extensions between the ˆU1-modules Vν
1497
+ 1 and Vν′
1498
+ 1 if
1499
+ ν ̸= ν′.
1500
+ (3) Assume that α : � Zνi −→ � Zνi is a map of Z-modules and that the
1501
+ weights νi are distinct. If 1 is in the image of α then α is an isomorphism
1502
+ and α(Zνi
1503
+ 1 ) = Zνi
1504
+ 1 .
1505
+ By the Feigin-Frenkel Theorem (see [4] Theorem 5.2) the ring Funct(Op2) is
1506
+ isomorphic to Z2. In the sequel we will identify these rings through this isomorph-
1507
+ ism.
1508
+ In particular the ring Funct(Opλ,µ
1509
+ 2
1510
+ ) is a quotient of Z2.
1511
+ We will denote
1512
+ Funct(Opλ,µ
1513
+ 2
1514
+ ) by Zλ,µ
1515
+ 2
1516
+ .
1517
+ We now prove that Zλ,µ
1518
+ 2
1519
+ and Ψ0(Vλ,µ
1520
+ 2
1521
+ ) are not isomorphic.
1522
+ Proposition 5.3. Assume that V λ ⊗ V µ is not irreducible.
1523
+ Then the two Z2-
1524
+ modules Endˆg2(Vλ,µ
1525
+ 2
1526
+ ) and Ψ0(Vλ,µ
1527
+ 2
1528
+ ) are not isomorphic.
1529
+ Similarly the two Z2-
1530
+ modules Zλ,µ
1531
+ 2
1532
+ and Ψ0(Vλ,µ
1533
+ 2
1534
+ ) are not isomorphic.
1535
+ Proof. Suppose H : Endˆg2(Vλ,µ
1536
+ 2
1537
+ ) −→ Ψ0(Vλ,µ
1538
+ 2
1539
+ ) is a Z2-equivariant isomorphism.
1540
+ Recall from Lemma 4.28 in [4] that Z2[1/a] is dense in Zt,s, and therefore the
1541
+ localization of H is a (Zt ⊗Q Zs)-equivariant isomorphism
1542
+ Endˆgt(Vλ
1543
+ t ) ⊗Q Endˆgs(Vµ
1544
+ s ) −→ Ψ0(Vλ
1545
+ t ) ⊗Q Ψ0(Vµ
1546
+ s ),
1547
+ where we used the identification of the localization of Ψ0(Vλ,µ
1548
+ 2
1549
+ ) with Ψ0(Vλ
1550
+ t ) ⊗Q
1551
+ Ψ0(Vµ
1552
+ s ).
1553
+ From Lemma 5.1 and 4.10 we deduce that H(IdVλ,µ
1554
+ 2
1555
+ ) = [q vλ ⊗ vµ], where q ∈ A
1556
+ and qvλ ⊗ vµ ∈ Vλ,µ
1557
+ 2
1558
+ . We set w = qvλ ⊗ vµ ∈ Vλ,µ
1559
+ 2
1560
+ .
1561
+ By specialisation, H gives a Z1-equivariant isomorphism
1562
+ H : Endˆg2(Vλ,µ
1563
+ 2
1564
+ )
1565
+ a Endˆg2(Vλ,µ
1566
+ 2
1567
+ )
1568
+ −→ Ψ0(Vλ,µ
1569
+ 2
1570
+ )
1571
+ aΨ0(Vλ,µ
1572
+ 2
1573
+ )
1574
+ .
1575
+ (5.1)
1576
+ This isomorphism sends IdVλ,µ
1577
+ 2
1578
+ to w. Now consider the decomposition V λ ⊗ V µ =
1579
+ � V ν as g-modules. By Theorem 4.9, the target of the map H in (5.1) decomposes
1580
+ as � Ψ0(Vν
1581
+ 1). The element w is a multiple of vλ ⊗ vµ hence its class belongs to
1582
+ Ψ0(Vλ+µ
1583
+ 1
1584
+ ). As H is Z1-equivariant and Vλ+µ
1585
+ 1
1586
+ is stable by the action of ˆg1, we get
1587
+ that the image of H is contained in the direct summand Ψ0(Vλ+µ
1588
+ 1
1589
+ ). In particular,
1590
+ if V λ ⊗ V µ is not irreducible, the map H cannot be surjective. This proves the
1591
+
1592
+ 18
1593
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
1594
+ first claim. The second claim follows since the map from Zλ,µ
1595
+ 2
1596
+ to Ψ0(Vλ,µ
1597
+ 2
1598
+ ) factors
1599
+ through Endˆg2(Vλ,µ
1600
+ 2
1601
+ ).
1602
+
1603
+ 6. A Weyl module for sl(2)
1604
+ In this section, we propose an alternative Weyl module in the context of opers
1605
+ with two singularities, in the case of g = sl(2).
1606
+ We fix the following notation:
1607
+ e, h, f is an sl(2)-triple such that h ∈ t and e ∈ n+, while ψ∗ ∈ n∗
1608
+ + is the dual of
1609
+ e. We identify dominant weights with natural numbers and we assume from now
1610
+ on that λ ⩾ µ. In this case, the differential of the complex computing semi-infinite
1611
+ cohomology takes the simpler form d(2) = ψ∗ + � ewn ⊗ ψ∗z−n−1/2.
1612
+ Let �Vλ,µ
1613
+ 2
1614
+ be the ˆU2-submodule of Vλ,µ
1615
+ 2
1616
+ generated by the highest weight vector
1617
+ 1 ⊗ vλ ⊗ vµ ∈ A ⊗ V λ ⊗ V µ. We will prove that this module has the “correct”
1618
+ semi-infinite cohomology and the “correct” endomorphism ring.
1619
+ We start by giving a more explicit description of the module �Vλ,µ
1620
+ 2
1621
+ . If X is a
1622
+ subspace of U(g) and Y is a subspace of a g-module Z we denote by X · Y the
1623
+ subspace of Z generated by the products x · y with x ∈ X and y ∈ Y . We define
1624
+ an increasing filtration F i of �Vλ,µ
1625
+ 2
1626
+ as follows
1627
+ F i = U(g) · (C Id ⊗ Id + Id ⊗ g)i · (vλ ⊗ vµ).
1628
+ This is an increasing filtration of V λ ⊗ V µ by g-modules and for i large enough we
1629
+ have F i = V λ ⊗ V µ. Choose a g-stable complement Gi+1 of F i in F i+1 and set
1630
+ G0 = F 0, so that F i = �i
1631
+ j=0 Gj. If we set F i(V µ) = (CId + n−)ivµ, it is easy to
1632
+ check by induction on i that
1633
+ F i = U(g) · (Id ⊗ Id + Id ⊗ n−)i(vλ ⊗ vµ) = U(g) ·
1634
+
1635
+ V λ ⊗ F i(V µ)
1636
+
1637
+ .
1638
+ In the case of g = sl(2) we have Gi ≃ V λ+µ−2i and F µ(V µ) = V µ.
1639
+ Let U −
1640
+ 2
1641
+ ⊂ U(ˆg2) be the A-span of Poincar´e-Birkhoff-Witt monomials of the
1642
+ form (x1wa1) · · · (xkwak) with xi ∈ g and ai < 0. This is a complement of U(ˆg+
1643
+ 2 )
1644
+ in U(ˆg2), so that in particular we have
1645
+ Vλ,µ
1646
+ 2
1647
+ = U −
1648
+ 2 ⊗C (V λ ⊗ V µ).
1649
+ Lemma 6.1. If λ ⩾ µ then
1650
+ �Vλ,µ
1651
+ 2
1652
+ =
1653
+ µ
1654
+
1655
+ i=0
1656
+ aiU −
1657
+ 2 ⊗C F i =
1658
+ µ
1659
+
1660
+ i=0
1661
+ aiU −
1662
+ 2 ⊗C Gi
1663
+ Proof. To understand the module �Vλ,µ
1664
+ 2
1665
+ we need to compute the ˆg+
1666
+ 2 -submodule of
1667
+ A⊗C V λ ⊗C V µ generated by 1⊗vλ ⊗vµ. Notice that every element of the form xg,
1668
+ with x ∈ g and g ∈ C[[t, s]] divisible by ts, acts trivially on A ⊗ V λ ⊗ V µ. Hence
1669
+ we need to understand the action of elements of the form
1670
+ z = x1 · · · xℓ · (y1t) · · · (ymt) · (vλ ⊗ vµ),
1671
+ with xi, yi ∈ g. Moreover, elements of g act in the standard way on the tensor
1672
+ product V λ ⊗ V µ, while elements of the form xt with x ∈ g act via −a(Id ⊗ x).
1673
+ This implies the lemma.
1674
+
1675
+ We now describe the specialisation of the module �Vλ,µ
1676
+ 2
1677
+ . We introduce the fol-
1678
+ lowing decreasing filtration of �Vλ,µ
1679
+ 2
1680
+ :
1681
+ Fi = �Vλ,µ
1682
+ 2
1683
+ ∩ aiVλ,µ
1684
+ 2
1685
+ .
1686
+ (6.1)
1687
+
1688
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
1689
+ 19
1690
+ By Lemma 6.1 we have the following description of the terms of this filtration as
1691
+ A-modules:
1692
+ Fi = aiU −
1693
+ 2 ⊗C F i ⊕
1694
+ µ
1695
+
1696
+ j=i+1
1697
+ ajU −
1698
+ 2 ⊗C Gj
1699
+ In particular we have F0 = �Vλ,µ
1700
+ 2
1701
+ , Fj = ajVλ,µ
1702
+ 2
1703
+ for j ⩾ µ.
1704
+ Lemma 6.2.
1705
+ a) Let ui ∈ Gi be the highest weight vector and set ˜wi = aiui ∈
1706
+ �Vλ,µ
1707
+ 2
1708
+ . Then ˜wi ∈ �Vλ,µ
1709
+ 2
1710
+ and ai−1ui /∈ �Vλ,µ
1711
+ 2
1712
+ .
1713
+ b) There is an isomorphism of ˆU1-modules
1714
+ Fi + a�Vλ,µ
1715
+ 2
1716
+ a�Vλ,µ
1717
+ 2
1718
+
1719
+ µ
1720
+
1721
+ j=i
1722
+ Vλ+µ−2j
1723
+ 1
1724
+ .
1725
+ The quotient Fi+a�Vλ,µ
1726
+ 2
1727
+ a�Vλ,µ
1728
+ 2
1729
+ is generated as a ˆU1-module by the classes of ˜wi, . . . , ˜wµ.
1730
+ In particular �Vλ,µ
1731
+ 2
1732
+ /a�Vλ,µ
1733
+ 2
1734
+ ≃ Wλ,µ
1735
+ 1
1736
+ is generated by ˜w0, . . . , ˜wµ.
1737
+ Proof. The first claim follows from Lemma 6.1.
1738
+ We prove part b) by decreasing induction on i. By Lemma 6.1, for i > µ the
1739
+ quotient is zero and the claim is true. For i ⩽ µ, consider the map
1740
+ U −
1741
+ 2 ⊗ Gi −→
1742
+ Fi + ai+1Vλ,µ
1743
+ 2
1744
+ ai+1Vλ,µ
1745
+ 2
1746
+ + Fi ∩ a�Vλ,µ
1747
+ 2
1748
+
1749
+ (Fi + a�Vλ,µ
1750
+ 2
1751
+ )/a�Vλ,µ
1752
+ 2
1753
+ (Fi+1 + a�Vλ,µ
1754
+ 2
1755
+ )/a�Vλ,µ
1756
+ 2
1757
+ sending an element u ⊗ v to the class of aiu ⊗ v. This map induces an isomorphism
1758
+ U −
1759
+ 2
1760
+ aU −
1761
+ 2
1762
+ ⊗ Gi ≃
1763
+ (Fi + a�Vλ,µ
1764
+ 2
1765
+ )/a�Vλ,µ
1766
+ 2
1767
+ (Fi+1 + a�Vλ,µ
1768
+ 2
1769
+ )/a�Vλ,µ
1770
+ 2
1771
+ .
1772
+ (6.2)
1773
+ Moreover, notice that
1774
+ U−
1775
+ 2
1776
+ aU−
1777
+ 2 ⊗ Gi ≃ U −
1778
+ 1 ⊗ Gi, where U −
1779
+ 1 = U(t−1g[t−1]) ⊂ U(ˆg1) =
1780
+ U1, and that U −
1781
+ 1 ⊗ Gi has a natural structure of U1-module, as it can be identified
1782
+ with Vλ+µ−2i
1783
+ 1
1784
+ . With this U1-action, the isomorphism 6.2 is U1-equivariant. Now
1785
+ the claim follows by the inductive hypothesis, combined with the fact that there
1786
+ are no nontrivial extensions between modules Vν
1787
+ 1 and Vν′
1788
+ 1 if ν ̸= ν′ and that the
1789
+ highest weight vector of V ν generates the module Vν
1790
+ 1 as an U1-module.
1791
+
1792
+ Notice that, although the specialisations at a = 0 of Vλ,µ
1793
+ 2
1794
+ and �Vλ,µ
1795
+ 2
1796
+ are iso-
1797
+ morphic, the specialisation of �Vλ,µ
1798
+ 2
1799
+ , is generated by vλ ⊗ vµ while in the first case
1800
+ this vector generates the submodule Vλ+µ
1801
+ 1
1802
+ .
1803
+ As a corollary, we get the following result.
1804
+ Proposition 6.3. The following hold:
1805
+ a) Ψn(�Vλ,µ
1806
+ 2
1807
+ ) = 0 for n ̸= 0.
1808
+ b) The inclusion of �Vλ,µ
1809
+ 2
1810
+ in Vλ,µ
1811
+ 2
1812
+ induces isomorphisms
1813
+ Ψ0(�Vλ,µ
1814
+ 2
1815
+ )[a−1] ≃ Ψ0(Vλ,µ
1816
+ 2
1817
+ )[a−1] ≃ Ψ0(Vλ
1818
+ t ) ⊗Q Ψ0(Vµ
1819
+ s ).
1820
+ c) Ψ0(�Vλ,µ
1821
+ 2
1822
+ ) is torsion-free with respect to the action of A, and the natural pro-
1823
+ jection induces isomorphisms
1824
+ Ψ0(�Vλ,µ
1825
+ 2
1826
+ )
1827
+ aΨ0(�Vλ,µ
1828
+ 2
1829
+ )
1830
+ ≃ Ψ0
1831
+ � �Vλ,µ
1832
+ 2
1833
+ a�Vλ,µ
1834
+ 2
1835
+
1836
+ ≃ Ψ0(Wλ,µ
1837
+ 1
1838
+ ).
1839
+ Proof. We use the filtration introduced in Equation (6.1). Notice that
1840
+ Fi
1841
+ Fi+1
1842
+ =
1843
+ aiU −
1844
+ 2 ⊗ F i
1845
+ ai+1U −
1846
+ 2 ⊗ F i ≃ U −
1847
+ 1 ⊗C F i ≃ Indˆg1
1848
+ ˆg+
1849
+ 1 F i,
1850
+
1851
+ 20
1852
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
1853
+ where we consider F i as a ˆg+
1854
+ 1 -module on which tg[t] acts trivially. Notice that
1855
+ Indˆg1
1856
+ ˆg+
1857
+ 1 F i is a sum of modules of the form Vν
1858
+ 1, hence in particular has trivial non-
1859
+ zero cohomology.
1860
+ Hence, arguing by decreasing induction on i, starting from i = µ, it follows that
1861
+ Fi has trivial semi-infinite cohomology in degree different from zero. Indeed for
1862
+ i = µ we have Fµ = aµVλ,µ
1863
+ 2
1864
+ ≃ Vλ,µ
1865
+ 2
1866
+ and this is the content of Theorem 4.9. For
1867
+ i = 0 this implies claim a).
1868
+ Part b) follows from the fact that semi-infinite cohomology commutes with local-
1869
+ ization (see Lemma 3.3) combined with the isomorphism �Vλ,µ
1870
+ 2
1871
+ [a−1] = Vλ,µ
1872
+ 2
1873
+ [a−1] ≃
1874
+
1875
+ t ⊗Q Vµ
1876
+ s .
1877
+ To prove c), consider the exact sequence
1878
+ 0
1879
+ � �Vλ,µ
1880
+ 2
1881
+ ·a
1882
+ � �Vλ,µ
1883
+ 2
1884
+
1885
+ �Vλ,µ
1886
+ 2
1887
+ a�Vλ,µ
1888
+ 2
1889
+ � 0
1890
+ By Lemma 6.2, the last module in this sequence is isomorphic to Wλ,µ
1891
+ 1
1892
+ . In par-
1893
+ ticular, the semi-infinite cohomology groups Ψn of the modules appearing in this
1894
+ sequence are zero for n ̸= 0, and c) follows.
1895
+
1896
+ To prove that the semi-infinite cohomology of �Vλ,µ
1897
+ 2
1898
+ is isomorphic to Zλ,µ
1899
+ 2
1900
+ we will
1901
+ use the action of a particular central element in Z2. Recall from [4] the definition
1902
+ of the 2-Sugawara operator
1903
+ S(2)
1904
+ 1/2 =
1905
+
1906
+ n∈ 1
1907
+ 2 Z,b
1908
+ : (Jbwn)(Jbz−n) :
1909
+ (6.3)
1910
+ where J1, J2, J3 are the basis elements e, h, f and J1, J2, J3 are the dual basis
1911
+ elements f, h/2, e. As proved in [4], the element S(2)
1912
+ 1/2 is central. Its specialisation
1913
+ is the Sugawara operator
1914
+ S(1)
1915
+ 1
1916
+ =
1917
+
1918
+ n∈Z,b
1919
+ : (Jbtn) (Jbt−n) :
1920
+ (6.4)
1921
+ which is an element of Z1. It is straightforward to check that the action of S(1)
1922
+ 1
1923
+ on
1924
+ the Weyl module Vν
1925
+ 1 is given by multiplication by ν(ν + 1).
1926
+ Lemma 6.4. The element ˆwℓ =
1927
+
1928
+ et−1�ℓ ˜wℓ belongs to Z2 · (vλ ⊗ vµ) + a�Vλ,µ
1929
+ 2
1930
+ for
1931
+ ℓ = 0, . . . , µ,
1932
+ Proof. We notice first that the element vλ ⊗ f ℓvµ belongs to F ℓ \ F ℓ−1 and has
1933
+ weight λ + µ − 2ℓ. Hence, up to a non-zero constant we have vλ ⊗ f ℓvµ = uℓ + u′
1934
+ ℓ,
1935
+ where we recall that uℓ is the highest weight vector in Gℓ ≃ V λ+µ−2ℓ ⊂ V λ ⊗ V µ
1936
+ and u′
1937
+ ℓ ∈ F ℓ−1. In particular, recall from Lemma 6.2 that aℓ−1F ℓ ⊂ �Vλ,µ
1938
+ 2
1939
+ , hence
1940
+ aℓ �
1941
+ et−1�ℓ vλ ⊗ f ℓvµ =
1942
+
1943
+ et−1�ℓ ˜wℓ +
1944
+
1945
+ et−1�ℓ (aℓu′
1946
+ ℓ) ≡
1947
+
1948
+ et−1�ℓ ˜wℓ
1949
+ mod a�Vλ,µ
1950
+ 2
1951
+ .
1952
+ Hence, the lemma is equivalent to the fact that ˆwℓ = aℓ �
1953
+ et−1�ℓ vλ ⊗ f ℓvµ is in
1954
+ Z2 · vλ ⊗ vµ + a�Vλ,µ
1955
+ 2
1956
+ . We prove this statement by induction on ℓ. For ℓ = 0 it is
1957
+ trivially true. Now assume ˆwℓ is in Z2 · vλ ⊗ vµ + a�Vλ,µ
1958
+ 2
1959
+ . We compute S(2)
1960
+ 1/2( ˆwℓ). In
1961
+ order to do this, we notice that the action of xtisj on �Vλ,µ
1962
+ 2
1963
+ /a�Vλ,µ
1964
+ 2
1965
+ is equal to the
1966
+ action of xti+j on the same module, and that vλ ⊗ e f ℓvµ is in F ℓ−1. We have
1967
+ S(2)
1968
+ 1/2 ˆwℓ = 2
1969
+
1970
+ n>0
1971
+ et−n · ftn · ˆwℓ + 2
1972
+
1973
+ n>0
1974
+ ft−n · etn · ˆwℓ +
1975
+
1976
+ n>0
1977
+ ht−n · htn · ˆwℓ
1978
+ + e · f · ˆwℓ + e · f · ˆwℓ + 1
1979
+ 2h · h · ˆwℓ.
1980
+
1981
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
1982
+ 21
1983
+ In the second infinite sum above, the element etn commutes with et−1, hence etn ·
1984
+ ˆwℓ ∈ a�Vλ,µ
1985
+ 2
1986
+ for all n > 0. The summands of the third series are of the form
1987
+ htn · (et−1)ℓ · ˆwℓ = (et−1)ℓhtn · ˆwℓ + 2ℓ(et−1)ℓ−1etn−1 · ˆwℓ,
1988
+ hence they vanish for n ⩾ 3, while for n = 1, 2 they are easily checked to be elements
1989
+ of a�Vλ,µ
1990
+ 2
1991
+ . The summands of the first series are given by
1992
+ ftn·(et−1)ℓ· ˆwℓ = (et−1)ℓftn· ˆwℓ−ℓ(et−1)ℓ−1htn−1· ˆwℓ−ℓ(ℓ−1)(et−1)ℓ−2etn−2· ˆwℓ,
1993
+ and all terms are zero or in a�Vλ,µ
1994
+ 2
1995
+ but for the case n = 1, for which we get
1996
+ (et−1) · (ft) · (et−1)ℓ · ˆwℓ = aℓ+1(et−1)ℓ+1 · (vλ ⊗ f ℓ+1vµ)
1997
+ − ℓ(et−1)ℓh · (vλ ⊗ f ℓvµ) − ℓ(ℓ − 1)(et−1)ℓ · ˆwℓ = ˆwℓ+1 + K1 ˆwℓ
1998
+ for some constant K1. Finally, e·f · ˆwℓ+e·f · ˆwℓ+ 1
1999
+ 2h·h· ˆwℓ belongs to K2 ˆwℓ+a�Vλ,µ
2000
+ 2
2001
+ for some constant K2. Hence we get
2002
+ S(2)
2003
+ 1/2 ˆwℓ ≡ ˆwℓ+1 + K ˆwℓ mod a�Vλ,µ
2004
+ 2
2005
+ for some constant K, proving our claim.
2006
+
2007
+ We now prove that the zero-th semi-infinite cohomology of the module �Vλ,µ
2008
+ 2
2009
+ is
2010
+ isomorphic to Zλ,µ
2011
+ 2
2012
+ .
2013
+ Theorem 6.5. For g = sl(2) the map Φ : Zλ,µ
2014
+ 2
2015
+ −→ Ψ0��Vλ,µ
2016
+ 2
2017
+
2018
+ given by Φ(z) =
2019
+ z · [vλ ⊗ vµ] is an isomorphism.
2020
+ Proof. By [4], Theorem 6.4, the action of Z2 on Vλ,µ
2021
+ 2
2022
+ , hence on �Vλ,µ
2023
+ 2
2024
+ , factors through
2025
+ Zλ,µ
2026
+ 2
2027
+ . Moreover vλ ⊗ vµ is a cycle, so the map Φ is well defined. Since we know
2028
+ that both modules are torsion-free, to prove that Φ is an isomorphism it suffices to
2029
+ prove that the localization Φa and the specialisation Φ are isomorphisms.
2030
+ The fact that Φa is an isomorphism is the content of part b) of Proposition 6.3.
2031
+ We need to prove that Φ is an isomorphism. By Lemma 6.2, Proposition 6.3 and
2032
+ [4, Theorem 2.13] we have
2033
+ Zλ,µ
2034
+ 2
2035
+ aZλ,µ
2036
+ 2
2037
+
2038
+ µ
2039
+
2040
+ i=0
2041
+ Zλ+µ−2i
2042
+ 1
2043
+ and
2044
+ Ψ0(�Vλ,µ
2045
+ 2
2046
+ )
2047
+ aΨ0(�Vλ,µ
2048
+ 2
2049
+ )
2050
+
2051
+ µ
2052
+
2053
+ i=0
2054
+ Ψ0(Vλ+µ−2i
2055
+ 1
2056
+ ).
2057
+ In particular, by Theorem 4.1 these two Z1-modules are isomorphic, but we need to
2058
+ prove that our specific map Φ provides an isomorphism between them. By Remark
2059
+ 5.2 it is enough to prove that Φ is surjective. We prove that the image of Φ contains
2060
+ Ψ0(Fℓ+a�Vλ,µ
2061
+ 2
2062
+ /a�Vλ,µ
2063
+ 2
2064
+ ) arguing by reverse induction on ℓ. For ℓ = 0 we get our claim.
2065
+ For ℓ > µ there is nothing to prove. Now assume ℓ ⩽ µ. Consider again the exact
2066
+ sequence
2067
+ 0
2068
+ � Fℓ+1+a�Vλ,µ
2069
+ 2
2070
+ a�Vλ,µ
2071
+ 2
2072
+ � Fℓ+a�Vλ,µ
2073
+ 2
2074
+ a�Vλ,µ
2075
+ 2
2076
+ �aℓU −
2077
+ 2 ⊗C Gℓ
2078
+ �0.
2079
+ We know that the last module is isomorphic to
2080
+ aℓU −
2081
+ 2 ⊗C Gℓ ≃ Vλ+µ−2ℓ
2082
+ 1
2083
+ = Indˆg1
2084
+ ˆg+
2085
+ 1 (V λ+µ−2ℓ)
2086
+ and that it is generated by the element ˜wℓ ∈ aℓGℓ. Notice this sequence of Z1-
2087
+ modules splits by Remark 5.2.
2088
+ Taking semi-infinite cohomology we get a short
2089
+ exact sequence
2090
+ 0
2091
+ �Ψ0 �
2092
+ Fℓ+1+a�Vλ,µ
2093
+ 2
2094
+ a�Vλ,µ
2095
+ 2
2096
+
2097
+ �Ψ0 �
2098
+ Fℓ+a�Vλ,µ
2099
+ 2
2100
+ a�Vλ,µ
2101
+ 2
2102
+
2103
+ �Ψ0 �
2104
+ aℓU −
2105
+ 2 ⊗C Gℓ�
2106
+ �0.
2107
+ and we know that the last Z2-module is generated by ˜wℓ. Hence it is enough to
2108
+ prove that this element is in the image of Zλ,µ
2109
+ 2
2110
+ (vλ ⊗vµ) in Ψ0�
2111
+ �Vλ,µ
2112
+ 2
2113
+ /Fℓ+1 +a�Vλ,µ
2114
+ 2
2115
+
2116
+ .
2117
+
2118
+ 22
2119
+ FORTUNA, LOMBARDO, MAFFEI, MELANI
2120
+ By Lemma 6.4 we know that ˆwℓ is in this image. Now we prove that ˜wℓ and ˆwℓ
2121
+ define the same element in the semi-infinite cohomology of aℓU −
2122
+ 2 ⊗C Gℓ. This is a
2123
+ claim about the cohomology of the module Vν
2124
+ 1 for ν = λ + µ − 2ℓ. For any ν we
2125
+ prove that
2126
+
2127
+ et−1�hvν +
2128
+
2129
+ et−1�h−1vν is a coboundary. Indeed the boundary operator
2130
+ in the case of sl(2) is equal to
2131
+ d(1) = ψ∗ +
2132
+
2133
+ n∈Z
2134
+ (etn) ⊗ ψ∗t−1−n,
2135
+ so a simple computation shows
2136
+ d(1) ��
2137
+ et−1�h−1vν ⊗ (ψt−1)|0⟩Λ
2138
+
2139
+ =
2140
+
2141
+ et−1�h−1vν ⊗ |0⟩Λ +
2142
+
2143
+ et−1�hvν ⊗ |0⟩Λ,
2144
+ which implies our claim.
2145
+
2146
+ Recall that in [4] we computed the endomorphism ring of Vλ,µ
2147
+ 2
2148
+ , showing that it
2149
+ is isomorphic to Zλ,µ
2150
+ 2
2151
+ . We now prove the same result for the module �Vλ,µ
2152
+ 2
2153
+ .
2154
+ Proposition 6.6. The action of the center Z2 on �Vλ,µ
2155
+ 2
2156
+ induces an isomorphism
2157
+ Zλ,µ
2158
+ 2
2159
+ ≃ Endˆg2(�Vλ,µ
2160
+ 2
2161
+ ).
2162
+ Proof. We already recalled at the beginning of the proof of Theorem 6.5 that the
2163
+ action of Z2 on �Vλ,µ
2164
+ 2
2165
+ factors through Zλ,µ
2166
+ 2
2167
+ . We denote by α : Zλ,µ
2168
+ 2
2169
+ −→ End(�Vλ,µ
2170
+ 2
2171
+ )
2172
+ this action. Since both modules have no A-torsion, in order to prove that α is
2173
+ an isomorphism it suffices to show that its localization and its specialisation are
2174
+ isomorphisms.
2175
+ Moreover, since our modules are finitely generated and have no
2176
+ torsion we have
2177
+ Endˆg2
2178
+
2179
+ �Vλ,µ
2180
+ 2
2181
+
2182
+ [a−1] ≃ Endˆg2[a−1]
2183
+
2184
+ �Vλ,µ
2185
+ 2
2186
+ [a−1]
2187
+
2188
+ ≃ Endˆgt,s
2189
+
2190
+ Vλ ⊗Q Vµ
2191
+ s
2192
+
2193
+ ≃ Zλ
2194
+ t ⊗Q Zµ
2195
+ t ≃ Zλ,µ
2196
+ 2
2197
+ [a−1],
2198
+ hence the localization of α is an isomorphism.
2199
+ Finally, we prove that the specialisation of α is also an isomorphism. We have
2200
+ already recalled that by [4, Theorem 2.13] we have Zλ,µ
2201
+ 2
2202
+ /aZλ,µ
2203
+ 2
2204
+ ≃ �µ
2205
+ i=0 Zλ+µ−2i
2206
+ 1
2207
+ .
2208
+ Hence by Theorem 4.1 we have the following abstract isomorphisms of Z1-modules:
2209
+ Zλ,µ
2210
+ 2
2211
+ aZλ,µ
2212
+ 2
2213
+
2214
+ µ
2215
+
2216
+ i=0
2217
+ Zλ+µ−2i
2218
+ 1
2219
+
2220
+ µ
2221
+
2222
+ i=0
2223
+ Endˆg1(Vλ+µ−2i
2224
+ 1
2225
+ ).
2226
+ Moreover, since �Vλ,µ
2227
+ 2
2228
+ has no nontrivial A-torsion, by Lemma 6.2 and Remark 5.2
2229
+ part (1) we have the inclusion
2230
+ Endˆg1
2231
+
2232
+ �Vλ,µ
2233
+ 2
2234
+
2235
+ a Endˆg1
2236
+
2237
+ �Vλ,µ
2238
+ 2
2239
+ � ⊂ Endˆg1
2240
+ � �Vλ,µ
2241
+ 2
2242
+ a�Vλ,µ
2243
+ 2
2244
+
2245
+
2246
+ µ
2247
+
2248
+ i=0
2249
+ Endˆg1(Vλ+µ−2i
2250
+ 1
2251
+ ).
2252
+ Hence, composing the specialisation of the map α with this inclusion and the iso-
2253
+ morphisms above we get a Z1-equivariant endomorphism of �µ
2254
+ i=0 Zλ+µ−2i
2255
+ 1
2256
+ . Moreover,
2257
+ α(1) = 1, hence we conclude by Remark 5.2 (3) that the specialisation of α is also
2258
+ an isomorphism.
2259
+
2260
+ References
2261
+ [1] Casarin, L. A Feigin Frenkel theorem with n singularities. preprint, 2022.
2262
+ [2] Feigin, B., and Frenkel, E. Affine Kac-Moody algebras at the critical level and Gelfand-
2263
+ Diki˘ı algebras. In Infinite analysis, Part A, B (Kyoto, 1991), vol. 16 of Adv. Ser. Math. Phys.
2264
+ World Sci. Publ., River Edge, NJ, 1992, pp. 197–215.
2265
+ [3] Fortuna, G. The Beilinson-Bernstein Localization Theorem for the affine Grassmannian.
2266
+ MIT, PhD thesis, 2013.
2267
+
2268
+ SEMI-INFINITE COHOMOLOGY OF WEYL MODULES
2269
+ 23
2270
+ [4] Fortuna, G., Lombardo, D., Maffei, A., and Melani, V. Local opers with two singularities:
2271
+ the case of sl(2). Comm. Math. Phys., 394 (2022), 1303–1360.
2272
+ [5] Frenkel, E., and Ben-Zvi, D. Vertex algebras and algebraic curves, second ed., vol. 88
2273
+ of Mathematical Surveys and Monographs. American Mathematical Society, Providence, RI,
2274
+ 2004.
2275
+ [6] Frenkel, E., and Gaitsgory, D. Local geometric Langlands correspondence: the spherical
2276
+ case. In Algebraic analysis and around, vol. 54 of Adv. Stud. Pure Math. Math. Soc. Japan,
2277
+ Tokyo, 2009, pp. 167–186.
2278
+ [7] Frenkel, E., and Gaitsgory, D. Weyl modules and opers without monodromy. In Arithmetic
2279
+ and geometry around quantization, vol. 279 of Progr. Math. Birkh¨auser Boston, Boston, MA,
2280
+ 2010, pp. 101–121.
2281
+ E-mail addresses: [email protected], [email protected],
2282
2283
+
FNFQT4oBgHgl3EQfRTbc/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
GNE2T4oBgHgl3EQfTQeG/content/2301.03801v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3f7a1e6745a7b34087f541e54bf30149755c411136fe1478443c613abb79883
3
+ size 552359
GNE2T4oBgHgl3EQfTQeG/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2eb1c4314de870830ce6cb615b36dfcc809b55af17028a17a36344aafd24c03
3
+ size 1900589
GNE2T4oBgHgl3EQfTQeG/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:275349d6c2c75874893cc9023b50bc2aa01b7dea1c0b4c8734f7ad94e9aabf0a
3
+ size 77261
ItAyT4oBgHgl3EQfTPfP/content/tmp_files/2301.00103v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
ItAyT4oBgHgl3EQfTPfP/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
ItE3T4oBgHgl3EQfugv_/content/2301.04686v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06f8edc984a7800ddc8da18f1bcd21180a5bda813db78187da8bbf3f1d2f61fb
3
+ size 739404
ItE3T4oBgHgl3EQfugv_/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:005f49b3fbc786fb998e43e674b8ef60b6395903a3c0c99533a818467027a1c1
3
+ size 281608
J9E1T4oBgHgl3EQfsQWj/content/2301.03364v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b951c22a59e6b282f6ac0f031f4b5324ee17a3d4aba04df4c370a81a234d7811
3
+ size 3012552
J9E1T4oBgHgl3EQfsQWj/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23cafc9a73e4480228112ce6a7ec9a29264b48d1ff92d70bdf380ff31e09d3de
3
+ size 1507373