jackkuo committed · verified
Commit 7f63a41 · 1 Parent(s): 3d3e4ef

Add files using upload-large-folder tool

Files changed (50)
  1. -NE0T4oBgHgl3EQfxAEX/vector_store/index.faiss +3 -0
  2. .gitattributes +67 -0
  3. 09E4T4oBgHgl3EQfZwye/vector_store/index.faiss +3 -0
  4. 1NFLT4oBgHgl3EQfpi-7/content/2301.12136v1.pdf +3 -0
  5. 1NFLT4oBgHgl3EQfpi-7/vector_store/index.pkl +3 -0
  6. 39E2T4oBgHgl3EQfOAbJ/content/tmp_files/2301.03744v1.pdf.txt +1562 -0
  7. 39E2T4oBgHgl3EQfOAbJ/content/tmp_files/load_file.txt +0 -0
  8. 39FKT4oBgHgl3EQfRS0O/content/2301.11770v1.pdf +3 -0
  9. 39FKT4oBgHgl3EQfRS0O/vector_store/index.faiss +3 -0
  10. 39FKT4oBgHgl3EQfRS0O/vector_store/index.pkl +3 -0
  11. 3dFKT4oBgHgl3EQfQy0a/content/tmp_files/2301.11768v1.pdf.txt +0 -0
  12. 3dFKT4oBgHgl3EQfQy0a/content/tmp_files/load_file.txt +0 -0
  13. 4NE1T4oBgHgl3EQf6AXQ/content/tmp_files/2301.03519v1.pdf.txt +1374 -0
  14. 4NE1T4oBgHgl3EQf6AXQ/content/tmp_files/load_file.txt +0 -0
  15. 4dE1T4oBgHgl3EQf6QUq/content/tmp_files/2301.03520v1.pdf.txt +715 -0
  16. 4tAzT4oBgHgl3EQffvxD/content/tmp_files/2301.01456v1.pdf.txt +1639 -0
  17. 4tAzT4oBgHgl3EQffvxD/content/tmp_files/load_file.txt +0 -0
  18. 7tAyT4oBgHgl3EQf2_kh/content/2301.00759v1.pdf +3 -0
  19. 7tAyT4oBgHgl3EQf2_kh/vector_store/index.faiss +3 -0
  20. 7tAyT4oBgHgl3EQf2_kh/vector_store/index.pkl +3 -0
  21. 7tE1T4oBgHgl3EQfTwM_/content/tmp_files/2301.03081v1.pdf.txt +1214 -0
  22. 7tE1T4oBgHgl3EQfTwM_/content/tmp_files/load_file.txt +0 -0
  23. 89E3T4oBgHgl3EQfSAmG/content/2301.04428v1.pdf +3 -0
  24. 89E3T4oBgHgl3EQfSAmG/vector_store/index.faiss +3 -0
  25. 89E3T4oBgHgl3EQfSAmG/vector_store/index.pkl +3 -0
  26. 8dE3T4oBgHgl3EQfqgpk/content/2301.04652v1.pdf +3 -0
  27. 8dFLT4oBgHgl3EQfBS4r/content/tmp_files/2301.11969v1.pdf.txt +759 -0
  28. 8dFLT4oBgHgl3EQfBS4r/content/tmp_files/load_file.txt +0 -0
  29. A9AzT4oBgHgl3EQf__9t/content/tmp_files/2301.01956v1.pdf.txt +1542 -0
  30. A9AzT4oBgHgl3EQf__9t/content/tmp_files/load_file.txt +0 -0
  31. BNFIT4oBgHgl3EQf_iwr/content/2301.11415v1.pdf +3 -0
  32. BNFIT4oBgHgl3EQf_iwr/vector_store/index.faiss +3 -0
  33. BNFIT4oBgHgl3EQf_iwr/vector_store/index.pkl +3 -0
  34. BtAzT4oBgHgl3EQfGPuX/content/2301.01025v1.pdf +3 -0
  35. BtAzT4oBgHgl3EQfGPuX/vector_store/index.pkl +3 -0
  36. CtE3T4oBgHgl3EQfUgpf/vector_store/index.pkl +3 -0
  37. D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf +3 -0
  38. DdE1T4oBgHgl3EQfEAP6/content/tmp_files/2301.02886v1.pdf.txt +710 -0
  39. DdE1T4oBgHgl3EQfEAP6/content/tmp_files/load_file.txt +313 -0
  40. DdE3T4oBgHgl3EQfUwqw/content/tmp_files/2301.04454v1.pdf.txt +593 -0
  41. DdE3T4oBgHgl3EQfUwqw/content/tmp_files/load_file.txt +493 -0
  42. DdFJT4oBgHgl3EQfBSxP/content/tmp_files/2301.11424v1.pdf.txt +0 -0
  43. DdFJT4oBgHgl3EQfBSxP/content/tmp_files/load_file.txt +0 -0
  44. E9E4T4oBgHgl3EQffg1w/content/2301.05108v1.pdf +3 -0
  45. E9E4T4oBgHgl3EQffg1w/vector_store/index.faiss +3 -0
  46. E9E4T4oBgHgl3EQffg1w/vector_store/index.pkl +3 -0
  47. ENA0T4oBgHgl3EQfA_81/vector_store/index.pkl +3 -0
  48. EdA0T4oBgHgl3EQfA_-G/content/2301.01970v1.pdf +3 -0
  49. FNE5T4oBgHgl3EQfVQ8Q/content/2301.05549v1.pdf +3 -0
  50. FNE5T4oBgHgl3EQfVQ8Q/vector_store/index.faiss +3 -0
-NE0T4oBgHgl3EQfxAEX/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b787ec07c304c71a30da8725036f11da890551c8fca6093d322063f6ea2e015d
+ size 5046317
.gitattributes CHANGED
@@ -917,3 +917,70 @@ v9E2T4oBgHgl3EQfggeM/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -tex
917
  q9FAT4oBgHgl3EQffR3S/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
918
  j9FRT4oBgHgl3EQfWzfr/content/2301.13545v1.pdf filter=lfs diff=lfs merge=lfs -text
919
  j9FRT4oBgHgl3EQfWzfr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
920
+ udE3T4oBgHgl3EQf-Auh/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
921
+ SNE3T4oBgHgl3EQfDglb/content/2301.04287v1.pdf filter=lfs diff=lfs merge=lfs -text
922
+ MdE0T4oBgHgl3EQfSwDa/content/2301.02228v1.pdf filter=lfs diff=lfs merge=lfs -text
923
+ hNAzT4oBgHgl3EQfMvs5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
924
+ GtE1T4oBgHgl3EQfFQMH/content/2301.02899v1.pdf filter=lfs diff=lfs merge=lfs -text
925
+ MdE0T4oBgHgl3EQfSwDa/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
926
+ xdAzT4oBgHgl3EQf7v58/content/2301.01894v1.pdf filter=lfs diff=lfs merge=lfs -text
927
+ BNFIT4oBgHgl3EQf_iwr/content/2301.11415v1.pdf filter=lfs diff=lfs merge=lfs -text
928
+ BNFIT4oBgHgl3EQf_iwr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
929
+ FNE5T4oBgHgl3EQfVQ8Q/content/2301.05549v1.pdf filter=lfs diff=lfs merge=lfs -text
930
+ L9FAT4oBgHgl3EQfxB4e/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
931
+ D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf filter=lfs diff=lfs merge=lfs -text
932
+ wNE3T4oBgHgl3EQf-wup/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
933
+ GtE4T4oBgHgl3EQfgQ2M/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
934
+ GtE1T4oBgHgl3EQfFQMH/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
935
+ FNE5T4oBgHgl3EQfVQ8Q/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
936
+ -NE0T4oBgHgl3EQfxAEX/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
937
+ 39FKT4oBgHgl3EQfRS0O/content/2301.11770v1.pdf filter=lfs diff=lfs merge=lfs -text
938
+ LNE0T4oBgHgl3EQfiwE-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
939
+ 8dE3T4oBgHgl3EQfqgpk/content/2301.04652v1.pdf filter=lfs diff=lfs merge=lfs -text
940
+ PtE2T4oBgHgl3EQfVgea/content/2301.03824v1.pdf filter=lfs diff=lfs merge=lfs -text
941
+ wNE0T4oBgHgl3EQf-AKd/content/2301.02809v1.pdf filter=lfs diff=lfs merge=lfs -text
942
+ 09E4T4oBgHgl3EQfZwye/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
943
+ rNE3T4oBgHgl3EQfMQnG/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
944
+ Q9E4T4oBgHgl3EQflA2r/content/2301.05156v1.pdf filter=lfs diff=lfs merge=lfs -text
945
+ ydAzT4oBgHgl3EQftP1k/content/2301.01672v1.pdf filter=lfs diff=lfs merge=lfs -text
946
+ SNE3T4oBgHgl3EQfDglb/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
947
+ ydAzT4oBgHgl3EQftP1k/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
948
+ 39FKT4oBgHgl3EQfRS0O/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
949
+ xdAzT4oBgHgl3EQf7v58/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
950
+ NdFJT4oBgHgl3EQfGyyu/content/2301.11449v1.pdf filter=lfs diff=lfs merge=lfs -text
951
+ wNE0T4oBgHgl3EQf-AKd/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
952
+ NdFJT4oBgHgl3EQfGyyu/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
953
+ LNE0T4oBgHgl3EQfiwE-/content/2301.02449v1.pdf filter=lfs diff=lfs merge=lfs -text
954
+ vdFKT4oBgHgl3EQf4C7n/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
955
+ Q9E4T4oBgHgl3EQflA2r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
956
+ 89E3T4oBgHgl3EQfSAmG/content/2301.04428v1.pdf filter=lfs diff=lfs merge=lfs -text
957
+ E9E4T4oBgHgl3EQffg1w/content/2301.05108v1.pdf filter=lfs diff=lfs merge=lfs -text
958
+ EdA0T4oBgHgl3EQfA_-G/content/2301.01970v1.pdf filter=lfs diff=lfs merge=lfs -text
959
+ E9E4T4oBgHgl3EQffg1w/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
960
+ hNE0T4oBgHgl3EQfXwAk/content/2301.02296v1.pdf filter=lfs diff=lfs merge=lfs -text
961
+ 7tAyT4oBgHgl3EQf2_kh/content/2301.00759v1.pdf filter=lfs diff=lfs merge=lfs -text
962
+ hNE0T4oBgHgl3EQfXwAk/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
963
+ bNFJT4oBgHgl3EQf8y1j/content/2301.11685v1.pdf filter=lfs diff=lfs merge=lfs -text
964
+ L9E3T4oBgHgl3EQfYgo-/content/2301.04488v1.pdf filter=lfs diff=lfs merge=lfs -text
965
+ fdE3T4oBgHgl3EQf3Qt-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
966
+ GtE4T4oBgHgl3EQfgQ2M/content/2301.05115v1.pdf filter=lfs diff=lfs merge=lfs -text
967
+ ZNFPT4oBgHgl3EQfuTUu/content/2301.13155v1.pdf filter=lfs diff=lfs merge=lfs -text
968
+ VNAyT4oBgHgl3EQfhfi9/content/2301.00379v1.pdf filter=lfs diff=lfs merge=lfs -text
969
+ ZtFPT4oBgHgl3EQfuzVj/content/2301.13157v1.pdf filter=lfs diff=lfs merge=lfs -text
970
+ bNFJT4oBgHgl3EQf8y1j/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
971
+ o9FKT4oBgHgl3EQfyC7z/content/2301.11906v1.pdf filter=lfs diff=lfs merge=lfs -text
972
+ 1NFLT4oBgHgl3EQfpi-7/content/2301.12136v1.pdf filter=lfs diff=lfs merge=lfs -text
973
+ PtE2T4oBgHgl3EQfVgea/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
974
+ ctFKT4oBgHgl3EQfqC4I/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
975
+ ctFKT4oBgHgl3EQfqC4I/content/2301.11872v1.pdf filter=lfs diff=lfs merge=lfs -text
976
+ 89E3T4oBgHgl3EQfSAmG/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
977
+ ZNFPT4oBgHgl3EQfuTUu/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
978
+ o9FKT4oBgHgl3EQfyC7z/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
979
+ vdFKT4oBgHgl3EQf4C7n/content/2301.11932v1.pdf filter=lfs diff=lfs merge=lfs -text
980
+ XNFJT4oBgHgl3EQf5S05/content/2301.11669v1.pdf filter=lfs diff=lfs merge=lfs -text
981
+ 7tAyT4oBgHgl3EQf2_kh/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
982
+ PNE2T4oBgHgl3EQfVQfK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
983
+ BtAzT4oBgHgl3EQfGPuX/content/2301.01025v1.pdf filter=lfs diff=lfs merge=lfs -text
984
+ xtE3T4oBgHgl3EQf_gt3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
985
+ uNAzT4oBgHgl3EQfsP0n/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
986
+ o9FPT4oBgHgl3EQfLTSh/content/2301.13022v1.pdf filter=lfs diff=lfs merge=lfs -text
09E4T4oBgHgl3EQfZwye/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76b97ad6318f903acd11413d5c58a0409c30f0703d909050f30bb62264185586
+ size 4587565
1NFLT4oBgHgl3EQfpi-7/content/2301.12136v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:541e7e756342fdb441c7d5875b0bc9745aba1d7b427745a1048103fc4b61bbf0
+ size 1346695
1NFLT4oBgHgl3EQfpi-7/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a37deac867f1802fccf0869fd5f3c2956ce7717e7ed139fdc8540cf2dfe29a6
+ size 167702
39E2T4oBgHgl3EQfOAbJ/content/tmp_files/2301.03744v1.pdf.txt ADDED
@@ -0,0 +1,1562 @@
1
+ Inflation in Weyl Scaling Invariant Gravity with R3 Extensions
2
+ Qing-Yang Wanga, Yong Tanga,b,c,d, and Yue-Liang Wua,b,c,e
3
+ aUniversity of Chinese Academy of Sciences (UCAS), Beijing 100049, China
4
+ bSchool of Fundamental Physics and Mathematical Sciences,
5
+ Hangzhou Institute for Advanced Study, UCAS, Hangzhou 310024, China
6
+ cInternational Center for Theoretical Physics Asia-Pacific, Beijing/Hangzhou, China
7
+ dNational Astronomical Observatories, Chinese Academy of Sciences, Beijing 100101, China
8
+ eInstitute of Theoretical Physics, Chinese Academy of Sciences, Beijing 100190, China
9
+ (Dated: January 11, 2023)
10
+ Abstract
11
+ The cosmological observations of the cosmic microwave background and large-scale structure indicate
+ that our universe has a nearly scale-invariant power spectrum of primordial perturbations.
+ However, the exact origin of this primordial spectrum is still unclear. Here, we propose a
+ Weyl scaling invariant R2 + R3 gravity that gives rise to the inflation responsible for the
+ primordial perturbations in the early universe. We develop both analytic and numerical treatments
+ of the inflationary observables, and find that this model gives a distinctive scalar potential that
+ can support two different patterns of inflation. The first is similar to the one that occurs in the
+ pure R2 model, but with a wide range of tensor-to-scalar ratio r, from O(10−4) to O(10−2). The
+ other is a new situation with not only slow-roll inflation but also a short stage of oscillation-induced
+ accelerating expansion. Both patterns of inflation have viable parameter spaces that can be probed
+ by future experiments on the cosmic microwave background and primordial gravitational waves.
23
+ 1
24
+ arXiv:2301.03744v1 [astro-ph.CO] 10 Jan 2023
25
+
26
+ I.
27
+ INTRODUCTION
28
+ Inflation is a hypothetical epoch of exponential expansion introduced in the very early
29
+ universe to solve the cosmological horizon and flatness problems [1, 2]. It is also a reasonable
30
+ scheme to explain the origin of primordial density perturbations, which play the role of
+ the seeds that formed the structure of the current universe [3]. In recent years, the precise
32
+ measurement of cosmic microwave background (CMB) presents us with an almost scale
33
+ invariant spectrum of primordial perturbations [4]. This result is usually explained by an
34
+ approximate de Sitter spacetime of the very early universe [5–9]. Moreover, it has been
+ theoretically explored whether a more profound and basic principle lies behind this phenomenon,
+ namely, local Weyl scaling invariance of the universe. This symmetry was first proposed by
+ H. Weyl in an attempt to understand gravity and electromagnetism in a unified framework [10, 11],
38
+ and after a century of development, it has been applied extensively to particle physics,
39
+ cosmology [12–30] and gauge theory of gravity [31–34].
40
+ Lately, inflation in the Weyl scaling invariant theory of gravity, especially induced by
41
+ a quadratic curvature term R2, has attracted much attention [35–45]. Compared with the
+ conventional R2 model, which is also called the Starobinsky model [46–49], the scaling invariant
+ version not only allows a viable inflation scenario with good observational agreement, but
+ also provides a framework to address other fundamental puzzles, such as the hierarchy
+ problem [37, 40, 50] and dark matter candidates [41, 45].
46
+ However, inflation with only quadratic scalar curvature might be just a simplistic scenario.
47
+ From the viewpoint of effective field theory, any higher-order curvature effects may exist and
48
+ play a role in the early universe. Hence it is reasonable to evaluate their impacts on inflation.
49
+ Generally, the extensions with high-order tensors, like RµνRµν or RµνρσRµνρσ, can result in
50
+ unacceptable ghost degrees of freedom [51], while the terms of arbitrary functions of the
51
+ Ricci scalar are known to be safe. Therefore, in this paper, we consider a minimal extension
52
+ of Ricci scalar beyond the R2 model with Weyl scaling invariance, namely a cubic term
53
+ divided by an extra scalar field, R3/ϕ2. We will show that even if this
+ term is extremely small, it has an essential impact on inflation, and can even open up a
+ completely different inflationary scenario from the Weyl R2 and conventional R2 + R3 models.
56
+ The paper is organized as follows. In Sec. II, we develop the analytic formalism of Weyl
57
+ R2 + R3 model and derive the effective scalar potential. We show that in some cases, the
58
+ 2
59
+
60
+ potential has two different kinds of global minima, leading to two distinctive inflationary pat-
61
+ terns. In Sec. III, we investigate the inflation in the pattern of evolving to the side minimum.
62
+ We calculate the spectral index ns and tensor-to-scalar ratio r of the inflationary perturba-
63
+ tions, and give the preferred parameter space allowed by the latest observations. Analytical
64
+ treatments are developed for more transparent, physical understanding of the asymptotic
65
+ behaviors. Then in Sec. IV, we investigate the pattern of evolving to the center minimum.
66
+ A special process called “oscillating inflation” is considered in detail. Finally, conclusions
67
+ are given in Sec. V. We adopt the following conventions: metric ηµν = (−1, +1, +1, +1),
+ natural units ℏ = c = 1, and MP ≡ 1/√(8πG) = 2.435 × 10^18 GeV = 1.
71
+ II.
72
+ WEYL SCALING INVARIANT R2 + R3 MODEL
73
+ We start with the following Lagrangian for metric field gµν, scalar field ϕ, and Weyl gauge
74
+ field Wµ ≡ gWwµ with local scaling symmetry
75
+ \mathcal{L}/\sqrt{-g} = \frac{1}{2}\left( \phi^2 \hat{R} + \alpha \hat{R}^2 + \frac{\beta}{\phi^2}\hat{R}^3 \right) - \frac{\zeta}{2} D_\mu\phi\, D^\mu\phi - \frac{1}{4 g_W^2} F_{\mu\nu}F^{\mu\nu}.   (1)
89
+ Here g is the determinant of metric, α, β and ζ are constant parameters, Dµ = ∂µ − Wµ
90
+ is the covariant derivative associated with scaling symmetry, gW is the coupling constant,
91
+ Fµν ≡ ∂µWν − ∂νWµ defines the invariant field strength of Wµ, and ˆR is the Ricci scalar
92
+ defined by the local scaling invariant connection
93
+ \hat{\Gamma}^{\rho}_{\mu\nu} = \frac{1}{2} g^{\rho\sigma}\left[ (\partial_\mu + 2W_\mu) g_{\sigma\nu} + (\partial_\nu + 2W_\nu) g_{\mu\sigma} - (\partial_\sigma + 2W_\sigma) g_{\mu\nu} \right].   (2)
97
+ Explicit calculation shows the relation between ˆR and usual R defined by metric field gµν,
98
+ \hat{R} = R - 6 W_\mu W^\mu - \frac{6}{\sqrt{-g}}\, \partial_\mu\!\left( \sqrt{-g}\, W^\mu \right).   (3)
102
+ It is straightforward to verify the invariance of Eq. (1) under the following Weyl scaling
103
+ transformation
104
+ \text{metric: } g_{\mu\nu} \to g'_{\mu\nu} = f^2(x)\, g_{\mu\nu}, \qquad \text{scalar: } \phi \to \phi' = f^{-1}(x)\, \phi,
+ \text{Ricci scalar: } \hat{R} \to \hat{R}' = f^{-2}(x)\, \hat{R}, \qquad \text{Weyl vector: } W_\mu \to W'_\mu = W_\mu - \partial_\mu \ln f(x),   (4)
112
+ where f(x) is an arbitrary positive function.
113
+ 3
114
+
115
+ The purpose of exploring the Lagrangian in Eq. (1) is two-fold. Theoretically, such an
+ ˆR3 term constitutes a simple extension of the ˆR2 theory, motivated from the perspective of
118
+ effective field theories and also quantum loop corrections in more fundamental theories [31–
119
+ 34]. Phenomenologically, it is worthwhile to explore how such a term would modify the
120
+ cosmological observations related to inflation, and evaluate the likelihood and robustness of
121
+ the predictions in the lowest-order theories.
122
+ A.
123
+ Formalism in Einstein frame
124
+ General f(R) gravity is equivalent to the Einstein gravity with a scalar field [52, 53]. In
125
+ Ref. [41], we have extended the proof in general scaling invariant F( ˆR, ϕ) gravity. We can
126
+ explicitly show that by introducing an auxiliary scalar field χ and rewrite the high-order
127
+ curvature terms as
128
+ F(\hat{R}, \phi) \equiv \phi^2 \hat{R} + \alpha \hat{R}^2 + \frac{\beta}{\phi^2}\hat{R}^3 = F_{\hat{R}}(\hat{R}\to\chi^2, \phi)\,(\hat{R} - \chi^2) + F(\hat{R}\to\chi^2, \phi).   (5)
131
+ Here F ˆR denotes the derivative over ˆR, F ˆR = ∂F( ˆR, ϕ)/∂ ˆR. We can verify that the equiv-
132
+ alence relation χ2 = ˆR can be obtained from the Euler-Lagrange equation, δL
133
+ δχ = 0. Substi-
134
+ tuting Eq. (5) into Eq. (1), we find
135
+ \mathcal{L}/\sqrt{-g} = \frac{1}{2}\left( \phi^2 + 2\alpha\chi^2 + \frac{3\beta}{\phi^2}\chi^4 \right)\hat{R} - \frac{1}{2}\left( \alpha\chi^4 + \frac{2\beta}{\phi^2}\chi^6 \right) - \frac{\zeta}{2} D_\mu\phi\, D^\mu\phi - \frac{1}{4 g_W^2} F_{\mu\nu}F^{\mu\nu}.   (6)
155
+ Now we have demonstrated that linearization of ˆR has led to the non-minimal coupling of
156
+ the scalar field, χ.
157
+ We can transform the above Lagrangian into the Einstein frame by making a Weyl or
158
+ conformal transformation of the metric field. However, we note that scaling invariance is
159
+ still preserved in our model with χ → χ′ = f −1(x)χ. Therefore, we can directly normalize
160
+ the coefficient before the Ricci scalar as
161
+ \phi^2 + 2\alpha\chi^2 + 3\beta\chi^4/\phi^2 = 1,   (7)
163
+ due to the scaling invariance of Eq. (6). This is equivalent to making a Weyl transformation
164
+ with f(x) =
165
+
166
+ ϕ2 + 2αχ2 + 3βχ4/ϕ2 in Eq. (4). Further dropping the total derivative term
167
+ 4
168
+
169
+ in Eq. (3) due to its null surface integral, we can write the Lagrangian as
170
+ \mathcal{L}/\sqrt{-g} = \frac{1}{2}R - \frac{\zeta}{2} D_\mu\phi\, D^\mu\phi - V(\phi) - \frac{1}{4 g_W^2} F_{\mu\nu}F^{\mu\nu} - 3 W^\mu W_\mu
+ = \frac{R}{2} - \frac{\partial_\mu\phi\, \partial^\mu\phi}{2/\zeta + \phi^2/3} - V(\phi) - \frac{1}{4 g_W^2} F_{\mu\nu}F^{\mu\nu} - \frac{6 + \zeta\phi^2}{2}\left( W_\mu - \frac{\partial_\mu \ln|6 + \zeta\phi^2|}{2} \right)^2,   (8)
193
+ with the scalar potential
194
+ V(\phi) = \frac{\alpha}{2}\chi^4 + \frac{\beta}{\phi^2}\chi^6 = \frac{\alpha\left(\phi^4 - \phi^2\right)}{6\beta} + \frac{\alpha^3\phi^4}{27\beta^2}\left[ \left( 1 - \frac{3\beta}{\alpha^2}\left(1 - \phi^{-2}\right) \right)^{3/2} - 1 \right],   (9)
211
+ where we have solved χ from Eq. (7)
212
+ \chi^2 = \frac{\alpha\phi^2}{3\beta}\left[ \sqrt{1 - \frac{3\beta}{\alpha^2}\left(1 - \phi^{-2}\right)} - 1 \right].   (10)
220
+ It is now clear that we have a minimally-coupled scalar ϕ with a non-canonical kinetic
221
+ term. To further simplify the theoretical formalism, we introduce the following redefini-
222
+ tions for the scalar and the Weyl gauge field
223
+ \phi^2 \equiv \begin{cases} \dfrac{6}{|\zeta|}\sinh^2\!\left(\pm\dfrac{\Phi}{\sqrt{6}}\right) & \text{for } \zeta > 0, \\[2mm] \dfrac{6}{|\zeta|}\cosh^2\!\left(\pm\dfrac{\Phi}{\sqrt{6}}\right) & \text{for } \zeta < 0, \end{cases}   (11)
+ \tilde{W}_\mu \equiv W_\mu - \frac{1}{2}\partial_\mu \ln|6 + \zeta\phi^2| \equiv g_W \tilde{w}_\mu.   (12)
247
+ Then the final Lagrangian turns into a more compact form
248
+ \mathcal{L}/\sqrt{-g} = \frac{1}{2}R - \frac{1}{2}\partial_\mu\Phi\, \partial^\mu\Phi - V(\Phi) - \frac{1}{4 g_W^2}\tilde{F}_{\mu\nu}\tilde{F}^{\mu\nu} - \frac{1}{2} m^2(\Phi)\, \tilde{W}^\mu \tilde{W}_\mu,   (13)
+ with the mass term of the Weyl gauge field
+ m^2(\Phi) = \begin{cases} +6\cosh^2\!\left(\dfrac{\Phi}{\sqrt{6}}\right) & \text{for } \zeta > 0, \\[2mm] -6\sinh^2\!\left(\dfrac{\Phi}{\sqrt{6}}\right) & \text{for } \zeta < 0. \end{cases}   (14)
278
+ We should note that m2(Φ) is negative when ζ < 0. Therefore, to avoid Weyl gauge boson
279
+ becoming tachyonic in this case, it requires some other mechanisms to obtain a real mass,
280
+ for example, introducing other scalar field, which we do not explore in this paper. For viable
281
+ inflation, both positive and negative are possible, as we shall show later.
282
+ In the above discussion, we have demonstrated that Weyl scaling invariant ˆR2+ ˆR3 model
283
+ can be written equivalently as the Einstein gravity coupled with a self-interacting scalar Φ
284
+ 5
285
+
286
+ and a massive vector ˜Wµ with a field-dependent mass. This conclusion is also true for any
287
+ Weyl scaling invariant model of gravity with high-order curvature ˆRn as the above formalism
288
+ applies straightforwardly. It is also worth pointing out that the Weyl vector boson can serve
289
+ as a dark matter candidate [27, 28, 41], with details of the relic abundance being discussed
290
+ in [45]. In this paper, we shall concentrate on the scalar potential Eq. (9) and discuss the
291
+ viable inflation scenarios with the presence of ˆR3.
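+ As a quick numerical illustration of Eqs. (9)-(11), the sketch below (not part of the original
+ paper; the parameter values are illustrative assumptions) evaluates the Einstein-frame potential
+ V(Φ) for ζ > 0 by composing χ²(ϕ), V(ϕ), and the field redefinition ϕ²(Φ):
+ import numpy as np
+ alpha, gamma, zeta = 1e9, 1e-4, 1e3        # illustrative choices; gamma = 3*beta/alpha^2
+ beta = gamma * alpha**2 / 3.0
+ def V_of_phi2(phi2):
+     """Scalar potential of Eq. (9) as a function of phi^2 (units M_P = 1)."""
+     delta = 1.0 - (3.0 * beta / alpha**2) * (1.0 - 1.0 / phi2)
+     return (alpha * (phi2**2 - phi2) / (6.0 * beta)
+             + alpha**3 * phi2**2 / (27.0 * beta**2) * (delta**1.5 - 1.0))
+ def V_of_Phi(Phi):
+     """Potential along the canonical field Phi, using Eq. (11) for zeta > 0."""
+     phi2 = 6.0 / abs(zeta) * np.sinh(Phi / np.sqrt(6.0))**2
+     return V_of_phi2(phi2)
+ Phi = np.linspace(0.1, 10.0, 200)
+ print(V_of_Phi(Phi)[:3])                   # the resulting curve is qualitatively like Fig. 1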
292
+ B.
293
+ Effective scalar potentials
294
+ There are two necessary requirements for the potential Eq. (9). The first one is ϕ2 > 0
295
+ since ϕ is a real scalar field. The other is 1 − (3β/α²)(1 − ϕ⁻²) ≥ 0, otherwise an imaginary
303
+ potential will emerge. Consequently, there are some constraints on the parameters and the
304
+ viable value of Φ. We can rewrite the second requirement as
305
+ \sinh\!\left(\pm\frac{\Phi}{\sqrt{6}}\right) \;\geq\; \text{or} \;\leq\; \sqrt{\frac{|\zeta|}{6 - 2\alpha^2/\beta}} \quad \text{for } \zeta > 0, \qquad \cosh\!\left(\pm\frac{\Phi}{\sqrt{6}}\right) \;\geq\; \text{or} \;\leq\; \sqrt{\frac{|\zeta|}{6 - 2\alpha^2/\beta}} \quad \text{for } \zeta < 0,   (15)
+ where "≥" applies for β < α²/3 and "≤" for β ≥ α²/3. For convenience, we define λ ≡ √(|ζ|/(6 − 2α²/β))
+ and γ ≡ 3β/α², then discuss the possible ranges of the potential corresponding to different
+ parameters. The results are listed in Table I. To ensure theoretical stability, we
334
+ require that Φ can only evolve within these ranges where the potential is real. Fig. 1 shows
335
+ some instances of the scalar potential for several values of ζ and γ.
336
+ We first discuss the case of positive ζ. When γ = 0, it is a hill-top-like potential with two
337
+ minima at Φ = ±√6 sinh⁻¹√(ζ/6). However, as long as there is a tiny cubic curvature, whether
342
+ positive or negative, the shape of potential will be affected significantly. When γ > 0, the
343
+ potential turns to decrease near Φ = 0, and a third vacuum can form there. This behavior
344
+ is transparent, because when ζ > 0, Φ = 0 corresponds to ϕ2 = 0 according to Eq. (11),
345
+ then substituting it in Eq. (9) will obtain V |Φ=0 = 0. When γ < 0, the potential turns to
346
+ rise near Φ = 0 and become imaginary and unphysical in −√6 sinh⁻¹λ < Φ < √6 sinh⁻¹λ,
351
+ which has been listed in Table. I.
352
+ Next, we switch to the case of negative ζ. It is evident in Fig. 1 that when ζ < 0 and |ζ|
353
+ or |γ| is relatively small, the modification of ˆR3 term on the Weyl R2 potential is moderate,
354
+ 6
355
+
356
+ TABLE I. Effective potential range of the Weyl R2 + R3 model.
+ ζ             | γ or β               | real V(ϕ)
+ ζ > 0         | γ ≥ 1                | |Φ| ≤ √6 sinh⁻¹λ
+               | 0 ≤ γ < 1            | fully real
+               | γ < 0                | |Φ| ≥ √6 sinh⁻¹λ
+ −6 < ζ < 0    | γ > 1/(1+ζ/6)        | fully imaginary
+               | 1 < γ ≤ 1/(1+ζ/6)    | |Φ| ≤ √6 |cosh⁻¹λ|
+               | γ ≤ 1                | fully real
+ ζ ≤ −6        | γ ≥ 1                | |Φ| ≤ √6 |cosh⁻¹λ|
+               | 1/(1+ζ/6) < γ < 1    | fully real
+               | γ ≤ 1/(1+ζ/6)        | |Φ| ≥ √6 |cosh⁻¹λ|
398
+ unlike the dramatic change near Φ = 0 in the case of positive ζ. This is because the mapping
399
+ of Φ ⇒ ϕ2 does not cover the interval of ϕ2 < 1 for ζ < 0 according to Eq. (11). In other
400
+ words, for negative ζ with modest |γ|, Φ → 0 does not lead to ϕ2 → 0, which brings the
401
+ violent behavior of the potential around here in the case of ζ > 0. However, when ζ is
402
+ excessively negative or |γ| is large enough, the violent variation will reappear to a certain
403
+ extent. For γ > 0, the potential will return to a downward trend near Φ = 0, albeit there
404
+ is no true vacuum formed (but a false vacuum is formed). And for excessively negative γ,
405
+ the imaginary potential will reappear in the range of −√6 |cosh⁻¹λ| < Φ < √6 |cosh⁻¹λ|,
410
+ which we have listed in Table I (see the ζ ≤ −6, γ ≤ 1/(1+ζ/6) case).
413
+ Generally, inflation takes place when the potential is flat and Φ evolves to the vacuum
414
+ (Φ|V =0). The cosmological observations would restrict the potential and the initial value Φi
415
+ when inflation starts, here the Φi is defined as the value when the comoving horizon of the
416
+ inflationary universe shrinks to the same size as today.
417
+ For ζ > 0 and γ > 0, the scalar potential contains three separate vacua, one lying at the
418
+ center and the other two at both sides. Therefore, there are two different viable inflationary
419
+ patterns. One pattern refers to the evolution into the central minimum, and the other into
420
+ the side minima. We can calculate the value of Φ which corresponds to the hill-top of the
421
+ 7
422
+
423
468
+ FIG. 1. Effective potentials of the Weyl R2 + R3 model with α = 10^9 and various γ and ζ. Here we
469
+ only depict the real ranges of potentials.
470
+ potential in this case
471
+ \Phi_h = \pm\sqrt{6}\,\sinh^{-1}\!\sqrt{\frac{\zeta}{12}\,\frac{\sqrt{3\gamma} - 2\gamma}{3 - 4\gamma}}\,,   (16)
481
+ which is the critical point of two inflationary patterns. Neglecting the velocity, if the initial
482
+ value of inflation field satisfies |Φi| > |Φh|, it will evolve towards the side vacua. If |Φi| < |Φh|
483
+ at the beginning, the inflation field will evolve towards the central vacuum.
484
+ For other cases of ζ and γ, there are only the global side minima. Hence the only feasible
485
+ inflationary pattern is that Φ evolves to either one of the side minimum. The initial value
486
+ Φi has to correspond to a real potential, and when there is a false vacuum in ζ < 0 case, it
487
+ requires a large enough |Φi| outside two local maxima of the potential to ensure the gradient
488
+ of V (Φi) towards the true vacuum. Next, we are going to discuss the inflation in these two
489
+ patterns respectively.
490
+ 8
491
+
492
+ III.
493
+ INFLATION TO THE SIDE
494
+ In this inflation pattern, ϕ2 (defined as Eq. (11)) is usually not very close to 0, and as
495
+ we shall show later, observations generally would require an extremely small cubic curva-
496
+ ture, namely |γ| ≪ 1. Therefore in many cases, |γ(1 − ϕ−2)| ≪ 1 is satisfied. Under this
497
+ condition, we are able to have analytical treatment and expand the potential Eq. (9) as
498
+ V (ϕ) =ϕ4 − ϕ2
499
+ 2αγ
500
+ +
501
+ ϕ4
502
+ 3αγ2
503
+
504
+ −3γ
505
+ 2
506
+
507
+ 1 − 1
508
+ ϕ2
509
+
510
+ + 3γ2
511
+ 8
512
+
513
+ 1 − 1
514
+ ϕ2
515
+ �2
516
+ + γ3
517
+ 16
518
+
519
+ 1 − 1
520
+ ϕ2
521
+ �3
522
+ + O
523
+ �γ4
524
+ ϕ8
525
+ ��
526
+ = 1
527
+
528
+
529
+ 1 − ϕ2�2
530
+
531
+ 1 + γ
532
+ 6
533
+
534
+ 1 − 1
535
+ ϕ2
536
+
537
+ + O
538
+ �γ2
539
+ ϕ4
540
+ ��
541
+ .
542
+ (17)
543
+ Then with Eq. (11), we derive
544
+ V (Φ) =
545
+
546
+
547
+
548
+
549
+
550
+ 1
551
+
552
+
553
+ 1 − 6
554
+ |ζ| sinh2 �
555
+ Φ
556
+
557
+ 6
558
+ ��2 �
559
+ 1 + γ
560
+ 6
561
+
562
+ 1 − |ζ|
563
+ 6 csch2 �
564
+ Φ
565
+
566
+ 6
567
+ ��
568
+ + O(γ2)
569
+
570
+ for ζ > 0,
571
+ 1
572
+
573
+
574
+ 1 − 6
575
+ |ζ| cosh2 �
576
+ Φ
577
+
578
+ 6
579
+ ��2 �
580
+ 1 + γ
581
+ 6
582
+
583
+ 1 − |ζ|
584
+ 6 sech2 �
585
+ Φ
586
+
587
+ 6
588
+ ��
589
+ + O(γ2)
590
+
591
+ for ζ < 0.
592
+ (18)
593
+ The first term is exactly the effective potential of Weyl ˆR2, which has been shown in [41, 45],
594
+ and the rest originates from the cubic curvature term ˆR3, to the leading order of γ. Next
595
+ we shall calculate the inflationary physical quantities, the spectral index ns and tensor-to-
596
+ scalar ratio r, and contrast them with the latest observations. We first give an analytical
597
+ calculation for two limiting cases, then show the full numerical results for general cases.
598
+ A.
599
+ Analytical approach of γ → 0 case
600
+ We first discuss the γ → 0 case and show how ζ affects ns and r. The slow-roll parameters
601
+ in this case can be derived as
602
+ ϵ ≡ 1
603
+ 2
604
+ �V ′(Φ)
605
+ V
606
+ �2
607
+ =
608
+ 12 sinh2 �
609
+
610
+
611
+ 6
612
+
613
+
614
+ |ζ + 3| − 3 − 6 sinh2 �
615
+ Φ
616
+
617
+ 6
618
+ ��2,
619
+ (19)
620
+ η ≡ V ′′(Φ)
621
+ V
622
+ =
623
+ 12 cosh
624
+
625
+
626
+
627
+ 6
628
+
629
+ − 4|ζ + 3| cosh
630
+
631
+
632
+
633
+ 6
634
+
635
+
636
+ |ζ + 3| − 3 − 6 sinh2 �
637
+ Φ
638
+
639
+ 6
640
+ ��2
641
+ .
642
+ (20)
643
+ Generally, the slow-roll inflation occurs when ϵ and |η| is small enough, and it will end when
644
+ any of them evolves to ∼ 1. For the situation we are concerned with, ϵ breaks the slow-roll
645
+ 9
646
+
647
+ limit before the other. Thus we derive the value of Φ when inflation ends according to ϵ = 1
648
+ Φe =
649
+
650
+ 3
651
+ 2 ln
652
+
653
+ 2
654
+
655
+ |ζ + 3|2 + 3
656
+
657
+ 3
658
+ − |ζ + 3| +
659
+
660
+ 7
661
+ 3|ζ + 3|2 − 4|ζ + 3|
662
+
663
+ 3
664
+
665
+ |ζ + 3|2 + 3 + 3
666
+
667
+ . (21)
668
+ When |ζ| > O(102), which is a preferred range by the observational constraints as we will
669
+ show shortly, the above equation can be approximated as
670
+ Φe ≃
671
+
672
+ 3
673
+ 2 ln
674
+ � 1
675
+
676
+ 3
677
+
678
+ 2 +
679
+
680
+ 7 − 4
681
+
682
+ 3 −
683
+
684
+ 3
685
+
686
+ |ζ + 3|
687
+
688
+
689
+
690
+ 3
691
+ 2 ln (0.3094|ζ + 3|) .
692
+ (22)
693
+ It is now clear that when |ζ| is large enough, Φe will be almost independent of the sign of ζ.
694
+ Next, we calculate initial value Φi, which is defined when the size of comoving horizon
695
+ during inflation shrinks to the present size. We first focus on the e-folding number of the
696
+ slow-roll inflation
697
+ N ≡ ln ae
698
+ ai
699
+
700
+ � Φe
701
+ Φi
702
+
703
+
704
+ 2ϵ,
705
+ (23)
706
+ where ai/e ≡ a(Φi/e) is the cosmic scale factor when inflation starts/ends.
707
+ Substituting
708
+ Eq. (19) into it, we find
709
+ N =
710
+ (|ζ + 3| − 3) ln
711
+
712
+ tanh
713
+
714
+ Φ
715
+
716
+ 6
717
+ ��
718
+ − 6 ln
719
+
720
+ cosh
721
+
722
+ Φ
723
+
724
+ 6
725
+ ��
726
+ 4
727
+ �����
728
+ Φe
729
+ Φi
730
+ = |ζ + 3| − 3
731
+ 4
732
+ ln
733
+
734
+ �tanh
735
+ � 1
736
+ 2 ln(0.3094|ζ + 3|)
737
+
738
+ tanh
739
+
740
+ Φi
741
+
742
+ 6
743
+
744
+
745
+ � − 3
746
+ 2 ln
747
+
748
+ �cosh
749
+ � 1
750
+ 2 ln(0.3094|ζ + 3|)
751
+
752
+ cosh
753
+
754
+ Φi
755
+
756
+ 6
757
+
758
+
759
+ � .
760
+ (24)
761
+ For the circumstances we are concerned with, namely N ∼ (50, 60) and |ζ| > O(102), the
762
+ second term of Eq. (24) is much smaller than the first term, and it can be estimated as
763
+ ∼ −2.3. Thus we derive
764
+ Φi ≃
765
+
766
+ 6 tanh−1
767
+ ��
768
+ 1 −
769
+ 2
770
+ 0.3094|ζ + 3| + 1
771
+
772
+ e
773
+ −4(N+2.3)
774
+ |ζ+3|−3
775
+
776
+
777
+
778
+ 6 tanh−1 Ω(ζ, N).
779
+ (25)
780
+ Here we have defined Ω(ζ, N) for later convenience.
781
+ When |ζ| ≫ 4N, it can be further approximated as Φi ≃
782
+
783
+ 3
784
+ 2 ln
785
+ |ζ|
786
+ 2N+7.8. Substituting
787
+ Eq. (25) into Eq. (19) and (20), we find
788
+ ϵi =
789
+ 48Ω2
790
+ [(Ω2 − 1)|ζ + 3| + 3(Ω2 + 1)]2,
791
+ (26)
792
+ ηi =4 [(Ω4 − 1)|ζ + 3| + 3(Ω4 + 6Ω2 + 1)]
793
+ [(Ω2 − 1)|ζ + 3| + 3(Ω2 + 1)]2
794
+ .
795
+ (27)
796
+ 10
797
+
798
+ As a result, the tensor-to-scalar ratio r and spectral index ns of inflationary perturbations
799
+ in the γ → 0 limit are finally calculated as
800
+ r = 16ϵi =
801
+ 768Ω2
802
+ [(Ω2 − 1)|ζ + 3| + 3(Ω2 + 1)]2,
803
+ (28)
804
+ ns = 1 − 6ϵi + 2ηi = 1 + 8(Ω4 − 1)|ζ + 3| + 24(Ω4 − 6Ω2 + 1)
805
+ [(Ω2 − 1)|ζ + 3| + 3(Ω2 + 1)]2
806
+ .
807
+ (29)
808
+ For N ∼ (50, 60) and |ζ| > O(10^2), we can approximate the expressions as
+ r \simeq r^* - \frac{54}{\zeta^2},   (30)
+ n_s \simeq n_s^* - \frac{11 N}{\zeta^2},   (31)
+ where
+ r^* \simeq \frac{12}{(N + 3.55)^2}, \qquad n_s^* \simeq 1 - \frac{2}{N + 3.55} - \frac{3}{(N + 3.55)^2}   (32)
826
+ are the predictions of Starobinsky model (see Appendix A for an analytical derivation.).
827
+ Thus it is evident that the predictions of inflationary perturbations in our model will converge
828
+ to that of Starobinsky model when γ → 0 and ζ → ∞. As |ζ| decreases, the value of r and
829
+ ns will also decrease. We show this trend as the pink area in Fig. 2. According to the latest
830
+ observation [54], the lower limit of ns has been constrained to ∼ 0.959, hence it requires
831
+ |ζ| > 270 in this γ → 0 case.
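+ For orientation, the approximate relations of Eqs. (30)-(32) are easy to evaluate numerically.
+ The short sketch below (not from the paper; it only reproduces the leading-order estimates) scans
+ a few values of ζ at N = 60 in the γ → 0 limit:
+ def starobinsky(N):
+     r_star = 12.0 / (N + 3.55)**2
+     ns_star = 1.0 - 2.0 / (N + 3.55) - 3.0 / (N + 3.55)**2
+     return r_star, ns_star
+ def gamma_to_zero(N, zeta):
+     """Leading-order r and n_s of Eqs. (30)-(31) for the gamma -> 0 case."""
+     r_star, ns_star = starobinsky(N)
+     return r_star - 54.0 / zeta**2, ns_star - 11.0 * N / zeta**2
+ for zeta in (270.0, 500.0, 1000.0, 1e4):
+     r, ns = gamma_to_zero(60, zeta)
+     print(f"zeta = {zeta:7.0f}: ns = {ns:.4f}, r = {r:.4f}")
+ For ζ ≃ 270 this gives n_s ≃ 0.959, consistent with the bound quoted above.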
832
+ B.
833
+ Analytical approach of ζ → ∞ case
834
+ Now we discuss the ζ → ∞ case and show how γ affects r and ns. When ζ is large enough,
835
+ the potential is greatly widened. The side vacua are far away from 0 and so are Φi and Φe
836
+ (e.g., Φi ∼ 5.4MP, Φe ∼ 9.8MP for ζ = 104). Therefore Eq. (11) can be approximated as
837
+ ϕ2 = 6
838
+ |ζ|
839
+
840
+ eΦ/
841
+
842
+ 6 ± e−Φ/
843
+
844
+ 6
845
+ 2
846
+ �2
847
+ ≃ e
848
+
849
+ 2/3
850
+
851
+ Φ−√
852
+ 3/2 ln(2|ζ|/3)
853
+
854
+ ≡ e
855
+
856
+ 2/3(Φ−Φ0).
857
+ (33)
858
+ Here and after, without losing generality, we may choose to evolve in the positive Φ region,
859
+ and denote Φ0 as the minimum in this region. Substituting it into Eq. (17), we have the
860
+ scalar potential for Φ ≫ 0
861
+ V (Φ) = 1
862
+
863
+
864
+ 1 − e
865
+
866
+ 2/3(Φ−Φ0)�2 �
867
+ 1 + γ
868
+ 6
869
+
870
+ 1 − e−√
871
+ 2/3(Φ−Φ0)�
872
+ + O(γ2)
873
+
874
+ .
875
+ (34)
876
+ 11
877
+
878
+ FIG. 2. The predictions of spectral index ns combined with tensor-to-scalar ratio r in the Weyl
879
+ R2 + R3 model with e-folding number N ∼ (50, 60). The pink area shows the results in the γ → 0
880
+ case with various ζ. The yellow and green areas respectively show the ζ → ∞ and ζ = −650 cases
881
+ with various γ. The red line is the result with both γ → 0 and ζ → ∞, which is equivalent to the
882
+ Starobinsky model. The blue area is the latest observation constraint given by the BICEP/Keck
883
+ collaboration [54].
884
+ Ignoring the O(γ2) terms, we give an approximate expression for the slow-roll parameters
885
+ ϵ ≡ 1
886
+ 2
887
+ �V ′(Φ)
888
+ V
889
+ �2
890
+
891
+
892
+ γe
893
+
894
+ 2/3(Φ−Φ0) − 2(γ + 6)e
895
+
896
+ 8/3(Φ−Φ0) + γ
897
+ �2
898
+ 3
899
+
900
+ e
901
+
902
+ 2/3(Φ−Φ0) − 1
903
+ �2 �
904
+ γ − (γ + 6)e
905
+
906
+ 2/3(Φ−Φ0)�2,
907
+ (35)
908
+ η ≡ V ′′(Φ)
909
+ V
910
+ ≃ 6(γ + 4)e
911
+
912
+ 8/3(Φ−Φ0) − 8(γ + 6)e
913
+
914
+ 6(Φ−Φ0) + 2γ
915
+ 3
916
+
917
+ e
918
+
919
+ 2/3(Φ−Φ0) − 1
920
+ �2 �
921
+ γ − (γ + 6)e
922
+
923
+ 2/3(Φ−Φ0)�.
924
+ (36)
925
+ In this case, the slow-roll inflation also ends at ϵ ∼ 1. To find the expression of Φe, we
926
+ further approximate Eq. (35) as
927
+ ϵ ≃
928
+ e−√
929
+ 8/3(Φ−Φ0) �
930
+ γ − 12e
931
+
932
+ 8/3(Φ−Φ0)�2
933
+ 108
934
+
935
+ e
936
+
937
+ 2/3(Φ−Φ0) − 1
938
+ �2
939
+ .
940
+ (37)
941
+ 12
942
+
943
965
+ Then Φe can be derived as
966
+ Φe = Φ0 −
967
+
968
+ 3
969
+ 2 ln
970
+ �√
971
+ 3
972
+ γ
973
+ ��
974
+ 2(2 +
975
+
976
+ 3)γ + 9 − 3
977
+ ��
978
+ .
979
+ (38)
980
+ If γ is extremely small, we will find Φe ≃ Φ0 − 0.94MP.
981
+ Next, we derive the analytic formula for Φi in this case. The e-folding number of the
982
+ slow-roll inflation can be calculated with Eq. (37) as
983
+ N = −
984
+ �27
985
+ 4γ tanh−1
986
+ �� γ
987
+ 12e−√ 2
988
+ 3 (Φ−Φ0)
989
+
990
+ − 3
991
+ 8 ln
992
+
993
+ 12 − γe−√ 8
994
+ 3 (Φ−Φ0)�
995
+
996
+
997
+ 6
998
+ 4 (Φ − Φ0)
999
+ ����
1000
+ Φe
1001
+ Φi
1002
+ . (39)
1003
+ Considering N ∼ (50, 60) and γ < O(10−3), the first term of the integral is dominant, while
1004
+ the rest are the marginal terms which can be approximately treated as a constant, ∼ −2.7.
1005
+ Hence we have
1006
+ N ≃
1007
+ �27
1008
+
1009
+
1010
+ tanh−1
1011
+ �� γ
1012
+ 12e−√
1013
+ 2/3(Φi−Φ0)
1014
+
1015
+ − tanh−1
1016
+ �� γ
1017
+ 12e−√
1018
+ 2/3(Φe−Φ0)
1019
+ ��
1020
+ − 2.7,
1021
+ (40)
1022
+ and derive
1023
+ Φi = Φ0 −
1024
+
1025
+ 3
1026
+ 2 ln
1027
+ �����
1028
+ �12
1029
+ γ tanh
1030
+
1031
+ tanh−1
1032
+ �� γ
1033
+ 12e−√
1034
+ 2/3(Φe−Φ0)
1035
+
1036
+ +
1037
+
1038
+
1039
+ 27(N + 2.7)
1040
+ ������
1041
+ ≃ Φ0 −
1042
+
1043
+ 3
1044
+ 2 ln
1045
+ �����
1046
+ �12
1047
+ γ tanh
1048
+
1049
+ tanh−1 (0.622√γ) +
1050
+
1051
+
1052
+ 27(N + 2.7)
1053
+ ������
1054
+ ≡ Φ0 −
1055
+
1056
+ 3
1057
+ 2 ln Θ(γ, N),
1058
+ (41)
1059
+ where we have defined Θ(γ, N) for later convenience. Then substituting it into Eq. (35) and
1060
+ (36), we find
1061
+ ϵi = [γΘ(1 + Θ) − 2(γ + 6)]2
1062
+ 3 [1 − Θ]2 [γΘ − (γ + 6)]2,
1063
+ (42)
1064
+ ηi = 2γΘ3 + 6(γ + 4)Θ − 8(γ + 6)
1065
+ 3 [1 − ��]2 [γΘ − (γ + 6)]
1066
+ .
1067
+ (43)
1068
+ Finally, we derive r and ns of the inflationary perturbations in the ζ → ∞ limit
1069
+ r = 16ϵi = 16 [γΘ(1 + Θ) − 2(γ + 6)]2
1070
+ 3 [1 − Θ]2 [γΘ − (γ + 6)]2 ,
1071
+ (44)
1072
+ ns = 1 − 6ϵi + 2ηi = 1 − 2 [γΘ(1 + Θ) − 2(γ + 6)]2
1073
+ [1 − Θ]2 [γΘ − (γ + 6)]2 + 4γΘ3 + 3(γ + 4)Θ − 4(γ + 6)
1074
+ 3 [1 − Θ]2 [γΘ − (γ + 6)]
1075
+ . (45)
1076
+ 13
1077
+
1078
+ If γ is extremely small, smaller than O(10−4), the above expressions can be linearly approx-
1079
+ imated as
1080
+ r \simeq r^* - 2.4\,\gamma,   (46)
+ n_s \simeq n_s^* - 0.42\,\gamma N,   (47)
+ where r^* and n_s^* have been defined in the last paragraph of Sec. III A. We can see that
1087
+ compared with the predictions of Starobinsky model, a positive γ will reduce both r and
1088
+ ns, while a negative γ will increase them. We show this trend as the yellow area in Fig. 2.
1089
+ It is manifest that the observations have constrained |γ| ≲ 5 × 10−4 in this ζ → ∞ case.
1090
+ Actually, this result agrees with other numerical investigations of the R3-extended Starobin-
1091
+ sky model [55–61], since the potential Eq. (34) is the same as the R3-extended Starobinsky
1092
+ model with a vacuum shift. Moreover, compared with Eq. (30) and (31), we note that the
1093
+ predictions of r and ns in the γ → 0 case are similar to those of the ζ → ∞ and γ > 0 case
+ with the simple replacement γ ↔ 24/ζ². This can be seen more clearly from Fig. 2, where the
1096
+ pink area overlaps with the yellow area with γ > 0.
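+ A similarly minimal sketch (not from the paper) of the linearized ζ → ∞ estimates, Eqs. (46)-(47),
+ shows how a small γ of either sign shifts the Starobinsky values r^* and n_s^*:
+ def zeta_to_infinity(N, gamma):
+     """Linearized r and n_s of Eqs. (46)-(47); valid for |gamma| well below 1e-3."""
+     r_star = 12.0 / (N + 3.55)**2
+     ns_star = 1.0 - 2.0 / (N + 3.55) - 3.0 / (N + 3.55)**2
+     return r_star - 2.4 * gamma, ns_star - 0.42 * gamma * N
+ for gamma in (-5e-4, 0.0, 1e-4, 5e-4):
+     r, ns = zeta_to_infinity(60, gamma)
+     print(f"gamma = {gamma:+.1e}: ns = {ns:.4f}, r = {r:.4f}")
+ With N = 60, γ = 5 × 10⁻⁴ already pushes n_s down to ≃ 0.955, which is why the observations
+ bound |γ| ≲ 5 × 10⁻⁴ in this limit.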
1097
+ C.
1098
+ General cases
1099
+ Now we discuss the general cases with various ζ and γ by numerical treatment. The
1100
+ results are shown in Fig. 3. Here the parameter ranges satisfying observational constraints
1101
+ (see blue area in Fig. 2) are marked with colored areas, where the color gradient from blue to
1102
+ red corresponds to ascending value of r. The gray areas represent that the potential defined
1103
+ by these parameters cannot support an adequate inflation. In other words, their maximal
1104
+ e-folding number is unable to reach N = 50 or 60. The white areas are the parameter ranges
1105
+ that can give rise to ample inflation, but their prediction of ns or r has been excluded by
1106
+ the observation constraints. Here we mark two dotted lines to distinguish the boundaries of
1107
+ constraints. Beyond the pink one indicates a large ns that exceeds the observational upper
1108
+ limit, while beyond the green one signifies a too small prediction.
1109
+ Let us focus on the colored parameter ranges that are allowed by observations. In the
1110
+ |ζ| ≫ 1000 case, the result is roughly equivalent to the analytical calculation shown in the
1111
+ last subsection. The prediction of r is limited to 0.002 < r < 0.006. However, distinctive
1112
+ situations appear when |ζ| is small. First, when −1000 < ζ < −200, the restrictions on
1113
+ γ are relaxed, allowing |γ| as large as ∼ 6 × 10−3. Besides, the upper limit of r is
1114
+ 14
1115
+
1116
+ FIG. 3. Possible parameter space for Weyl R2 + R3 model when Φ evolves to the side vacuum.
1117
+ The colored areas are the parameter ranges allowed by the latest observations of BICEP/Keck
1118
+ collaboration [54], where the color gradient from blue to red corresponds to r increases from 0.001
1119
+ to the observational upper limit 0.036. The dotted lines are the boundaries that ns exceeds the
1120
+ observational upper (pink line) or lower (green line) limit. The gray areas represent the parameter
1121
+ ranges with inadequate inflation, namely, the maximal e-folding number of inflation cannot reach
1122
+ N = 50 or 60.
1123
+ greatly expanded. There is even a small parameter range that gives r > 0.01. We show an
1124
+ example as the green area in Fig. 2. It clearly shows a distinguishable feature from the Weyl
1125
+ R2 model and the R3-extended Starobinsky model. If the next generation experiment of
1126
+ CMB B-mode polarization detects the primordial gravitational waves with r > 0.01, it may
1127
+ support Weyl R2 + R3 model. Another notable feature emerges at 0 < ζ < 200, where the
1128
+ 15
1129
+
1130
1167
+ negative γ, even if very small, can greatly affect the predictions of primordial perturbations.
+ Actually, there are some cases with small positive ζ and small negative γ that give proper
1169
+ r and ns that match the observation constraints, and generally, r is extremely small. For
1170
+ instance, when ζ = 80, γ = −4 × 10−8, and N = 60, we have ns = 0.963 and r = 3 × 10−4.
1171
+ IV.
1172
+ INFLATION TO THE CENTER
1173
+ As we mentioned earlier, the third vacuum appears at Φ = 0 in the case of ζ > 0 and
1174
+ γ > 0, and if the initial value satisfies |Φi| < |Φh| (Φh is defined in Eq. (16)), inflation can
1175
+ happen in the evolution of Φ to 0. Actually, the situation is more complicated. A process
1176
+ called “oscillating inflation” [62–74] will continue immediately after the end of slow-roll
1177
+ inflation because the scalar potential in this case is a non-convex function in the region close
1178
+ to the vacuum, which means d²V/dΦ² < 0 when Φ nears 0. In other words, for such a
+ non-convex potential, even though the slow-roll conditions (ϵ ≪ 1 and |η| ≪ 1) have been violated
+ during the bottom oscillation of the inflaton potential, the universe can keep expanding with
+ acceleration until the average amplitude of the inflaton's oscillation drops below the point where
+ d²V/dΦ² turns from negative to positive (if there is a rounded transition in a small enough
1185
+ ∆Φ at the bottom to connect the left and right sides of the potential, see [62]), or until the
1186
+ contribution of the radiation produced in reheating process becomes non-negligible.
1187
+ It is helpful to understand the behavior of oscillating inflation from the perspective of the
1188
+ effective equation of state. For an oscillating scalar field Φ, its effective equation of state in
1189
+ one oscillating period is defined as
1190
+ \langle w \rangle \equiv \frac{\langle p \rangle}{\langle \rho \rangle} = \frac{\langle \dot{\Phi}^2 - \rho \rangle}{\langle \rho \rangle} = \frac{\langle \dot{\Phi}^2 \rangle}{V_m} - 1 = \frac{\langle \Phi\, dV/d\Phi \rangle}{V_m} - 1 = 1 - \frac{2\langle V \rangle}{V_m},   (48)
1202
+ where ⟨⟩ means the average value in one oscillation period, and Vm represents the maximal
1203
+ potential of this oscillation period.
1204
+ The accelerating expansion of the universe requires ⟨w⟩ < −1/3, which is equivalent to the relation
+ U \equiv \left\langle V - \Phi \frac{dV}{d\Phi} \right\rangle > 0.   (49)
1210
+ In fact, U amounts to the intercept of the tangent to the potential at a certain Φ, shown
1211
+ as the upper part of Fig. 4. As long as the intercept is positive and the contribution of
1212
+ radiation is insignificant, the accelerating expansion will proceed successfully. This is the
1213
+ reason why a non-convex potential can bring about oscillating inflation.
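+ The tangent-intercept criterion of Eq. (49) is easy to check on a toy potential. The sketch below
+ (illustrative only; the power-law form and exponent are assumptions, not the model's potential)
+ confirms that any V ∝ |Φ|^p with p < 1 keeps U > 0 at every oscillation amplitude:
+ import numpy as np
+ p = 0.5                                    # toy exponent; the model's Eq. (54) is the quasi-linear case p -> 1
+ V = lambda Phi: np.abs(Phi)**p
+ dV = lambda Phi: p * np.sign(Phi) * np.abs(Phi)**(p - 1)
+ def intercept_U(Phi):
+     """U = V - Phi*dV/dPhi; U > 0 at the oscillation amplitude means <w> < -1/3, Eq. (49)."""
+     return V(Phi) - Phi * dV(Phi)
+ for amp in (1.0, 0.1, 0.01):
+     print(amp, intercept_U(amp) > 0)       # always True for p < 1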
1214
+ 16
1215
+
1216
+ For the process with oscillating inflation, the definition of e-folding number should be
1217
+ replaced by
1218
+ ˜N ≡ ln afHf
1219
+ aiHi
1220
+ ≡ ln aeHe
1221
+ aiHi
1222
+ + ln aoHo
1223
+ aiHi
1224
+ ≃ N + No,
1225
+ (50)
1226
+ where the subscripts i and e have been defined in the last section, af and Hf represent
1227
+ the cosmic scale and Hubble parameter when the full inflationary period ends, ao and Ho
1228
+ represent their multiple of increase or decrease during the oscillating inflation. It indicates
1229
+ that the new definition is equivalent to adding a correction No based on the e-folding number
1230
+ of slow-rolling period if we take He ≈ Hi. Generally, No is related to the shape of potential
1231
+ near its vacuum, reheating efficiency, and the scale of the aforementioned rounded bottom.
1232
+ Given that our model does not possess an explicit rounded bottom, No depends only on
1233
+ the first two aspects.
1234
+ For the shape of potential, actually, our model has the following
1235
+ approximate form near the center vacuum
1236
+ V(\Phi) \simeq \frac{\xi\left(\Phi^4 - \Phi^2\right)}{2\alpha} + \frac{\xi^2\Phi^4}{3\alpha}\left[ \left(1 + \frac{1}{\xi\Phi^2}\right)^{3/2} - 1 \right],   (51)
+ where ξ ≡ α²/(3βζ). Since α determines the height of the potential, which has been fixed for
+ each set of ζ and β according to the observed amplitude ∆s² ∼ V/(24π²ϵ) ∼ 2.1 × 10⁻⁹ [75], the
1256
+ shape of the potential is essentially determined by ξ in the oscillatory region. For reheating
1257
+ efficiency, we consider a constant transfer rate Γ and the transferred energy all turns to
1258
+ radiation ρr
1259
+ \ddot{\Phi} + (3H + \Gamma)\dot{\Phi} + \frac{dV}{d\Phi} = 0,   (52)
+ \dot{\rho}_r + 4H\rho_r - \Gamma\dot{\Phi}^2 = 0.   (53)
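+ A minimal integration sketch for Eqs. (52)-(53) is given below. It is not the paper's code: the
+ potential is a toy quasi-linear form mimicking Eq. (54) near Φ = 0, and the initial amplitude,
+ slope m2, and transfer rate Γ are illustrative assumptions.
+ import numpy as np
+ from scipy.integrate import solve_ivp
+ m2, Gamma = 1e-10, 1e-9                    # illustrative slope and transfer rate (M_P = 1)
+ V = lambda Phi: m2 * np.abs(Phi)           # toy quasi-linear potential near the center vacuum
+ dV = lambda Phi: m2 * np.sign(Phi)
+ def rhs(t, y):
+     Phi, dPhi, rho_r = y
+     H = np.sqrt((0.5 * dPhi**2 + V(Phi) + rho_r) / 3.0)   # Friedmann equation
+     return [dPhi,
+             -(3.0 * H + Gamma) * dPhi - dV(Phi),           # Eq. (52)
+             -4.0 * H * rho_r + Gamma * dPhi**2]            # Eq. (53)
+ sol = solve_ivp(rhs, (0.0, 1e7), [0.3, 0.0, 0.0], rtol=1e-6, atol=1e-14, max_step=1e4)
+ print(sol.y[0, -1], sol.y[2, -1])          # final field amplitude and radiation density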
1264
+ Then No is substantially related to the parameters ξ and Γ.
1265
+ We numerically solve the above equations, and visualize in the lower part of Fig. 4. It
1266
+ is transparent that if ξ ≫ 0.1, oscillating inflation will bring appreciable correction to the
1267
+ e-folding number.
1268
+ Because an inefficient reheating process will postpone the end of the
1269
+ oscillating inflation, we can see a smaller Γ corresponds to a larger No for a certain ξ.
1270
+ However, No will tend to a fixed value as Γ decreases. This property can be understood as
1271
+ follows. We can prove that the potential has a quasi-linear form when Φ → 0
1272
+ V(\Phi)\big|_{\Phi \to 0} \simeq \frac{\sqrt{\xi}}{3\alpha}\,|\Phi|,   (54)
1276
+ 17
1277
+
1278
1295
+ FIG. 4. Oscillating inflation in the center-evolving pattern of Weyl R2 + R3 model. The upper
1296
+ part is a diagram for visualizing the condition of oscillating inflation, where the effective equation
+ of state ⟨w⟩ < −1/3 is equivalent to requiring a positive intercept U of the tangent to the point on the
1299
+ potential corresponding to the average amplitude is positive. The lower part shows the increased
1300
+ e-folding number during the oscillating inflation for various ξ and reheating efficiency Γ.
1301
+ which implies that U|Φ→0 → 0 according to its definition as the intercept of the tangent to
1302
+ the potential. Hence ⟨w⟩ will quickly converge to −1/3 as the oscillation proceeds, and No will
1304
+ soon grow to a nearly constant maximum if Γ is too small to make the universe promptly
1305
+ produce enough radiation to stop the oscillating inflation. This is the reason why No has an
1306
+ extreme for each ξ.
1307
+ Now we consider inefficient reheating, that is, we adopt No with Γ → 0, to derive
1308
+ the slow-roll e-folding number N corresponding to ˜N ∼ (50, 60), and then to calculate ns
1309
+ 18
1310
+
1311
1332
+ FIG. 5. Possible parameter space for Weyl R2 + R3 model when Φ evolves to the center vacuum.
1333
+ Here the total e-folding number ˜N ≡ N + No is considered with Γ → 0. The meaning of markers
1334
+ is the same as that in Fig. 3, except for the color correspondence of r.
1335
+ and r for various parameters ζ and γ. The viable parameter space is depicted in Fig. 5,
1336
+ where the meaning of markers is the same as that in Fig. 3, except for the scale of color
1337
+ bar. It is evident that the observation constraint on ns limits the parameters to ζ > 103 and
1338
+ γ < 5 × 10−4. r has an upper limit ∼ 0.006, but no lower limit in this case.
1339
+ V.
1340
+ CONCLUSIONS
1341
+ Cosmological observations have suggested that our universe has a nearly scaling invariant
1342
+ power spectrum of the primordial density perturbation, which motivates the scaling sym-
1343
+ 19
1344
+
1345
+ metry as the possible feature of the underlying fundamental theories that lead to inflation.
1346
+ We present the theoretical formalism of the Weyl scaling invariant gravity, ˆR2 + ˆR3. We
1347
+ show this model in Eq. (1) can be rewritten equivalently to the Einstein gravity coupled
1348
+ with a massive gauge boson, and a scalar field as the inflaton. We further discuss the viable
1349
+ ranges of the scalar potential according to the requirement for reality and demonstrate how
1350
+ the R3 term would affect the shape of potentials. Compared with the Weyl R2 inflationary
1351
+ potential [41, 45] with two side minima, the R3 extension brings an additional minimum at
1352
+ center. Hence, there are two viable scenarios for the inflation in this model. The first is
1353
+ to roll towards the side minima, while the other is a new situation of rolling towards the
1354
+ center minimum. Both scenarios allow viable parameter spaces that can be probed by future
+ experiments on the cosmic microwave background and primordial gravitational waves.
1356
+ For the first scenario, we calculate the spectral index ns and tensor-to-scalar ratio r
1357
+ of primordial perturbations both analytically and numerically, and contrast the parameter
1358
+ spaces with the latest observational constraints. The results manifest that the level of cubic
1359
+ curvature is limited to |γ| < 6×10−3, and the prediction of r in this pattern has a wide range
1360
+ from O(10−4) to the upper limit of the observations, O(10−2). These results are significantly
1361
+ different from the R3-extended Starobinsky model.
1362
+ For the second scenario, a special process called oscillating inflation emerges after the
1363
+ familiar slow-roll inflation because the potential near the center minimum is a non-convex
1364
+ function that can lead to a sufficiently negative value of the average equation of state. We
1365
+ calculate the correction to the e-folding number from the oscillating inflation stage, and then
1366
+ derive the viable parameter spaces. The results indicate that the parameters are limited to
1367
+ γ < 5 × 10^−4 and ζ > 10^3. Moreover, r has an upper limit of ∼ 0.006, but no lower limit.
1369
+ ACKNOWLEDGMENTS
1370
+ QYW and YT thank Shi Pi for helpful discussions. YT is supported by National Key Re-
1371
+ search and Development Program of China (Grant No.2021YFC2201901), and Natural Sci-
1372
+ ence Foundation of China (NSFC) under Grants No. 11851302. YLW is supported by the Na-
1373
+ tional Key Research and Development Program of China under Grant No.2020YFC2201501,
1374
+ and NSFC under Grants No. 11690022, No. 11747601, No. 12147103, and the Strategic Prior-
1375
+ ity Research Program of the Chinese Academy of Sciences under Grant No. XDB23030100.
1376
1378
+ Appendix A: Analytical treatment of Starobinsky inflation
1379
+ We give an analytical calculation of the tensor-to-scalar ratio r and spectral index ns in
1380
+ the Starobinsky inflationary model, namely, the Einstein gravity modified by a R2 term.
1381
+ The effective scalar potential can be written as
1382
+ V(φ) = (1/(8α)) (1 − e^{−√(2/3) φ})² ,    (A1)
+ where α is the coefficient of R². The relevant two slow-roll parameters are computed as
+ ϵ = (4/3) / (e^{√(2/3) φ} − 1)² ,    η = −(4/3) (e^{√(2/3) φ} − 2) / (e^{√(2/3) φ} − 1)² .    (A2)
+ Since inflation ends when ϵ ∼ 1 is reached first (η ≃ −0.15), we have
+ φ_e = √(3/2) ln(1 + 2/√3) ≃ 0.94 MP .    (A3)
+ Then according to Eq. (23), the e-folding number is
+ N = (3/4) [e^{√(2/3) φ} − √(2/3) φ] |_{φ_e}^{φ_i} = (3/4) [e^{√(2/3) φ_i} − e^{√(2/3) φ_e} − √(2/3)(φ_i − φ_e)] .    (A4)
+ For N ∼ (50, 60), we find that approximately
+ φ_i ≃ √(3/2) ln[(4/3)(N + 4.3)] .    (A5)
+ Substituting it into Eq. (A2), we finally derive
+ r = 16ϵ = 12/(N + 3.55)² ,    (A6)
+ ns = 1 − 6ϵ + 2η = 1 − 2/(N + 3.55) − 3/(N + 3.55)² .    (A7)
1470
+ These results are shown as the red line in Fig. 2.
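+ As a quick numerical cross-check (not part of the original derivation), Eqs. (A6) and (A7) can be
+ evaluated directly; for N = 50–60 they give ns ≈ 0.962–0.968 and r ≈ 0.003–0.004. A minimal Python
+ sketch of this evaluation is:
+ def tensor_to_scalar(N):
+     # Eq. (A6): r = 12 / (N + 3.55)^2
+     return 12.0 / (N + 3.55) ** 2
+ def spectral_index(N):
+     # Eq. (A7): ns = 1 - 2/(N + 3.55) - 3/(N + 3.55)^2
+     x = N + 3.55
+     return 1.0 - 2.0 / x - 3.0 / x ** 2
+ for N in (50, 60):
+     print(f"N = {N}: ns = {spectral_index(N):.4f}, r = {tensor_to_scalar(N):.4f}")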
1471
+ [1] A. H. Guth, Phys. Rev. D 23, 347-356 (1981).
1472
+ [2] A. D. Linde, Phys. Lett. B 108, 389-393 (1982).
1473
+ [3] V. F. Mukhanov, H. A. Feldman and R. H. Brandenberger, Phys. Rept. 215, 203-333 (1992).
1474
1476
+ [4] Y. Akrami et al. [Planck], Astron. Astrophys. 641, A10 (2020).
1477
+ [5] V. F. Mukhanov and G. V. Chibisov, JETP Lett. 33, 532-535 (1981).
1478
+ [6] S. W. Hawking, Phys. Lett. B 115, 295 (1982).
1479
+ [7] A. H. Guth and S. Y. Pi, Phys. Rev. Lett. 49, 1110-1113 (1982).
1480
+ [8] A. A. Starobinsky, Phys. Lett. B 117, 175-178 (1982).
1481
+ [9] J. M. Bardeen, P. J. Steinhardt and M. S. Turner, Phys. Rev. D 28, 679 (1983).
1482
+ [10] H. Weyl, Sitzungsber. Preuss. Akad. Wiss. Berlin (Math. Phys. ) 1918, 465 (1918).
1483
+ [11] H. Weyl, Annalen Phys. 59, 101-133 (1919).
1484
+ [12] L. Smolin, Nucl. Phys. B 160, 253-268 (1979).
1485
+ [13] H. Cheng, Phys. Rev. Lett. 61, 2182 (1988).
1486
+ [14] H. Nishino and S. Rajpoot, Phys. Rev. D 79, 125025 (2009).
1487
+ [15] C. Romero, J. B. Fonseca-Neto and M. L. Pucheu, Class. Quant. Grav. 29, 155015 (2012).
1488
+ [16] I. Bars, P. Steinhardt and N. Turok, Phys. Rev. D 89, no.4, 043515 (2014).
1489
+ [17] I. Quiros, [arXiv:1401.2643 [gr-qc]].
1490
+ [18] E. Scholz, Gen. Rel. Grav. 47, no.2, 7 (2015).
1491
+ [19] H. C. Ohanian, Gen. Rel. Grav. 48, no.3, 25 (2016).
1492
+ [20] P. G. Ferreira, C. T. Hill and G. G. Ross, Phys. Rev. D 95, no.4, 043507 (2017).
1493
+ [21] M. de Cesare, J. W. Moffat and M. Sakellariadou, Eur. Phys. J. C 77, no.9, 605 (2017).
1494
+ [22] P. G. Ferreira, C. T. Hill and G. G. Ross, Phys. Rev. D 98, no.11, 116012 (2018).
1495
+ [23] P. G. Ferreira, C. T. Hill, J. Noller and G. G. Ross, Phys. Rev. D 97, no.12, 123516 (2018).
1496
+ [24] Y. Tang and Y. L. Wu, Phys. Lett. B 784, 163-168 (2018).
1497
+ [25] D. M. Ghilencea and H. M. Lee, Phys. Rev. D 99, no.11, 115007 (2019)
1498
+ [26] C. Wetterich, [arXiv:1901.04741 [hep-th]].
1499
+ [27] Y. Tang and Y. L. Wu, Phys. Lett. B 803, 135320 (2020).
1500
+ [28] Y. Tang and Y. L. Wu, JCAP 03, 067 (2020)
1501
+ [29] D. M. Ghilencea, Eur. Phys. J. C 82, no.1, 23 (2022).
1502
+ [30] D. M. Ghilencea and T. Harko, [arXiv:2110.07056 [gr-qc]].
1503
+ [31] Y. L. Wu, Phys. Rev. D 93, no.2, 024012 (2016).
1504
+ [32] Y. L. Wu, Eur. Phys. J. C 78, no.1, 28 (2018) doi:10.1140/epjc/s10052-017-5504-3 [arXiv:1712.04537 [hep-th]].
1511
+ [33] Y. L. Wu, Int. J. Mod. Phys. A 36, no.28, 2143001 (2021) doi:10.1142/S0217751X21430016
1512
+ [arXiv:2104.05404 [physics.gen-ph]].
1513
+ [34] Y. L. Wu, Int. J. Mod. Phys. A 36, no.28, 2143002 (2021) doi:10.1142/S0217751X21430028
1514
+ [arXiv:2104.11078 [physics.gen-ph]].
1515
+ [35] D. M. Ghilencea, JHEP 03, 049 (2019).
1516
+ [36] P. G. Ferreira, C. T. Hill, J. Noller and G. G. Ross, Phys. Rev. D 100, no.12, 123516 (2019).
1517
+ [37] D. M. Ghilencea, JHEP 10, 209 (2019).
1518
+ [38] I. Oda, [arXiv:2003.01437 [hep-th]].
1519
+ [39] D. M. Ghilencea, Eur. Phys. J. C 80, no.12, 1147.
1520
+ [40] I. Oda, PoS CORFU2019, 070 (2020).
1521
+ [41] Y. Tang and Y. L. Wu, Phys. Lett. B 809, 135716 (2020).
1522
+ [42] I. Oda, Mod. Phys. Lett. A 35, no.37, 2050304 (2020).
1523
+ [43] D. M. Ghilencea, Eur. Phys. J. C 81, no.6, 510 (2021).
1524
+ [44] R. G. Cai, Y. S. Hao and S. J. Wang, Commun. Theor. Phys. 74, no.9, 095401 (2022).
1525
+ [45] Q. Y. Wang, Y. Tang and Y. L. Wu, Phys. Rev. D 106, no.2, 023502 (2022).
1526
+ [46] A. A. Starobinsky, Phys. Lett. B 91, 99-102 (1980).
1527
+ [47] A. Vilenkin, Phys. Rev. D 32, 2511 (1985).
1528
+ [48] M. B. Mijic, M. S. Morris and W. M. Suen, Phys. Rev. D 34, 2934 (1986).
1529
+ [49] K. i. Maeda, Phys. Rev. D 37, 858 (1988).
1530
+ [50] M. Aoki, J. Kubo and J. Yang, JCAP 01, no.01, 005 (2022).
1531
+ [51] K. S. Stelle, Phys. Rev. D 16, 953-969 (1977).
1532
+ [52] B. Whitt, Phys. Lett. B 145, 176-178 (1984).
1533
+ [53] J. D. Barrow and S. Cotsakis, Phys. Lett. B 214, 515-518 (1988).
1534
+ [54] P. A. R. Ade et al. [BICEP and Keck], Phys. Rev. Lett. 127, no.15, 151301 (2021).
1535
+ [55] Q. G. Huang, JCAP 02, 035 (2014).
1536
+ [56] T. Asaka, S. Iso, H. Kawai, K. Kohri, T. Noumi and T. Terada, PTEP 2016, no.12, 123E01
1537
+ (2016).
1538
+ [57] D. Y. Cheong, H. M. Lee and S. C. Park, Phys. Lett. B 805, 135453 (2020).
1539
+ [58] G. Rodrigues-da-Silva, J. Bezerra-Sobrinho and L. G. Medeiros, Phys. Rev. D 105, no.6,
1540
+ 063504 (2022).
1541
+ [59] V. R. Ivanov, S. V. Ketov, E. O. Pozdeeva and S. Y. Vernov, JCAP 03, no.03, 058 (2022).
1542
1544
+ [60] Y. Shtanov, V. Sahni and S. S. Mishra, [arXiv:2210.01828 [gr-qc]].
1545
+ [61] T. Modak, L. Röver, B. M. Schäfer, B. Schosser and T. Plehn, [arXiv:2210.05698 [astro-ph.CO]].
1547
+ [62] T. Damour and V. F. Mukhanov, Phys. Rev. Lett. 80, 3440-3443 (1998).
1548
+ [63] A. R. Liddle and A. Mazumdar, Phys. Rev. D 58, 083508 (1998).
1549
+ [64] A. Taruya, Phys. Rev. D 59, 103505 (1999).
1550
+ [65] V. H. Cardenas and G. Palma, Phys. Rev. D 61, 027302 (2000).
1551
+ [66] J. w. Lee, S. Koh, C. Park, S. J. Sin and C. H. Lee, Phys. Rev. D 61, 027301 (2000).
1552
+ [67] V. Sahni and L. M. Wang, Phys. Rev. D 62, 103517 (2000).
1553
+ [68] S. Tsujikawa, Phys. Rev. D 61, 083516 (2000).
1554
+ [69] M. Sami, Grav. Cosmol. 8, 309-312 (2003).
1555
+ [70] S. Dutta and R. J. Scherrer, Phys. Rev. D 78, 083512 (2008).
1556
+ [71] M. C. Johnson and M. Kamionkowski, Phys. Rev. D 78, 063010 (2008).
1557
+ [72] H. Mohseni Sadjadi and P. Goodarzi, Phys. Lett. B 732, 278-284 (2014).
1558
+ [73] J. A. R. Cembranos, A. L. Maroto and S. J. Núñez Jareño, JHEP 03, 013 (2016).
1559
+ [74] P. Goodarzi and H. Mohseni Sadjadi, Eur. Phys. J. C 77, no.7, 463 (2017).
1560
+ [75] N. Aghanim et al. [Planck], Astron. Astrophys. 641, A6 (2020).
1561
39E2T4oBgHgl3EQfOAbJ/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
39FKT4oBgHgl3EQfRS0O/content/2301.11770v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb08c8439ac709c9766b010874415a8f5d7e633fbbaedd33f2ff29cde7bf0d31
3
+ size 205810
39FKT4oBgHgl3EQfRS0O/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:885788825b458e4b6e57c786bad1dc9de86597802be94462cb4bfabd71e6bfca
3
+ size 2818093
39FKT4oBgHgl3EQfRS0O/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b234b15e9fd40b64cb3caa535277b124ae8a0ce403c273acc5a00c84371c32a6
3
+ size 105727
3dFKT4oBgHgl3EQfQy0a/content/tmp_files/2301.11768v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
3dFKT4oBgHgl3EQfQy0a/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4NE1T4oBgHgl3EQf6AXQ/content/tmp_files/2301.03519v1.pdf.txt ADDED
@@ -0,0 +1,1374 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Draft version January 10, 2023
2
+ Typeset using LATEX twocolumn style in AASTeX63
3
+ Evolution of elemental abundances in hot active region cores from Chandrayaan-2 XSM observations
4
+ Biswajit Mondal,1, 2 Santosh V. Vadawale,1 Giulio Del Zanna,3 N. P. S. Mithun,1 Aveek Sarkar,1
5
+ Helen E. Mason,3 P. Janardhan,1 and Anil Bhardwaj1
6
+ 1Physical Research Laboratory, Navrangpura, Ahmedabad, Gujarat-380 009, India
7
+ 2Indian Institute of Technology Gandhinagar, Palaj, Gandhinagar, Gujarat-382 355, India
8
+ 3DAMTP, Centre for Mathematical Sciences, University of Cambridge, Wilberforce Road, Cambridge CB3 0WA, UK
9
+ ABSTRACT
10
+ The First Ionization Potential (FIP) bias, whereby elemental abundances for low FIP elements in
11
+ different coronal structures vary from their photospheric values and may also vary with time, has been
12
+ widely studied. In order to study the temporal variation, and to understand the physical mechanisms
13
+ giving rise to the FIP bias, we have investigated the hot cores of three ARs using disk-integrated
14
+ soft X-ray spectroscopic observation with the Solar X-ray Monitor (XSM) onboard Chandrayaan-2.
15
+ Observations for periods when only one AR was present on the solar disk were used so as to ensure that
16
+ the AR was the principal contributor to the total X-ray intensity. The average values of temperature
17
+ and EM were ∼3 MK and 3×10^46 cm^−3 respectively. Regardless of the age and activity of the AR,
18
+ the elemental abundances of the low FIP elements, Al, Mg, and Si were consistently higher than their
19
+ photospheric values. The average FIP bias for Mg and Si was ∼3, whereas the FIP bias for the mid-FIP
20
+ element, S, was ∼1.5. However, the FIP bias for the lowest FIP element, Al, was observed to be higher
21
+ than 3, which, if real, suggests a dependence of the FIP bias of low FIP elements on their FIP value.
22
+ Another major result from our analysis is that the FIP bias of these elements is established within
23
+ ∼10 hours of emergence of the AR and then remains almost constant throughout its lifetime.
24
+ Keywords: Solar X-ray corona, Solar abundances, FIP bias, FIP effect, Active Region
25
+ 1. INTRODUCTION
26
+ The earlier study of the Sun as a star by Pottasch
27
+ (1963) revealed that solar coronal abundances are dif-
28
+ ferent from those of the photosphere. The differences
29
+ are correlated to the First Ionization Potential (FIP) of
30
+ the element, in the sense that the abundance ratio of
31
+ a low-FIP (less than 10 eV) element versus that of a
32
+ high-FIP element is higher in the corona. A measure
33
+ of the difference is the so called FIP bias, i.e. the ratio
34
+ between the coronal and the photospheric abundance of
35
+ an element.
36
+ In most of the available literature, the FIP bias has
37
+ been (and still is) estimated by measuring the relative
38
+ abundances between elements, and not relative to hy-
39
+ drogen. This is due to the fact that abundance mea-
40
+ surements with respect to Hydrogen in the low corona,
41
+ and on-disk is non-trivial, due to the lack of H-emission
42
+ Corresponding author: Biswajit Mondal
43
44
+ lines at a few million Kelvin. Hence, whether it is the
45
+ low-FIP elements that have an increased abundance or
46
+ the high-FIP elements that have a reduced one (com-
47
+ pared to their photospheric values) has been a subject
48
+ of continued debate.
49
+ Further, it has become clear that different solar struc-
50
+ tures have different FIP biases. There are also indica-
51
+ tions that the FIP bias depends on the temperature of
52
+ the plasma. For a long time, it has been widely accepted
53
+ that coronal abundances in active regions increase with
54
+ time. We refer the reader to the recent reviews by Lam-
55
+ ing (2015); Del Zanna and Mason (2018) for more de-
56
+ tails. We also provide in the following section a brief
57
+ summary of available measurements related to active re-
58
+ gions.
59
+ Knowledge of the elemental abundances in different
60
+ atmospheric layers of the Sun is a topic of great inter-
61
+ est to the solar physics community mainly due to the
62
+ following two reasons. The first is that they provide, in
63
+ principle, a way to link the solar source regions to the
64
+ various components of the solar wind. In fact, elemental
65
+ abundance variations are also clearly observed in-situ.
66
+ arXiv:2301.03519v1 [astro-ph.SR] 9 Jan 2023
67
+
68
+ 2
69
+ The slow-speed solar wind has a high FIP bias simi-
70
+ lar to that measured in AR core loops, 3MK, whereas
71
+ the high-speed wind has a near unit FIP bias, similar
72
+ to that of coronal holes (see, e.g., Brooks et al. 2015;
73
+ Gloeckler and Geiss 1989; Feldman et al. 1998; Bochsler
74
+ 2007; Brooks and Warren 2011).
75
+ The second reason is that studying abundance vari-
76
+ ations might contribute to a better understanding of
77
+ the physical processes at play in the solar corona. In
78
+ fact, we know that the FIP bias is closely related to
79
+ the magnetic field activity of the Sun (see, e.g. Feld-
80
+ man and Widing 2002; Brooks et al. 2017; Baker et al.
81
+ 2018). The Ponderomotive force model (Laming 2004,
82
+ 2009, 2012, 2017) is now widely accepted, as it is able to
83
+ reproduce the main characteristics of the FIP effect, as
84
+ measured in-situ and remotely. According to this model,
85
+ the separation of ions from neutral atoms within closed
86
+ loops in an upward direction is caused by the reflec-
87
+ tion of downward propagating Alfvén waves at chromo-
88
+ spheric heights, causing an enhancement of the low-FIP
89
+ elements in the corona. Since coronal waves can be pro-
90
+ duced by mechanisms that heat the solar corona, it is
91
+ thought that the mechanism underlying the FIP effect
92
+ is inextricably linked to processes that heat the solar
93
+ corona.
94
+ Hence, measuring the FIP bias is an impor-
95
+ tant diagnostic for coronal plasma characteristics (Lam-
96
+ ing 2015; Dahlburg et al. 2016).
97
+ In this paper, we focus on the elemental abundances
98
+ of hot, quiescent AR core emission at 3 MK, by provid-
99
+ ing line-to-continuum measurements of the Sun in the
100
+ soft X-ray energy band using data from the Solar X-ray
101
+ Monitor (XSM: Vadawale et al. 2014; Shanmugam et al.
102
+ 2020). It may be noted here that the XSM is the only
103
+ spectrometer to have observed the Sun in the 1-15 keV
104
+ range during the minimum of solar cycle 24 with an en-
105
+ ergy resolution better than 180 eV at 5.9 keV. This reso-
106
+ lution is sufficient to measure the abundances of several
107
+ elements. The soft X-ray continuum is dominated by
108
+ free-free radiation (with some free-bound emission, see
109
+ e.g. Figure 12b of Mondal et al. 2021), which primarily
110
+ originates from H. Hence, measuring the abundances of
111
+ an emission line with respect to the continuum provides
112
+ the absolute abundance of that element. It should be
113
+ noted that the measurement of free-free emission can
114
+ also be carried out in the EUV energy band, but it is
115
+ limited to large flares (e.g., Feldman et al. 2003).
116
+ The XSM energy band is sensitive to temperatures
117
+ above 2 MK. When the Sun was at minimum activity
118
+ levels, without any ARs, the XSM observed a steady sig-
119
+ nal originating from X-ray Bright Points (XBPs), with
120
+ a peak emission around 2 MK (Vadawale et al. 2021b).
121
+ When a single non-flaring AR is present, the signal is
122
+ dominated by the AR’s near-isothermal ∼ 3 MK emis-
123
+ sion (see, e.g. Del Zanna 2013). This provides an ex-
124
+ cellent opportunity to measure the FIP bias of the hot
125
+ quiescent core for individual active regions during their
126
+ evolution.
127
+ In the literature, very few abundance measurements
128
+ are known to be associated specifically with the 3 MK
129
+ emission from quiescent AR cores.
130
+ These are sum-
131
+ marised in Del Zanna and Mason (2018). X-ray spectra
132
+ in the 10–20 Å range have provided the relative abun-
133
+ dances of the low-FIP Fe, Mg vs. O, Ne. Most stud-
134
+ ies provided results on single active regions. Saba and
135
+ Strong (1993) reported a significant variability of the
136
+ FIP bias using SMM/FCS observations of several active
137
+ regions. On the other hand, a re-analysis of several qui-
138
+ escent AR cores with improved atomic data and using
139
+ a multi-thermal DEM technique by Del Zanna and Ma-
140
+ son (2014) indicated the same FIP bias, around 3, for
141
+ all active regions, irrespective of their age and size.
142
+ Since 2006, EUV spectra from the Hinode EIS instru-
143
+ ment have provided an opportunity to measure the rel-
144
+ ative FIP bias between low-FIP elements (e.g. Fe, Si)
145
+ and the high-FIP Ar, as well as the mid-FIP S, which
146
+ actually shows the same abundance variations as the
147
+ high-FIP elements. An example case was discussed by
148
+ Del Zanna (2013), showing that the FIP bias in the EUV
149
+ of 3 MK plasma was the same as in the X-rays. Con-
150
+ sidering the size of the emitting plasma and its emission
151
+ measure, Del Zanna (2013) concluded that it should be
152
+ the low-FIP elements that are over-abundant by about
153
+ a factor of 3.
154
+ Del Zanna et al. (2022) carried out a multi-wavelength
155
+ study of an AR as it crossed the solar disk which was ob-
156
+ served by XSM as well as by SDO/AIA, Hinode/EIS and
157
+ Hinode/XRT. The relative FIP bias obtained from Hin-
158
+ ode/EIS observations confirmed the Del Zanna (2013)
159
+ results, and showed no variation with the disk passage.
160
+ The analysis of simultaneous XSM spectra on two days
161
+ also indicated no significant variability, and provided an
162
+ absolute FIP bias for Si of 2.4, i.e. close to the value
163
+ suggested by Del Zanna (2013), and also very close to
164
+ the prediction of Laming’s model.
165
+ In the present study, we extend the previous XSM
166
+ analysis to all the quiescent periods of the same active
167
+ region, and also investigate two other active regions dur-
168
+ ing their disk crossings. One AR in particular is of in-
169
+ terest as it emerged on-disk, and hence offers the op-
170
+ portunity to study the elemental abundances during the
171
+ early phase of the evolution of an AR.
172
+ The rest of the paper is organized as follows: Sec-
173
+ tion 2 provides a short overview of previous abundance
174
+ measurements in active regions. Section 3 describes the
175
+
176
+ 3
177
+ observations and data analysis.
178
+ Section 4 provides a
179
+ detailed spectral analysis. After obtaining the results,
180
+ these are discussed in Section 5. Section 6 provides a
181
+ brief summary of the article.
182
+ 2. HISTORICAL OVERVIEW
183
+ Spatially resolved measurements of the relative FIP
184
+ bias have been carried out by several authors (see,e.g.
185
+ Widing and Feldman 1993; Sheeley 1995, 1996; Widing
186
+ 1997; Widing and Feldman 2001) using Skylab spectro-
187
+ heliograms with Mg, Ne transition region lines. These
188
+ are formed well below 1 MK, in the legs of active re-
189
+ gion ‘cool’ (1 MK) loops. They found photospheric com-
190
+ position (FIP bias=1) for newly emerged closed loops,
191
+ but increasing FIP bias reaching a value of 3-4 within a
192
+ timescale of 1-2 days (Widing and Feldman 2001), and
193
+ much higher values, up to about 10, within a few more
194
+ days. Differing FIP biases were also obtained by Young
195
+ and Mason (1997) and Dwivedi et al. (1999) using Mg
196
+ and Ne line ratios observed by the CDS and SUMER
197
+ spectrometers onboard the Solar and Heliospheric Ob-
198
+ servatory (SOHO).
199
+ The large values are hard to reconcile with in-situ
200
+ measurements, where the FIP bias is at most 3, and
201
+ also with theory. However, Del Zanna (2003) pointed
202
+ out that as the cool AR loops are almost isothermal in
203
+ their cross-section, the assumption that a smooth emis-
204
+ sion measure distribution was present in the plasma,
205
+ used to interpret the Skylab data, was not justified.
206
+ Del Zanna (2003) took the intensities measured by Wid-
207
+ ing and Feldman (1993), and using an emission measure
208
+ loci approach, showed that a FIP bias of 3.7 was con-
209
+ sistent with the data, much lower than the value of 14
210
+ reported by Widing and Feldman.
211
+ Del Zanna (2003)
212
+ also analysed the legs of several cool loops observed
213
+ by SoHO/CDS and found photospheric abundances, al-
214
+ though a similar analysis for other loops by Del Zanna
215
+ and Mason (2003) found a FIP bias of 4.
216
+ In summary, the legs of cool AR loops do show a range
217
+ of FIP bias values, between 1 and 4, and perhaps occa-
218
+ sionally larger. However, the very high FIP biases found
219
+ from Skylab data were largely overestimated.
220
+ As shown by Del Zanna and Mason (2003), active re-
221
+ gion cores are composed not only of cool 1 MK loops
222
+ and unresolved, almost isothermal 3 MK loops, but also
223
+ unresolved emission in the 1–3 MK range. The plasma
224
+ at different temperatures is generally not cospatial.
225
+ There is evidence from Hinode EIS observations of e.g.
226
+ Si X, S X lines that this ≃2 MK emission has a lower
227
+ relative FIP bias, around 2 (see,e.g. Del Zanna 2012).
228
+ Further studies using the same lines (e.g., Baker et al.
229
+ 2013, 2015; Doschek and Warren 2019; Mihailescu et al.
230
+ 2022; Ko et al. 2016; Testa et al. 2022) have shown some
231
+ variation (around the value of 2) of the relative FIP bias
232
+ within each active region, but little variability in time,
233
+ except during the decay phase, when an AR effectively
234
+ disappears and the relative abundances become photo-
235
+ spheric.
236
+ In summary, active region structures formed at tem-
237
+ peratures below 2 MK show a range of relative FIP bi-
238
+ ases, and some temporal variability. The few observa-
239
+ tions of the hotter, 3 MK, AR cores have in contrast
240
+ shown a remarkable consistency, with relative FIP bi-
241
+ ases around 3.
242
+ Finally, to interpret observations of the Sun as a star,
243
+ one needs to take into account the above (and other)
244
+ issues. As shown by Del Zanna (2019), when the Sun’s
245
+ activity is at a minimum with no active region present
246
+ on the solar disk, the corona around 1 MK shows near
247
+ photospheric abundances, whereas in presence of active
248
+ regions, the FIP bias for the 1 MK emission stays the
249
+ same, but the hotter emission shows a higher relative
250
+ FIP bias.
251
+ When active regions flare, the high tem-
252
+ perature plasma shows nearly photospheric composition
253
+ around the peak X-ray emission (see e.g., Mondal et al.
254
+ 2021).
255
+ 3. OBSERVATIONS AND DATA ANALYSIS
256
+ Observations of the Sun were carried out with the
257
+ XSM during the minimum of solar cycle 24, when no
258
+ active regions were present, covering the years 2019-
259
+ 2020. Results are given in Vadawale et al. (2021b). They
260
+ reported intermediate abundances of low-FIP elements
261
+ (Mg, Al, and Si) of 2 MK plasma, primarily originating
262
+ from X-ray Bright Points, XBPs (Mondal et al. 2022).
263
+ Frequent micro-flaring activity was observed and found
264
+ to be occurring everywhere on the solar disk, even when
265
+ no ARs were present (Vadawale et al. 2021a). During
266
+ the minimum of solar cycle 24, XSM observed the disk
267
+ passage of a few individual, isolated ARs in the absence
268
+ of any other major activity. When ARs were present
269
+ on-disk, XSM recorded hundreds of small flares of dif-
270
+ ferent classes. Elemental abundance variations during
271
+ these small flares were found, for the first time, to ini-
272
+ tially drop to photospheric values, then rapidly return
273
+ to coronal values, as described by Mondal et al. (2021),
274
+ Mithun et al. (2022), and Lakshitha et al. (2022). In
275
+ this paper, we analyze the temporal evolution of active
276
+ regions outside of flaring activity and for this we have
277
+ chosen to study three isolated active regions: AR12749,
278
+ AR12758, and AR12759.
279
+ XSM data contain spectra at 1 s cadence in a raw
280
+ (level-1) daily file. Since the visibility of the Sun varies
281
+ within the XSM field-of-view (FOV), with the Sun be-
282
+
283
+ 4
284
+ ing sometimes outside the FOV or being occulted by the
285
+ Moon, the data include both solar and non-solar spectra.
286
+ The XSM Data Analysis Software (XSMDAS: Mithun
287
+ et al. (2021)) has been used to generate the level-2 sci-
288
+ ence data product using the appropriate Good Time
289
+ Intervals (GTIs) and the other necessary instrumental
290
+ parameters. The available default level-2 data contains
291
+ the effective area corrected light curves for every second
292
+ and spectra for every minute. XSMDAS also provides
293
+ the functionality to generate the light curves and spec-
294
+ tra for a given cadence and energy range, which we have
295
+ used in the present analysis.
296
+ Using the XSMDAS, we have generated 2 min av-
297
+ eraged XSM light curves in the energy range of 1-15
298
+ keV during the disk passage of the AR12749, AR12758,
299
+ and AR12759, as shown in the three panels of Figure 1.
300
+ During the evolution of these three ARs, representative
301
+ full disk X-ray images taken by the XRT Be-thin fil-
302
+ ter are shown in the top row of each panel. AR12749
303
+ (Figure 1a) appeared from the east limb on Sept 29,
304
+ 2019. Whilst crossing the solar disk, it became fainter
305
+ towards the west limb and went behind the limb on 14
306
+ Oct. AR12758 (Figure 1b) appears to form on disk on
307
+ 06 Mar 2020 and fully emerged after 08 Mar. It decays
308
+ whilst crossing the solar disk and finally goes behind the
309
+ west limb on 18 Mar. AR12759 appeared from the east
310
+ limb on 29 Mar 2020 and transited the solar disk until
311
+ 14 Apr 2020, before disappearing behind the west limb.
312
+ The full disk XRT images show that during the pas-
313
+ sage of these three ARs, no other major activity was
314
+ present on the solar disk. Thus, we conclude that these
315
+ three ARs were primarily responsible, during their disk
316
+ passage, for the enhanced X-ray emission observed by
317
+ the XSM. These ARs produced many small B/A-class
318
+ flares, seen as multiple spikes in the XSM light curves.
319
+ Detailed studies of these small flares were reported by
320
+ Mondal et al. (2021) and Lakshitha et al. (2022).
321
+ In the present study, we have selected only the quies-
322
+ cent periods from the observed light curves by exclud-
323
+ ing the periods when the small flares occurred using a
324
+ semi-automated graphical algorithm. For example, Fig-
325
+ ure 2 shows the representative selection (orange shaded
326
+ regions) for the AR quiescent durations on 2020-04-06.
327
+ These identified time intervals were used as user-defined
328
+ GTIs to generate the spectra for quiescent ARs on a
329
+ daily basis in order to carry out the detailed spectral
330
+ analysis as discussed in Section 4.
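+ The flare exclusion above was performed with a semi-automated graphical tool; the snippet below is
+ only a schematic, automated version of the same idea (the 30-bin window and factor-of-2 threshold
+ are illustrative choices, not the authors' actual criteria):
+ import numpy as np
+ from scipy.ndimage import median_filter
+ def quiescent_gtis(time, rate, window=30, k=2.0):
+     # Flag bins whose count rate exceeds k times a running median as flaring,
+     # and return the remaining quiescent intervals as (start, stop) pairs.
+     rate = np.asarray(rate, dtype=float)
+     baseline = median_filter(rate, size=window)
+     quiet = rate < k * baseline
+     gtis, start = [], None
+     for t, q in zip(time, quiet):
+         if q and start is None:
+             start = t
+         elif not q and start is not None:
+             gtis.append((start, t))
+             start = None
+     if start is not None:
+         gtis.append((start, time[-1]))
+     return gtis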
331
+ 4. SPECTRAL ANALYSIS
332
+ Broad-band soft X-ray spectra of the solar corona con-
333
+ sist of a continuum as well as the emission lines of the
334
+ different elements. Modeling the soft X-ray spectrum
335
+ provides the measurements of the temperature, emission
336
+ measure, and elemental abundances (with respect to hy-
337
+ drogen) of the emitting plasma (Del Zanna and Mason
338
+ 2018). We use the chisoth model (Mondal et al. 2021)
339
+ for the spectral fitting.
340
+ The chisoth is a local model
341
+ of the X-ray spectral fitting package (XSPEC: Arnaud
342
+ et al. (1999)), and it estimates the theoretical spectrum
343
+ using the CHIANTI atomic database. It takes temper-
344
+ ature, emission measure (EM: which is related to the
345
+ density of the plasma), and the elemental abundances
346
+ of the elements from Z=2 to Z=30 as free variables for
347
+ the spectral fitting.
348
+ After generating the spectra for the quiescent peri-
349
+ ods, we fitted them with an isothermal emission model.
350
+ For the spectral fitting, we ignored the spectra below 1.3
351
+ keV where the XSM response is not well-known (Mithun
352
+ et al. 2020), and above the energy where the solar spec-
353
+ trum is dominated by the non-solar background spec-
354
+ trum. During the spectral fitting, the temperature, EM,
355
+ along with the abundances of Mg, Al, and Si (whose
356
+ emission lines are prominent in the XSM spectrum) were
357
+ kept as variable parameters. The 1σ uncertainty of each
358
+ free parameter was also estimated using the standard
359
+ procedure in XSPEC.
360
+ Although the S line complex is visible in the spectra,
361
+ including it in the spectral fits as a free parameter causes
362
+ a large uncertainty in the measurement of the S abun-
363
+ dance because of its poor statistics.
364
+ Hence, we fixed
365
+ the S abundances along with the abundances of other
366
+ elements (whose emission lines are not visible in the ob-
367
+ served spectra) with the coronal abundances of Feldman
368
+ (1992). However, we found that the measurement of the
369
+ S abundance is possible for the summed spectrum of the
370
+ entire AR period.
371
+ Figure 3 shows the representative XSM spectra, for
372
+ the three ARs fitted, in different colours, with an isother-
373
+ mal model.
374
+ The points with error bars represent the
375
+ observed spectra, whereas the solid curves represent the
376
+ best-fit modeled spectra. The grey error bars represent
377
+ the non-solar background spectrum, which is subtracted
378
+ from the observed spectra during the spectral analysis.
379
+ The lower panel shows the residual between the observed
380
+ and model spectra. We have fitted all the spectra in a
381
+ similar way and found that all of them are well described
382
+ by isothermal model.
383
+ The X-rays observed by XSM originated from both
384
+ the AR and the background quiet Sun regions (outside
385
+ the AR). To determine how much emission is due to the
386
+ background quiet Sun regions, we estimate the average
387
+ quiet Sun spectrum using an average quiet-Sun temper-
388
+ ature, EM, and abundances, as reported by Vadawale
389
+ et al. (2021b). The average quiet Sun spectrum is shown
390
+
391
+ [Figure 1 plot area: XSM count rate (counts s^−1, logarithmic scale) versus date for AR12749 (panel a), AR12758 (panel b), and AR12759 (panel c).]
440
+ Figure 1. XSM 1-15 keV light curves during the disk passage of AR12749 (panel a), AR12758 (panel b) and AR12759 (panel
441
+ c). The top row of each panel shows representative full disk X-ray images (negative intensities) taken with the XRT Be-thin
442
+ filter during the evolution of the ARs. The vertical dashed lines represent the timing of the XRT images.
443
+
444
+ [Figure 2 plot area: XSM rate (c/s) versus time of day (hh:mm) for one representative day of each AR.]
478
+ Figure 2. Selection of the quiescent AR periods (orange-
479
+ shaded regions) from the XSM light-curves for one represen-
480
+ tative day of AR12749 (panel a), AR12758 (panel b), and
481
+ AR12759 (panel c).
482
+ by the black dashed curve in Figure 3. The quiet Sun
483
+ spectrum is found to be almost an order of magnitude
484
+ lower than the spectra of the active period when the
485
+ ARs were very bright on the solar disk. We thus con-
486
+ clude that the X-ray emission of the active periods is
487
+ primarily dominated by the AR emission.
488
+ Separating the AR emission from the background
489
+ quiet Sun emission would be possible by subtracting the
490
+ quiet-sun spectra from the AR spectra. But, as the ef-
491
+ fective area of the XSM varies with time, this is not
492
+ recommended. It is possible to model the AR spectra
493
+ using a two-temperature (2T) component model rather
494
+ than subtracting the quiet Sun spectra. This is what we
495
+ have chosen to do. One temperature corresponds to the
496
+ background solar emission originating from the regions
497
+ outside the AR and the second temperature corresponds
498
+ to the AR plasma. We have modeled a few AR spec-
499
+ tra with a two-temperature (2T) model. During the 2T
500
+ spectral fitting, the parameters of the background solar
501
+ emission were kept fixed to the average quiet-Sun values
502
+ reported by Vadawale et al. (2021b). For the AR compo-
503
+ nent, the temperature, EM, along with the abundances
504
+ of Mg, Al, and Si, were kept as variable parameters. We
505
+ found that the 2T model can describe the XSM spectra
506
+ for the active periods with similar best-fitted parameters
507
+ as those obtained by the isothermal model. This verifies
508
+ that the AR emission dominates the spectra of the AR
509
+ periods. Thus, in this study, we show the results of the
510
+ isothermal analysis in Figure 5 and 6. This is discussed
511
+ in Section 5.
512
+ It is interesting to study how the plasma parameters
513
+ vary during the emerging phase of the AR12758, i.e.,
514
+ from 07-Mar-2020 to 09-Mar-2020. Figure 4 shows the
515
+ evolution of the photospheric magnetograms (top row)
516
+ and the X-ray emission (bottom row) as observed by
517
+ SDO/HMI and the Be-thin filter of Hinode/XRT re-
518
+ spectively.
519
+ These images were created by de-rotating
520
+ the synoptic data of HMI1 and XRT2 to a common date
521
+ (08-Mar-2020) using the standard procedure of Solar-
522
+ SoftWare (SSW; Freeland and Handy 1998). We also
523
+ determined the total unsigned photospheric magnetic
524
+ flux for the regions ±10 G within the field-of-view shown
525
+ in Figure 4. During this emerging flux period, we car-
526
+ ried out a time-resolved spectroscopic study using the
527
+ XSM observations with finer time bins of less than a
528
+ day. However, during this period, as the emission from
529
+ the AR was not very bright, the emission from the AR
530
+ and the rest of the Sun could be mixed together. Thus
531
+ to derive the evolution of the plasma parameters during
532
+ this period, we modeled the observed XSM spectra with
533
+ a 2T model, where one component represents the emis-
534
+ sion from the AR, and the other represents the emission
535
+ from the rest of the Sun, as discussed in the previous
536
+ paragraph. The results are shown in Figure 7 and dis-
537
+ cussed in Section 5.
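+ For reference, the total unsigned flux described above can be estimated from a line-of-sight
+ magnetogram with a few lines of code. This is only a sketch: it assumes the ±10 G criterion means
+ that pixels with |B_LOS| below 10 G are excluded, and the default pixel area is an approximate
+ HMI value (0.5 arcsec per pixel, roughly 1.3 × 10^15 cm²), not a number taken from the paper:
+ import numpy as np
+ def total_unsigned_flux(b_los, pixel_area_cm2=1.3e15, threshold=10.0):
+     # Sum |B_LOS| (in Gauss) over pixels above the noise threshold and
+     # convert to flux in Mx by multiplying with the pixel area in cm^2.
+     b = np.abs(np.asarray(b_los, dtype=float))
+     return np.sum(b[b > threshold]) * pixel_area_cm2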
538
+ 5. RESULTS AND DISCUSSION
539
+ In this study, we have performed the X-ray spectral
540
+ analysis for the evolution of three ARs as observed by
541
+ the XSM. The AR spectra (Figure 3) show a clear sig-
542
+ nature of the thermal X-ray emission from the line com-
543
+ 1 http://jsoc.stanford.edu/data/hmi/synoptic/
544
+ 2 http://solar.physics.montana.edu/HINODE/XRT/SCIA/
545
+
546
+ [Figure 3 plot area: counts s^−1 keV^−1 versus energy (1.5–3.25 keV), with the Mg, Al, Si, and S line complexes marked; legend: Quiet Sun, AR12749 (Oct-01), AR12758 (Mar-11), AR12759 (Apr-05).]
577
+ Figure 3. Soft X-ray spectra measured by the XSM for three
578
+ representative days of the AR period are shown. Solid lines
579
+ represent the best-fit isothermal model, and the residuals are
580
+ shown in the bottom panel. Gray points correspond to the
581
+ non-solar background spectrum.
582
+ plexes of Mg, Al, Si, and S, along with the continuum
583
+ emission up to ∼3.0 keV. The red points in Figure 5
584
+ show the evolution of the temperature and EM through-
585
+ out the evolution of the three ARs. Figure 6 shows the
586
+ evolution of abundances of Mg (panel a), Al (panel b),
587
+ and Si (panel c). The error bars associated with all the
588
+ parameters along the y-axis represent the 1σ uncertain-
589
+ ties.
590
+ We also derived the average S abundance along
591
+ with the other elements from the summed spectrum for
592
+ the duration when the ARs were very bright on the solar
593
+ disk (bounded by the vertical dashed lines in Figures 5
594
+ and 6).
595
+ This provides the average parameters associ-
596
+ ated with each AR, as shown by magenta bars and also
597
+ given in Table 1. The primary findings of the paper are
598
+ discussed below.
599
+ 5.1. Temperature and emission measure
600
+ Temperatures (T) and emission measures (EM) are
601
+ close to the quiet Sun levels (black dashed lines in Fig-
602
+ ure 5) when the ARs were absent from the solar disc
603
+ or only partially present, e.g., 30 September 2019 and
604
+ 6 March 2020. Once the ARs appear, the temperature
605
+ rises to more than ∼3 MK from the ∼2 MK of the quiet
606
+ Sun. As the ∼3 MK emission is predominantly derived
607
+ from a smaller volume of AR plasma, the presence of
608
+ the AR reduces the EM from the quiet Sun values. The
609
+ average temperatures for all the ARs are determined to
610
+ be ∼3 MK (blue error bars in Figure 5a), which is close
611
+ to the “basal” temperature of the AR core reported in
612
+ earlier research (e.g., Del Zanna and Mason 2018; Del
613
+ Zanna 2012; Winebarger et al. 2012). The temperature
614
+ and EM do, however, vary slightly over the course of the
615
+ AR’s evolution, which is consistent with the observed X-
616
+ ray light curve. Following the arrival of AR12749 and
617
+ AR12758, their activity decayed while rotating on the
618
+ solar disk (Figure 1), which is why the temperature and
619
+ EM decreased during their evolution, as indicated by the
620
+ dashed vertical lines in Figure 5. After October 6, 2019,
621
+ the EM for AR12749 begins to rise as the AR weakens
622
+ and the quiet Sun emission takes precedence over the
623
+ AR emission. Thus, after the AR has almost died and is
624
+ very faint, the EM and temperature reach values close
625
+ to the quiet Sun temperature and EM. The temperature
626
+ and EM for the AR12759 remain almost constant with
627
+ time, as this AR crossed the solar disk without much
628
+ decay in activity (Figure 1c).
629
+ 5.2. Abundance evolution
630
+ In contrast to the temperature and EM, the abun-
631
+ dances of Mg, Al, and Si do not follow the X-ray light
632
+ curve of any of the three ARs throughout their evolu-
633
+ tion (Figure 6). The abundances obtained for low-FIP
634
+ elements Al, Mg, and Si are consistently greater than
635
+ the photospheric values, demonstrating a persistent FIP
636
+ bias during the course of the AR. After the emergence
637
+ of AR12758, the FIP bias is found to be almost constant
638
+ throughout its decay phase. Similarly, during the decay
639
+ of the AR12749, the FIP bias remains nearly constant,
640
+ in contrast to certain earlier studies, such as Ko et al.
641
+ (2016).
642
+ They suggested decreasing FIP bias in high-
643
+ temperature plasma of more than two million degrees
644
+ during the decay phase of an AR. The more established
645
+ AR, AR12759, which evolved without decaying much
646
+ during its transit across the solar disk, also shows an
647
+ almost constant FIP bias, similar to the other two ARs.
648
+ We do not find any relationship between the age of
649
+ the AR and the FIP bias, as suggested in some previous
650
+ papers, e.g.,Del Zanna and Mason 2014; Doschek and
651
+ Warren 2019.
652
+ The measured abundances for Mg, Si,
653
+ and S are comparable to those given by (Feldman 1992)
654
+ and Fludra and Schmelz (1999) (orange shaded regions
655
+ in Figure 6). However, the Al abundance is ∼30%-60%
656
+ higher than the coronal abundances reported in the lit-
657
+ erature. We note that the Al lines in the XSM spectra
658
+ are blended with Mg lines. From Markov Chain Monte
659
+ Carlo (MCMC) analysis (discussed in Appendix A), we
660
+ find that there is no anti-correlation between Mg and
661
+ Al abundances. This suggests that the observed spectra
662
+ do indeed require higher abundances of Al and cannot
663
+ be explained by an enhancement of Mg abundances.
664
+ 5.3. FIP bias at the onset of AR core
665
+ Though we do not find any relationship between the
666
+ age of the AR cores and their FIP biases (Section 5.2),
667
+
668
+ [Figure 4 image columns, time stamps: 5-Mar 17:58, 6-Mar 05:58, 7-Mar 02:58, 7-Mar 12:58, 7-Mar 06:58, 8-Mar 01:58.]
675
+ Figure 4. Evolution of the AR12758 during its emergence phase on the solar disk. Top row shows the evolution of photospheric
676
+ magnetograms as observed by HMI and bottom row shows the evolution of X-ray emission as observed by XRT Be-thin filter.
677
+ Figure 5.
678
+ Evolution of the temperature (red points in panel a) and EM (red points in panel b) during the evolution of
679
+ AR12749, AR12758, and AR12759. When the ARs were very bright, as bounded by the vertical dashed lines, the magenta bars
680
+ represent the average values of the temperature and EM. The black horizontal dashed lines represent the average temperature
681
+ and emission measure for the quiet Sun in the absence of any AR reported by Vadawale et al. (2021b). The XSM lightcurves
682
+ of the ARs are shown in grey color, and the lightcurves for the quiescent regions are shown in blue colors.
683
+ which remain constant, it is interesting to study the
684
+ timescale on which the FIP bias developed during the
685
+ emergence of the AR core. Such a study has been made
686
+ possible using the finer (< one day) time-resolved spec-
687
+ troscopy during the emerging phase (07-Mar-2020 to 09-
688
+ Mar-2020) of AR12758.
689
+ During this period, we esti-
690
+ mated the total unsigned photospheric magnetic flux as
691
+ measured by HMI/SDO and shown in Figure 7a (black
692
+ color).
693
+ The peak in the magnetic flux represents the
694
+ time when the AR completely emerged into the solar
695
+ disk.
696
+ After the emergence, the unsigned magnetic flux is
697
+ found to (temporarily) decrease. Figures 7b and 7c show
698
+ the evolution of the AR core temperature and emission-
699
+ measure. With the emergence of the AR, the temper-
700
+ ature becomes close to the AR core temperature of ∼3
701
+
702
+ [Figure 5 plot area: temperature (MK, panel a) and EM (panel b) versus date for AR12749, AR12758, and AR12759.]
726
+ Figure 6. Panels a-c (red error bars) show the evolution of abundance in the logarithmic scale with A(H)=12 for Mg, Al, and Si
727
+ during the evolution of AR12749, AR12758 and AR12759. The magenta bars represented the average abundances when the ARs
728
+ were very bright, as bounded by the vertical dashed lines. The y-error bars represent 1σ uncertainty for each parameter, and the
729
+ x-error bars represent the duration over which a given spectrum is integrated. The black horizontal dashed lines represent the
730
+ average abundances for the quiet Sun in the absence of any AR reported by Vadawale et al. (2021b). XSM light curves for each
731
+ AR are shown in gray in the background, and the blue color on the XSM light curves represents the time duration excluding the
732
+ flaring activities. The range of coronal and photospheric abundances from various authors compiled in the CHIANTI database
733
+ are shown as orange and green bands. The right y-axis shows the FIP bias values for the respective elements with respect to
734
+ average photospheric abundances.
735
+ MK, and the EM increases as the emitting plasma vol-
736
+ ume increase until it has emerged completely. We also
737
+ derived the evolution of the FIP bias during this period,
738
+ shown in Figure 7d for Si. During this period, as the
739
+ emission from the Mg and Al line complex was weak
740
+ compared with the background solar emission, the de-
741
+ rived FIP bias for Mg and Al has a large uncertainty
742
+ and is not shown here. Within ∼10 hours of the AR
743
+ emergence, the FIP bias was already close to 3, and re-
744
+ mained almost constant throughout the evolution. So
745
+ the emerging hot core loops do not show any variation,
746
+ in agreement with previous suggestions. Recall that the
747
+ variations in FIP bias reported earlier (e.g., Widing and
748
+ Feldman 2001) were observed in the cool loops, not the
749
+ core loops.
750
+ 5.4. Enhanced bias for Al
751
+ Figure 8 shows the average values of the FIP bias
752
+ (relative to the photospheric abundance Asplund et al.
753
+ (2009)) for all the elements as a function of their FIP
754
+ values.
755
+ The lower FIP element, Al (FIP = 5.99), is
756
+ found to have the highest FIP bias of 6-7, whereas the
757
+ low-FIP elements, Mg (FIP = 7.65) and Si (FIP = 8.15),
758
+ are found to have a lower FIP bias of ∼3. The mid/high
759
+ FIP element, S, is found to have a much lower FIP bias
760
+ of a factor of ∼ 1.5. A higher FIP bias for Al is note-
761
+ worthy and may point to an intriguing physical process.
762
+ However, this may also be a modeling artifact.
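+ For orientation, the FIP bias values in Figure 8 follow from the logarithmic abundances of Table 1
+ through FIP bias = 10^(A_AR − A_phot). The snippet below illustrates the conversion; the
+ photospheric numbers are approximate Asplund et al. (2009) values and the AR values are rough
+ averages of Table 1, so the resulting ratios are indicative only:
+ photospheric = {"Mg": 7.60, "Al": 6.45, "Si": 7.51, "S": 7.12}  # approx. Asplund et al. (2009)
+ ar_core = {"Mg": 7.97, "Al": 7.26, "Si": 8.02, "S": 7.26}       # rough averages of Table 1
+ for elem, a_phot in photospheric.items():
+     fip_bias = 10 ** (ar_core[elem] - a_phot)
+     print(f"{elem}: FIP bias ~ {fip_bias:.1f}")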
763
+ One of the possibilities could be due to missing flux
764
+ caused by the presence of multi-thermal plasma provid-
765
+ ing strong signals from emission lines of Al or Mg formed
766
+
767
+ [Figure 6 plot area: logarithmic abundances of Mg, Al, and Si (panels a–c, with FIP bias on the right axis) versus date for the three ARs.]
806
+ Figure 7. Results showing the emerging phase of AR12758.
807
+ The black curve in panel a shows the evolution of the total
808
+ unsigned photospheric magnetic flux. Panels b and c show the
809
+ evolution of temperature and EM. Panel d shows the evolu-
810
+ tion of FIP bias for Si. The dashed lines in panels b-d repre-
811
+ sent the corresponding parameter for the background solar
812
+ emission from the rest of the solar-disk except AR. The back-
813
+ ground grey curves in each panel represent the X-ray light
814
+ curve observed by XSM, whereas the blue curves represent
815
+ the selected times excluding the flaring period, representing
816
+ the quiescent AR.
817
+ at different temperatures. To verify this we have simu-
818
+ lated the emission lines in the energy range of the Mg/Al
819
+ line complex by considering the isothermal model and a
820
+ multi-thermal model using the AR DEM of AR12759,
821
+ reported by Del Zanna et al. (2022) (see Figure B.1 in
822
+ Appendix B). Similar line intensities from various ion-
823
+ ization stages of Al and Mg can be seen in both the
824
+ isothermal and multi-thermal models, confirming that
825
+ the absence of the flux is not the result of multi-thermal
826
+ plasma.
827
+ Another possibility is that missing flux is caused by
828
+ missing lines of Al or Mg (mostly satellite lines) that
829
+ are not yet present in CHIANTI version 10. We have
830
+ analysed the high-resolution spectroscopic observations
831
+ described by Walker et al. (1974) and found several ob-
832
+ served lines that are missing in the database. However,
833
+ the total missing flux, compared to the predicted flux
834
+ by CHIANTI is not enough to explain the anomalous
835
+ Al abundance. However, the Walker et al. (1974) obser-
836
+ vations were taken during a high level of solar activity,
837
+ so it is possible that the missing lines have a stronger
838
+ contribution at 3 MK. The Al abundance is currently
839
+ clearly overestimated by some degree.
840
+ Although this analysis is not conclusive enough to rule
841
+ out Al’s high FIP bias as an artifact, it is also not suf-
842
+ ficient to conclude that it is not real. A higher Al FIP
843
+ bias could be real. This might be explained by examin-
844
+ ing a few particular scenarios from the Ponderomotive
845
+ force model (Laming 2015) proposed by Laming (pri-
846
+ vate communication), which could be investigated in a
847
+ subsequent study.
848
+ We have also compared the AR core FIP bias obtained
849
+ with that of the different solar activity levels measured
850
+ by the XSM in previous research. These are overplot-
851
+ ted in Figure 8.
852
+ The blue points show the FIP bias
853
+ during the quiet Sun period, which is dominated by X-
854
+ ray Bright Points (XBP), as reported by Vadawale et al.
855
+ (2021b). While the green points depict the FIP bias dur-
856
+ ing the peak of the solar flares as reported by Mondal
857
+ et al. (2021). The FIP bias of the AR core (red points)
858
+ shows a consistently higher value for the elements Al,
859
+ Mg, and Si compared with the FIP bias of XBPs (green
860
+ points). Since ARs have substantially higher magnetic
861
+ activity than the XBPs, the increased FIP bias of the
862
+ ARs relative to the XBPs is expected from the Pondero-
863
+ motive force model. On the other hand, chromospheric
864
+ evaporation during the flaring mechanism results in a
865
+ near unit FIP bias during the peak of the flares (Mon-
866
+ dal et al. 2021).
867
+ 6. SUMMARY
868
+ We present the evolution of plasma characteristics for
869
+ three ARs using disk-integrated soft X-ray spectroscopic
870
+ observations from the XSM to make simultaneous line
871
+ and continuum measurements. Carrying out a compre-
872
+ hensive study of an AR using the Sun-as-a-star mode
873
+ observation is challenging because of the presence of
874
+ multiple activities throughout the solar cycle. Unique
875
+
876
+ [Figure 7 plot area: B-flux (×10^21 Mx), T (MK), EM, and FIP bias versus hours from 07-Mar-2020 (panels a–d).]
902
+ Table 1. Best fitted parameters for the average spectrum of each AR.
+ AR      T (MK)               EM (10^46 cm^−3)     Mg                   Al                   Si                   S
+ 12749   3.14 (+0.04/−0.05)   2.46 (+0.24/−0.19)   8.00 (+0.02/−0.03)   7.28 (+0.05/−0.06)   8.00 (+0.02/−0.02)   7.23 (+0.06/−0.05)
+ 12759   3.22 (+0.04/−0.02)   4.30 (+0.21/−0.28)   7.95 (+0.02/−0.02)   7.26 (+0.04/−0.04)   8.04 (+0.01/−0.01)   7.23 (+0.02/−0.03)
+ 12758   2.99 (+0.05/−0.03)   3.48 (+0.25/−0.31)   7.95 (+0.03/−0.02)   7.23 (+0.06/−0.05)   8.02 (+0.02/−0.02)   7.32 (+0.05/−0.06)
951
+ [Figure 8 plot area: FIP bias versus FIP (6–11 eV) for Al, Mg, Si, and S, with separate markers for AR, XBP, and Flare measurements.]
972
+ Figure 8. Variation of the FIP bias with the FIP of the ele-
973
+ ments. The red points are the averaged FIP bias for the ARs
974
+ reported in the present study. The blue points are the FIP
975
+ bias for the XBPs as reported by Vadawale et al. (2021b).
976
+ The green points are the measured FIP bias during the peak
977
+ of solar flares as reported by Mondal et al. (2021).
978
+ XSM observations made during the minimum of Solar
979
+ Cycle 24 allowed the study of the evolution of temper-
980
+ ature, EM, and the abundances of Mg, Al, and Si for
981
+ the individual ARs in the absence of any other notewor-
982
+ thy activity on the solar disk. Since the ARs were the
983
+ principal contributors of disk-integrated X-rays during
984
+ their evolution, the temperature and EM followed their
985
+ X-ray light curve. The average temperature of all the
986
+ ARs is ∼3 MK, close to the well-known temperature of
987
+ the AR core. Irrespective of the activity and age of the
988
+ ARs, the abundances or the FIP biases of Al, Mg, and Si
989
+ were found to be consistently greater than their photo-
990
+ spheric values without much variation. The abundance
991
+ values develop within ∼10 hours of the appearance of
992
+ the AR during its emerging phase. Throughout the AR
993
+ evolution, the low FIP elements, Mg and Si, have a FIP
994
+ bias close to 3, whereas the mid-FIP element, S, has an
995
+ average FIP bias of ∼1.5. The lowest FIP element, Al,
996
+ has a greater FIP bias of ∼6-7. After discussing vari-
997
+ ous modeling artifacts, the Al abundance appears to be
998
+ overestimated, although the exact factor is unknown.
999
+ An increased Al abundance could also be real, implying that
+ the degree of FIP bias of low-FIP elements is linked to their
1001
+ FIP values. Future spectroscopic studies to measure the
1002
+ FIP bias for more low-FIP elements (for example, Ca,
1003
+ whose FIP bias is between Al and Mg) would help us
1004
+ to better understand this phenomenon. In this regard,
1005
+ recent and upcoming X-ray spectrometers (for example,
1006
+ DAXSS: (Schwab et al. 2020) onboard INSPIRESat-1,
1007
+ SoLEXS (Sankarasubramanian et al. 2011) onboard up-
1008
+ coming Aditya-L1 observatory, and rocket-borne spec-
1009
+ trometer MaGIXS (Champey et al. 2022)) will be use-
1010
+ ful.
1011
+ ACKNOWLEDGMENTS
1012
+ We acknowledge the use of data from the Solar X-
1013
+ ray Monitor (XSM) on board the Chandrayaan-2 mis-
1014
+ sion of the Indian Space Research Organisation (ISRO),
1015
+ archived at the Indian Space Science Data Centre
1016
+ (ISSDC). The XSM was developed by the engineer-
1017
+ ing team of the Physical Research Laboratory (PRL), led
1018
+ by Dr.
1019
+ M. Shanmugam, with support from various
1020
+ ISRO centers.
1021
+ We thank various facilities and the
1022
+ technical teams from all contributing institutes and
1023
+ Chandrayaan-2 project, mission operations, and ground
1024
+ segment teams for their support.
1025
+ Research at PRL
1026
+ is supported by the Department of Space, Govt.
1027
+ of
1028
+ India.
1029
+ We acknowledge the support from Royal So-
1030
+ ciety through the international exchanges grant No.
1031
+ IES\R2\170199. GDZ and HEM acknowledge support
1032
+ from STFC (UK) via the consolidated grant to the
1033
+ atomic astrophysics group at DAMTP, University of
1034
+ Cambridge (ST\T000481\1). AB was the J C Bose Na-
1035
+ tional Fellow during the period of this work. We thank
1036
+ Dr. Martin Laming for the useful discussion on anoma-
1037
+ lous Al abundance.
1038
+ APPENDIX
1039
+
1040
+ A. RESULTS OF MCMC ANALYSIS
1042
+ We carried out Markov Chain Monte Carlo (MCMC) analysis of the spectra to obtain the regions of parameter space
1043
+ that best fit the observed spectra. This was done using the ‘chain’ method available within XSPEC. Figure A.1 shows
+ the corner plot of the results for the spectrum on 01-Oct-2019. The results show that all parameters are well constrained
+ by the spectra. In particular, we note that there is no anti-correlation between the Al and Mg abundances, showing
+ that the enhanced Al abundances obtained cannot be compensated for by enhancements in the Mg abundances. Similar trends
1047
+ are observed for spectra of other days as well.
1048
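+ As an illustration of how such a corner plot can be generated from the stored chain, a minimal sketch is given below. It assumes the chain samples have been exported to a plain-text table with one column per free parameter (Mg, Al, Si, T, EM); the file names and the use of the corner package are our choices for illustration and not part of the original analysis.
+ import numpy as np
+ import corner
+
+ # assumed: MCMC samples exported as an (n_samples x 5) table, one column per parameter
+ samples = np.loadtxt("xsm_chain_01oct2019.txt")
+ labels = ["Mg", "Al", "Si", "T", "EM"]
+
+ # 2D contour levels corresponding to 1-sigma, 2-sigma, and 3-sigma
+ levels = 1.0 - np.exp(-0.5 * np.array([1.0, 2.0, 3.0]) ** 2)
+ fig = corner.corner(samples, labels=labels, levels=levels, show_titles=True)
+ fig.savefig("corner_01oct2019.png")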
+ [Figure A.1 corner plot: one- and two-dimensional marginal distributions of the fitted parameters Mg, Al, Si, T, and EM; only the parameter labels and axis ticks were recoverable from the extracted plot. See the caption below.]
+ Figure A.1. Corner plot depicting the results of MCMC analysis for the fitted spectrum on 01-Oct-2019. The histograms
1097
+ depict the marginalized distribution associated with each parameter. The scatter-plots are overlaid with contours representing
1098
+ 1σ, 2σ, and 3σ levels to show correlations between all parameters. The best-fit parameters are represented by green lines.
1099
+ B. SIMULATED SPECTRUM
1100
+ To check the effect of temperatures on the Mg/Al line fluxes in the XSM energy range of 1.55 to 1.70 keV, we
1101
+ have compared the simulated spectra in the same energy range by considering the isothermal and multi-thermal DEM
1102
+ models. Figure B.1 shows the simulated 3 MK spectrum (blue) overplotted with the multithermal spectrum (red).
1103
+ The isothermal spectrum is generated for an emission measure of 10^27 cm^−5. The multithermal spectrum is derived by
1104
+
1105
+ [Figure B.1 plot: normalized intensity versus energy over 1.56–1.70 keV, with the Mg XI, Al XI-XII, and Al XII features marked and two curves labelled DEM and Isothermal; only the labels were recoverable from the extracted plot. See the caption below.]
+ Figure B.1. Simulated spectra from CHIANTI v 10 in the energy range of Mg/Al line complex of XSM observed spectrum.
1129
+ The solid blue curve shows the multi-thermal spectrum and the dashed orange curve shows the isothermal spectrum.
1130
+ using the reported quiescent AR DEM by Del Zanna et al. (2022), which was obtained from the Hinode EIS observation
1131
+ of AR12759. For the comparison of both spectra, we have normalized them with the corresponding line flux of Mg XI,
1132
+ and Al XI-XII. The similar line intensities predicted by both the isothermal and multithermal models indicate that the spectra
1133
+ are insensitive to temperature in this case.
1134
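+ (Schematically, and assuming the usual definitions, the two spectra are computed as I_iso(E) ∝ G(E, T0) × EM with T0 = 3 MK, and I_DEM(E) ∝ Σ_j G(E, T_j) DEM(T_j) ΔT_j, where G(E, T) is the CHIANTI contribution function; each spectrum is then divided by its own Mg XI / Al XI-XII line flux before the shapes are compared.)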
+
1135
+ REFERENCES
1137
+ Arnaud, K., Dorman, B., and Gordon, C. (1999). XSPEC:
1138
+ An X-ray spectral fitting package.
1139
+ Asplund, M., Grevesse, N., Sauval, A. J., and Scott, P.
1140
+ (2009). The Chemical Composition of the Sun.
1141
+ ARA&A, 47(1):481–522.
1142
+ Baker, D., Brooks, D. H., D´emoulin, P., van
1143
+ Driel-Gesztelyi, L., Green, L. M., Steed, K., and Carlyle,
1144
+ J. (2013). Plasma Composition in a Sigmoidal Anemone
1145
+ Active Region. ApJ, 778(1):69.
1146
+ Baker, D., Brooks, D. H., D´emoulin, P., Yardley, S. L., van
1147
+ Driel-Gesztelyi, L., Long, D. M., and Green, L. M.
1148
+ (2015). FIP Bias Evolution in a Decaying Active Region.
1149
+ ApJ, 802(2):104.
1150
+ Baker, D., Brooks, D. H., van Driel-Gesztelyi, L., James,
1151
+ A. W., D´emoulin, P., Long, D. M., Warren, H. P., and
1152
+ Williams, D. R. (2018). Coronal Elemental Abundances
1153
+ in Solar Emerging Flux Regions. ApJ, 856(1):71.
1154
+ Bochsler, P. (2007). Minor ions in the solar wind.
1155
+ A&A Rv, 14(1):1–40.
1156
+ Brooks, D. H., Baker, D., van Driel-Gesztelyi, L., and
1157
+ Warren, H. P. (2017). A solar cycle correlation of
1158
+ coronal element abundances in sun-as-a-star
1159
+ observations. Nature Communications, 8(1).
1160
+ Brooks, D. H., Ugarte-Urra, I., and Warren, H. P. (2015).
1161
+ Full-Sun observations for identifying the source of the
1162
+ slow solar wind. Nature Communications, 6:5947.
1163
+ Brooks, D. H. and Warren, H. P. (2011). Establishing a
1164
+ Connection Between Active Region Outflows and the
1165
+ Solar Wind: Abundance Measurements with EIS/Hinode.
1166
+ ApJL, 727(1):L13.
1167
+ Champey, P. R., Winebarger, A. R., Kobayashi, K.,
1168
+ Athiray, P. S., Hertz, E., Savage, S., Beabout, B.,
1169
+ Beabout, D., Broadway, D., Bruccoleri, A. R., Cheimets,
1170
+ P., Davis, J., Duffy, J., Golub, L., Gregory, D. A.,
1171
+ Griffith, C., Haight, H., Heilmann, R. K., Hogue, B.,
1172
+ Hohl, J., Hyde, D., Kegley, J., Kolodzieczjak, J.,
1173
+ Ramsey, B., Ranganathan, J., Robertson, B.,
1174
+ Schattenburg, M. L., Speegle, C. O., Vigil, G., Walsh, R.,
1175
+ Weddenorf, B., and Wright, E. (2022). The Marshall
1176
+ Grazing Incidence X-ray Spectrometer (MaGIXS).
1177
+ Journal of Astronomical Instrumentation, 11(2):2250010.
1178
+ Dahlburg, R. B., Laming, J. M., Taylor, B. D., and
1179
+ Obenschain, K. (2016). PONDEROMOTIVE
1180
+ ACCELERATION IN CORONAL LOOPS. The
1181
+ Astrophysical Journal, 831(2):160.
1182
+ Del Zanna, G. (2003). Solar active regions: The footpoints
1183
+ of 1 MK loops. A&A, 406:L5–L8.
1184
+ Del Zanna, G. (2012). Benchmarking atomic data for the
1185
+ CHIANTI atomic database: coronal lines observed by
1186
+ Hinode EIS. A&A, 537:A38.
1187
+ Del Zanna, G. (2013). The multi-thermal emission in solar
1188
+ active regions. A&A, 558:A73.
1189
+ Del Zanna, G. (2019). The EUV spectrum of the Sun:
1190
+ Quiet- and active-Sun irradiances and chemical
1191
+ composition. A&A, 624:A36.
1192
+ Del Zanna, G. and Mason, H. E. (2003). Solar active
1193
+ regions: SOHO/CDS and TRACE observations of
1194
+ quiescent coronal loops. A&A, 406:1089–1103.
1195
+ Del Zanna, G. and Mason, H. E. (2014). Elemental
1196
+ abundances and temperatures of quiescent solar active
1197
+ region cores from X-ray observations. A&A, 565:A14.
1198
+ Del Zanna, G. and Mason, H. E. (2018). Solar UV and
1199
+ X-ray spectral diagnostics. Living Reviews in Solar
1200
+ Physics, 15(1):5.
1201
+ Del Zanna, G., Mondal, B., Rao, Y. K., Mithun, N. P. S.,
1202
+ Vadawale, S. V., Reeves, K. K., Mason, H. E., Sarkar, A.,
1203
+ Janardhan, P., and Bhardwaj, A. (2022).
1204
+ Multiwavelength Observations by XSM, Hinode, and
1205
+ SDO of an Active Region. Chemical Abundances and
1206
+ Temperatures. ApJ, 934(2):159.
1207
+ Doschek, G. A. and Warren, H. P. (2019). The Variability
1208
+ of Solar Coronal Abundances in Active Regions and the
1209
+ Quiet Sun. ApJ, 884(2):158.
1210
+ Dwivedi, B. N., Curdt, W., and Wilhelm, K. (1999).
1211
+ Analysis of Extreme-Ultraviolet Off-Limb Spectra
1212
+ Obtained with SUMER/SOHO: Ne VI-Mg VI Emission
1213
+ Lines. ApJ, 517(1):516–525.
1214
+ Feldman, U. (1992). Elemental abundances in the upper
1215
+ solar atmosphere. PhyS, 46(3):202–220.
1216
+ Feldman, U., Landi, E., Doschek, G. A., Dammasch, I., and
1217
+ Curdt, W. (2003). Free-Free Emission in the
1218
+ Far-Ultraviolet Spectral Range: A Resource for
1219
+ Diagnosing Solar and Stellar Flare Plasmas. ApJ,
1220
+ 593(2):1226–1241.
1221
+ Feldman, U., Schuhle, U., Widing, K. G., and Laming,
1222
+ J. M. (1998). Coronal composition above the solar
1223
+ equator and the north pole as determined from spectra
1224
+ acquired by the SUMER instrument on SOHO. The
1225
+ Astrophysical Journal, 505(2):999–1006.
1226
+ Feldman, U. and Widing, K. G. (2002). A review of the
1227
+ first ionization potential effect on elemental abundances
1228
+ in the solar corona and in flares. Physics of Plasmas,
1229
+ 9(2):629–635.
1230
+ Fludra, A. and Schmelz, J. T. (1999). The absolute coronal
1231
+ abundances of sulfur, calcium, and iron from
1232
+ Yohkoh-BCS flare spectra. A&A, 348:286–294.
1233
+
1234
+ Freeland, S. L. and Handy, B. N. (1998). Data Analysis
1236
+ with the SolarSoft System. SoPh, 182(2):497–500.
1237
+ Gloeckler, G. and Geiss, J. (1989). The abundances of
1238
+ elements and isotopes in the solar wind. In Waddington,
1239
+ C. J., editor, Cosmic Abundances of Matter, volume 183
1240
+ of American Institute of Physics Conference Series, pages
1241
+ 49–71.
1242
+ Ko, Y.-K., Young, P. R., Muglach, K., Warren, H. P., and
1243
+ Ugarte-Urra, I. (2016). Correlation of Coronal Plasma
1244
+ Properties and Solar Magnetic Field in a Decaying
1245
+ Active Region. ApJ, 826(2):126.
1246
+ Lakshitha, N., Mondal, B., Narendranath, S., and Paul, K.
1247
+ (2022). Elemental abundances during A-class solar
1248
+ flares: Soft X-ray spectroscopy from Chandrayaan-2
1249
+ XSM. Under preparation.
1250
+ Laming, J. M. (2004). A Unified Picture of the First
1251
+ Ionization Potential and Inverse First Ionization
1252
+ Potential Effects. ApJ, 614(2):1063–1072.
1253
+ Laming, J. M. (2009). Non-Wkb Models of the First
1254
+ Ionization Potential Effect: Implications for Solar
1255
+ Coronal Heating and the Coronal Helium and Neon
1256
+ Abundances. ApJ, 695(2):954–969.
1257
+ Laming, J. M. (2012). Non-WKB Models of the First
1258
+ Ionization Potential Effect: The Role of Slow Mode
1259
+ Waves. ApJ, 744(2):115.
1260
+ Laming, J. M. (2015). The fip and inverse fip effects in
1261
+ solar and stellar coronae. Living Reviews in Solar
1262
+ Physics, 12:1–76.
1263
+ Laming, J. M. (2017). The first ionization potential effect
1264
+ from the ponderomotive force: On the polarization and
1265
+ coronal origin of alfv´en waves. The Astrophysical
1266
+ Journal, 844(2):153.
1267
+ Mihailescu, T., Baker, D., Green, L. M., van
1268
+ Driel-Gesztelyi, L., Long, D. M., Brooks, D. H., and To,
1269
+ A. S. H. (2022). What Determines Active Region
1270
+ Coronal Plasma Composition? ApJ, 933(2):245.
1271
+ Mithun, N., Vadawale, S., Patel, A., Shanmugam, M.,
1272
+ Chakrabarty, D., Konar, P., Sarvaiya, T., Padia, G.,
1273
+ Sarkar, A., Kumar, P., Jangid, P., Sarda, A., Shah, M.,
1274
+ and Bhardwaj, A. (2021). Data processing software for
1275
+ chandrayaan-2 solar x-ray monitor. Astronomy and
1276
+ Computing, 34:100449.
1277
+ Mithun, N. P. S., Vadawale, S. V., Sarkar, A., Shanmugam,
1278
+ M., Patel, A. R., Mondal, B., Joshi, B., Janardhan, P.,
1279
+ Adalja, H. L., Goyal, S. K., Ladiya, T., Tiwari, N. K.,
1280
+ Singh, N., Kumar, S., Tiwari, M. K., Modi, M. H., and
1281
+ Bhardwaj, A. (2020). Solar X-Ray Monitor on Board the
1282
+ Chandrayaan-2 Orbiter: In-Flight Performance and
1283
+ Science Prospects. SoPh, 295(10):139.
1284
+ Mithun, N. P. S., Vadawale, S. V., Zanna, G. D., Rao,
1285
+ Y. K., Joshi, B., Sarkar, A., Mondal, B., Janardhan, P.,
1286
+ Bhardwaj, A., and Mason, H. E. (2022). Soft X-Ray
1287
+ Spectral Diagnostics of Multithermal Plasma in Solar
1288
+ Flares with Chandrayaan-2 XSM. ApJ, 939(2):112.
1289
+ Mondal, B., Klimchuk, J. A., Vadawale, S. V., Sarkar, A.,
1290
+ Zanna, G. D., Athiray, P. S., Mithun, N., Mason, H. E.,
1291
+ and Bhardwaj, A. (2022). Role of small-scale impulsive
1292
+ events in heating the X-ray bright points of the quiet
1293
+ Sun. Submitted to ApJ.
1294
+ Mondal, B., Sarkar, A., Vadawale, S. V., Mithun, N. P. S.,
1295
+ Janardhan, P., Del Zanna, G., Mason, H. E.,
1296
+ Mitra-Kraev, U., and Narendranath, S. (2021).
1297
+ Evolution of Elemental Abundances during B-Class Solar
1298
+ Flares: Soft X-Ray Spectral Measurements with
1299
+ Chandrayaan-2 XSM. ApJ, 920(1):4.
1300
+ Pottasch, S. R. (1963). The Lower Solar Corona:
1301
+ Interpretation of the Ultraviolet Spectrum. ApJ, 137:945.
1302
+ Saba, J. L. R. and Strong, K. T. (1993). Coronal
1303
+ abundances of O, Ne, Mg, and Fe in solar active regions.
1304
+ Advances in Space Research, 13(9):391–394.
1305
+ Sankarasubramanian, K., Ramadevi, M. C., Bug, M.,
1306
+ Umapathy, C. N., Seetha, S., Sreekumar, P., and Kumar
1307
+ (2011). SoLEXS - A low energy X-ray spectrometer for
1308
+ solar coronal studies. In Astronomical Society of India
1309
+ Conference Series, volume 2 of Astronomical Society of
1310
+ India Conference Series, pages 63–69.
1311
+ Schwab, B. D., Sewell, R. H. A., Woods, T. N., Caspi, A.,
1312
+ Mason, J. P., and Moore, C. (2020). Soft X-Ray
1313
+ Observations of Quiescent Solar Active Regions Using
1314
+ the Novel Dual-zone Aperture X-Ray Solar Spectrometer.
1315
+ ApJ, 904(1):20.
1316
+ Shanmugam, M., Vadawale, S. V., Patel, A. R., Adalaja,
1317
+ H. K., Mithun, N. P. S., Ladiya, T., Goyal, S. K., Tiwari,
1318
+ N. K., Singh, N., Kumar, S., Painkra, D. K., Acharya,
1319
+ Y. B., Bhardwaj, A., Hait, A. K., Patinge, A., Kapoor,
1320
+ A. h., Kumar, H. N. S., Satya, N., Saxena, G., and
1321
+ Arvind, K. (2020). Solar X-ray Monitor Onboard
1322
+ Chandrayaan-2 Orbiter. Current Science, 118(1):45–52.
1323
+ Sheeley, N. R., J. (1995). A Volcanic Origin for High-FIP
1324
+ Material in the Solar Atmosphere. ApJ, 440:884.
1325
+ Sheeley, N. R., J. (1996). Elemental Abundance Variations
1326
+ in the Solar Atmosphere. ApJ, 469:423.
1327
+ Testa, P., Martinez-Sykora, J., and De Pontieu, B. (2022).
1328
+ Coronal Abundances in an Active Region: Evolution and
1329
+ Underlying Chromospheric and Transition Region
1330
+ Properties. arXiv e-prints, page arXiv:2211.07755.
1331
+
1332
+ Vadawale, S., Shanmugam, M., Acharya, Y., Patel, A.,
1334
+ Goyal, S., Shah, B., Hait, A., Patinge, A., and
1335
+ Subrahmanyam, D. (2014). Solar x-ray monitor (xsm)
1336
+ on-board chandrayaan-2 orbiter. Advances in Space
1337
+ Research, 54(10):2021 – 2028. Lunar Science and
1338
+ Exploration.
1339
+ Vadawale, S. V., Mithun, N. P. S., Mondal, B., Sarkar, A.,
1340
+ Janardhan, P., Joshi, B., Bhardwaj, A., Shanmugam, M.,
1341
+ Patel, A. R., Adalja, H. K. L., Goyal, S. K., Ladiya, T.,
1342
+ Tiwari, N. K., Singh, N., and Kumar, S. (2021a).
1343
+ Observations of the quiet sun during the deepest solar
1344
+ minimum of the past century with chandrayaan-2 XSM:
1345
+ Sub-a-class microflares outside active regions. The
1346
+ Astrophysical Journal Letters, 912(1):L13.
1347
+ Vadawale, S. V., Mondal, B., Mithun, N. P. S., Sarkar, A.,
1348
+ Janardhan, P., Joshi, B., Bhardwaj, A., Shanmugam, M.,
1349
+ Patel, A. R., Adalja, H. K. L., Goyal, S. K., Ladiya, T.,
1350
+ Tiwari, N. K., Singh, N., and Kumar, S. (2021b).
1351
+ Observations of the quiet sun during the deepest solar
1352
+ minimum of the past century with chandrayaan-2 XSM:
1353
+ Elemental abundances in the quiescent corona. The
1354
+ Astrophysical Journal Letters, 912(1):L12.
1355
+ Walker, A. B. C., J., Rugge, H. R., and Weiss, K. (1974).
1356
+ Relative Coronal Abundances Derived from X-Ray
1357
+ Observations. I. Sodium, Magnesium, Aluminum, Silicon,
1358
+ Sulfur, and Argon. ApJ, 188:423–440.
1359
+ Widing, K. G. (1997). Emerging Active Regions on the
1360
+ Sun and the Photospheric Abundance of Neon. ApJ,
1361
+ 480(1):400–405.
1362
+ Widing, K. G. and Feldman, U. (1993). Nonphotospheric
1363
+ abundances in a solar active region. ApJ, 416:392.
1364
+ Widing, K. G. and Feldman, U. (2001). On the Rate of
1365
+ Abundance Modifications versus Time in Active Region
1366
+ Plasmas. ApJ, 555(1):426–434.
1367
+ Winebarger, A. R., Warren, H. P., Schmelz, J. T., Cirtain,
1368
+ J., Mulu-Moore, F., Golub, L., and Kobayashi, K. (2012).
1369
+ Defining the “Blind Spot” of Hinode EIS and XRT
1370
+ Temperature Measurements. ApJL, 746(2):L17.
1371
+ Young, P. R. and Mason, H. E. (1997). The Mg/Ne
1372
+ abundance ratio in a recently emerged flux region
1373
+ observed by CDS. SoPh, 175(2):523–539.
1374
+
4NE1T4oBgHgl3EQf6AXQ/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4dE1T4oBgHgl3EQf6QUq/content/tmp_files/2301.03520v1.pdf.txt ADDED
@@ -0,0 +1,715 @@
1
+ arXiv:2301.03520v1 [math.FA] 9 Jan 2023
2
+ CLASSIFYING WEAK PHASE RETRIEVAL
3
+ P. G. CASAZZA AND F. AKRAMI
4
+ Abstract. We will give several surprising equivalences and consequences of
5
+ weak phase retrieval. These results give a complete understanding of the dif-
6
+ ference between weak phase retrieval and phase retrieval. We also answer two
7
+ longstanding open problems on weak phase retrieval: (1) We show that the
8
+ families of weak phase retrievable frames {xi}m
9
+ i=1 in Rn are not dense in the
10
+ family of m-element sets of vectors in Rn for all m ≥ 2n − 2; (2) We show
11
+ that any frame {xi}2n−2
12
+ i=1
13
+ containing one or more canonical basis vectors in Rn
14
+ cannot do weak phase retrieval. We provide numerous examples to show that
15
+ the obtained results are best possible.
16
+ 1. Introduction
17
+ The concept of frames in a separable Hilbert space was originally introduced by
18
+ Duffin and Schaeffer in the context of non-harmonic Fourier series [14]. Frames
19
+ are a more flexible tool than bases because of the redundancy property that makes
20
+ them more applicable than bases. Phase retrieval is an old problem of recovering
21
+ a signal from the absolute value of linear measurement coefficients called intensity
22
+ measurements. Phase retrieval and norm retrieval have become very active areas of
23
+ research in applied mathematics, computer science, engineering, and more today.
24
+ Phase retrieval has been defined for both vectors and subspaces (projections) in all
25
+ separable Hilbert spaces, (e.g., [3], [4], [5], [6], [9], [10] and [11]).
26
+ The concept of weak phase retrieval weakens the notion of phase retrieval; it
+ was first defined for vectors in [8] and [7]. The rest of the paper is organized
28
+ as follows: In Section 2, we give the basic definitions and certain preliminary results
29
+ to be used in the paper. Weak phase retrieval by vectors is introduced in section
30
+ 3. In section 4 we show that any family of vectors {xi}2n−2
31
+ i=1
32
+ doing weak phase
33
+ retrieval cannot contain a unit vector. In section 5, we show that the weak phase
34
+ retrievable frames are not dense in all frames. And in section 6 we give several
35
+ surprising equivalences and consequences of weak phase retrieval. These results
36
+ give a complete understanding of the difference between weak phase retrieval and
37
+ phase retrieval.
38
+ 2. Preliminaries
39
+ First we give the background material needed for the paper. Let H be a finite
40
+ or infinite dimensional real Hilbert space and B(H) the class of all bounded linear
41
+ operators defined on H. The natural numbers and real numbers are denoted by
42
+ “N” and “R”, respectively. We use [m] instead of the set {1, 2, 3, . . ., m} and use
43
+ [{xi}i∈I] instead of span{xi}i∈I, where I is a finite or countable subset of N. We
44
+ 2010 Mathematics Subject Classification. 42C15, 42C40.
45
+ Key words and phrases. Real Hilbert frames, Full spark, Phase retrieval, Weak phase retrieval.
46
+ The first author was supported by NSF DMS 1609760.
47
+ denote by Rn a n dimensional real Hilbert space. We start with the definition of a
52
+ real Hilbert space frame.
53
+ Definition 1. A family of vectors {xi}i∈I in a finite or infinite dimensional separable
54
+ real Hilbert space H is a frame if there are constants 0 < A ≤ B < ∞ so that
55
+ A∥x∥^2 ≤ ∑_{i∈I} |⟨x, xi⟩|^2 ≤ B∥x∥^2,  for all x ∈ H.
61
+ The constants A and B are called the lower and upper frame bounds for {xi}i∈I,
62
+ respectively. If an upper frame bound exists, then {xi}i∈I is called a B-Bessel
63
+ sequence, or simply Bessel when the constant is implicit. If A = B, it is called an
64
+ A-tight frame and in case A = B = 1, it is called a Parseval frame. The values
65
+ {⟨x, xi⟩}∞
66
+ i=1 are called the frame coefficients of the vector x ∈ H.
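+ For example, the canonical orthonormal basis {ei}_{i=1}^{n} of Rn is a Parseval frame, since ∑_{i=1}^{n} |⟨x, ei⟩|^2 = ∥x∥^2 for every x ∈ Rn.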
67
+ It is immediate that a frame must span the space. We will need to work with
68
+ Riesz sequences.
69
+ Definition 2. A family X = {xi}i∈I in a finite or infinite dimensional real Hilbert
70
+ space H is a Riesz sequence if there are constants 0 < A ≤ B < ∞ satisfying
71
+ A ∑_{i∈I} |ci|^2 ≤ ∥ ∑_{i∈I} ci xi ∥^2 ≤ B ∑_{i∈I} |ci|^2
81
+ for all sequences of scalars {ci}i∈I. If it is complete in H, we call X a Riesz basis.
82
+ For an introduction to frame theory we recommend [12, 13].
83
+ Throughout the paper the orthogonal projection or simply projection will be a self-
84
+ adjoint positive projection and {ei}∞
85
+ i=1 will be used to denote the canonical basis
86
+ for the real space Rn, i.e., a basis for which
87
+ ⟨ei, ej⟩ = δi,j = 1 if i = j, and 0 if i ≠ j.
93
+ Definition 3. A family of vectors {xi}i∈I in a real Hilbert space H does phase
94
+ (norm) retrieval if whenever x, y ∈ H, satisfy
95
+ |⟨x, xi⟩| = |⟨y, xi⟩| for all i ∈ I, then x = ±y (∥x∥ = ∥y∥).
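+ (For instance, in R2 the orthonormal basis {(1, 0), (0, 1)} fails phase retrieval: x = (1, 1) and y = (1, −1) satisfy |⟨x, ei⟩| = |⟨y, ei⟩| for i = 1, 2, yet x ≠ ±y.)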
99
+ Phase retrieval was introduced in reference [4]. See reference [1] for an introduc-
100
+ tion to norm retrieval.
101
+ Note that if {xi}i∈I does phase (norm) retrieval, then so does {aixi}i∈I for any
102
+ 0 < ai < ∞ for all i ∈ I. But in the case where |I| = ∞, we have to be careful to
103
+ maintain frame bounds. This always works if 0 < infi∈I ai ≤ supi∈Iai < ∞. But
104
+ this is not necessary in general [1]. The complement property is an essential issue
105
+ here.
106
+ Definition 4. A family of vectors {xi}i∈I in a finite or infinite dimensional real
107
+ Hilbert space H has the complement property if for any subset J ⊂ I,
108
+ either span{xi}i∈J = H
109
+ or
110
+ span{xi}i∈Jc = H.
111
+ Fundamental to this area is the following for which the finite dimensional case
112
+ appeared in [10].
113
+
114
+ WEAK PHASE RETRIEVAL
115
+ 3
116
+ Theorem 1. A family of vectors {xi}i∈I does phase retrieval in Rn if and only if it
117
+ has the complement property.
118
+ We recall:
119
+ Definition 5. A family of vectors {xi}m
120
+ i=1 in Rn is full spark if for every I ⊂
121
+ [m] with |I| = n , {xi}i∈I is linearly independent.
122
+ Corollary 1. If {xi}m
123
+ i=1 does phase retrieval in Rn, then m ≥ 2n− 1. If m = 2n− 1,
124
+ {xi}m
125
+ i=1 does phase retrieval if and only if it is full spark.
126
+ We rely heavily on a significant result from [2]:
127
+ Theorem 2. If {xi}2n−2
128
+ i=1
129
+ does weak phase retrieval in Rn then for every I ⊂ [2n−2],
130
+ if x ⊥ span{xi}i∈I and y ⊥ {xi}i∈Ic then
131
+ x/∥x∥ + y/∥y∥ and x/∥x∥ − y/∥y∥ are disjointly
139
+ supported. In particular, if ∥x∥ = ∥y∥ = 1, then x + y and x − y are disjointly
140
+ supported. Hence, if x = (a1, a2, . . . , an) then y = (ǫ1a1, ǫ2a2, . . . , ǫnan), where
141
+ ǫi = ±1 for i = 1, 2, . . ., n.
142
+ Remark 2.1. The above theorem may fail if ∥x∥ ̸= ∥y∥. For example, consider the
143
+ weak phase retrievable frame in R3:
144
+
145
+ 
146
+ 1
147
+ 1
148
+ 1
149
+ −1
150
+ 1
151
+ 1
152
+ 1
153
+ −1
154
+ 1
155
+ 1
156
+ 1
157
+ −1
158
+
159
+ 
160
+ Also, x = (0, 1, −1) is perpendicular to rows 1 and 2 and y = (0, 1
161
+ 2, 1
162
+ 2) is orthogonal
163
+ to rows 2 and 3. But x + y = (0, 3
164
+ 2, 1
165
+ 2) and x − y = (0, −1
166
+ 2 , −3
167
+ 2 ) and these are not
168
+ disjointly supported. But if we let them have the same norm we get x = (0, 1, −1)
169
+ and y = (0, 1, 1) so x + y = (0, 1, 0) and x − y = (0, 0, 1) and these are disjointly
170
+ supported.
171
+ 3. Weak phase retrieval
172
+ The notion of “Weak phase retrieval by vectors” in Rn was introduced in [8] and
173
+ was developed further in [7]. One limitation of current methods used for retrieving
174
+ the phase of a signal is computing power. Recall that a generic family of (2n − 1)-
175
+ vectors in Rn satisfies phaseless reconstruction, however no set of (2n − 2)-vectors
176
+ can (See [7] for details). By generic we are referring to an open dense set in the set
177
+ of (2n − 1)-element frames in Rn.
178
+ Definition 6. Two vectors x = (a1, a2, . . . , an) and y = (b1, b2, . . . , bn) in Rn weakly
179
+ have the same phase if there is a |θ| = 1 so that phase(ai) = θphase(bi) for all
180
+ i ∈ [n], for which ai ̸= 0 ̸= bi.
181
+ If θ = 1, we say x and y weakly have the same signs and if θ = −1, they weakly
182
+ have the opposite signs.
183
+ Therefore with above definition the zero vector in Rn weakly has the same phase
184
+ with all vectors in Rn. For x ∈ R, sgn(x) = 1 if x > 0 and sgn(x) = −1 if x < 0.
185
+ Definition 7. A family of vectors {xi}m
186
+ i=1 does weak phase retrieval in Rn if for
187
+ any x = (a1, a2, . . . , an) and y = (b1, b2, . . . , bn) in Rn with |⟨x, xi⟩| = |⟨y, xi⟩| for
188
+ all i ∈ [m], then x and y weakly have the same phase.
189
+ A fundamental result here is
190
+
191
+ 4
192
+ P. G. CASAZZA AND F. AKRAMI
193
+ Proposition 1. [8] Let x = (a1, a2, . . . , an) and y = (b1, b2, . . . , bn) in Rn.
194
+ The
195
+ following are equivalent:
196
+ (1) We have sgn(aiaj) = sgn(bibj), for all 1 ≤ i ̸= j ≤ n
197
+ (2) Either x, y have weakly the same sign or they have the opposite signs.
198
+ It is clear that if {xi}m
199
+ i=1 does weak phase retrieval in Rn, then {cixi}m
200
+ i=1 does
201
+ weak phase retrieval as long as ci > 0 for all i = 1, 2, . . ., m.
202
+ The following appears in [7].
203
+ Theorem 3. If X = {xi}m
204
+ i=1 does weak phase retrieval in Rn, then m ≥ 2n − 2.
205
+ Finally, we have:
206
+ Theorem 4. [7] If a frame X = {xi}2n−2
207
+ i=1
208
+ does weak phase retrieval in Rn, then X
209
+ is a full spark frame.
210
+ Clearly the converse of the above theorem does not hold; for example, {(1, 0), (0, 1)} is
+ a full spark frame that fails weak phase retrieval in R2.
212
+ If {xi}i∈I does phase retrieval and R is an invertible operator on the space
213
+ then {Rxi}i∈I does phase retrieval. This follows easily since |⟨x, Rxi⟩| = |⟨y, Rxi⟩|
214
+ implies |⟨R∗x, xi⟩| = |⟨R∗y, xi⟩|, and so R∗x = θR∗y for |θ| = 1.
215
+ Since R is
216
+ invertible, x = θy. This result fails badly for weak phase retrieval. For example,
217
+ let e1 = (1, 0), e2 = (0, 1), x1 = ( 1
218
+
219
+ 2,
220
+ 1
221
+
222
+ 2, x2 = ( 1
223
+
224
+ 2, −1
225
+
226
+ 2) in R2. Then {e1, e2} fails
227
+ weak phase retrieval, {x1, x2} does weak phase retrieval and Uei = xi is a unitary
228
+ operator.
229
+ 4. Frames Containing Unit Vectors
230
+ Theorem 5. Any frame {xi}2n−2
231
+ i=1
232
+ with one or more canonical basis vectors in Rn
233
+ cannot do weak phase retrieval.
234
+ Proof. We proceed by way of contradiction. Recall that {xi}2n−2
235
+ i=1
236
+ must be full spark.
237
+ Let {ei}n
238
+ i=1 be the canonical orthonormal basis of Rn. Assume I ⊂ {1, 2, . . ., 2n−2}
239
+ with |I| = n − 1 and assume x = (a1, a2, . . . , an), y = (b1, b2, . . . , bn) with ∥x∥ =
240
+ ∥y∥ = 1 and x ⊥ X = span{xi}i∈I and y ⊥ span{xi}2n−2
241
+ i=n . After reindexing {ei}n
242
+ i=1
243
+ and {xi}2n−2
244
+ i=1 }, we assume x1 = e1, I = {1, 2, . . ., n−1 and Ic = {n, n+1, . . . , 2n−
245
+ 2}. Since ⟨x, x1⟩ = a1 = 0, by Theorem 2, b1 = 0. Let P be the projection on
246
+ span{ei}n
247
+ i=2. So {Pxi}2n−2
248
+ i=n
249
+ is (n − 1)-vectors in an (n − 1)-dimensional space and
250
+ y is orthogonal to all these vectors. So there exist {ci}2n−2
251
+ i=n
252
+ not all zero so that
253
+ 2n−2
254
+
255
+ i=n
256
+ ciPxi = 0 and so
257
+ 2n−1
258
+
259
+ i=n
260
+ cixi(1)x1 −
261
+ 2n−2
262
+
263
+ i=n
264
+ cixi = 0.
265
+ That is, our vectors are not full spark, a contradiction.
266
+
267
+ Remark 4.1. The fact that there are (2n− 2) vectors in the theorem is critical. For
268
+ example, e1, e2, e1 + e2 is full spark in R2, so it does phase retrieval - and hence
269
+ weak phase retrieval - despite the fact that it contains both basis vectors.
270
+ The converse of Theorem 5 is not true in general.
271
+ Example 1. Consider the full spark frame X = {(1, 2, 3), (0, 1, 0), (0, −2, 3), (1, −2, −3)}
272
+ in R3. Every set of its two same coordinates,
273
+ {(1, 2), (0, 1), (0, −2), (1, −2)}, {(1, 3), (0, 0), (0, 3), (1, −3)}, and
274
+
275
+ WEAK PHASE RETRIEVAL
276
+ 5
277
+ {(2, 3), (1, 0), (−2, 3), (−2, −3)}
278
+ do weak phase retrieval in R2, but by Theorem 5, X cannot do weak phase retrieval
279
+ in R3.
280
+ 5. Weak Phase Retrievable Frames are not Dense in all Frames
281
+ If m ≥ 2n − 1 and {xi}m
282
+ i=1 is full spark then it has complement property and
283
+ hence does phase retrieval. Since the full spark frames are dense in all frames, it
284
+ follows that the frames doing phase retrieval are dense in all frames with ≥ 2n − 1
285
+ vectors. We will now show that this result fails for weak phase retrievable frames.
286
+ The easiest way to get very general frames failing weak phase retrieval is:
287
+ Choose x, y ∈ Rn so that x + y, x − y do not have the same or opposite signs.
288
+ Let X1 = x⊥ and Y1 = y⊥. Then span{X1, X2} = Rn. Choose {xi}n−1
289
+ i=1 vectors
290
+ spanning X1 and {xi}2n−2
291
+ i=n
292
+ be vectors spanning X2. Then {xi}2n−2
293
+ i=1
294
+ is a frame for
295
+ Rn with x ⊥ xi, for i = 1, 2, . . ., n − 1 and y ⊥ xi, for all i = n, n + 1, , . . . , 2n − 2.
296
+ It follows that
297
+ |⟨x + y, xi⟩| = |⟨x − y, xi⟩|, for all i = 1, 2, . . . , n,
298
+ but x, y do not have the same or opposite signs and so {xi}2n−2
299
+ i=1
300
+ fails weak phase
301
+ retrieval.
302
+ Definition 8. If X is a subspace of Rn, we define the sphere of X as
303
+ SX = {x ∈ X : ∥x∥ = 1}.
304
+ Definition 9. If X, Y are subspaces of Rn, we define the distance between X
305
+ and Y as
306
+ d(X, Y ) = supx∈SXinfy∈SY ∥x − y∥.
307
+ It follows that if d(X, Y ) < ǫ then for any x ∈ X there is a z ∈ SY so that
308
+ ∥ x
309
+ ∥x∥ − z∥ < ǫ. Letting y = ∥x∥z we have that ∥y∥ = ∥x∥ and ∥x − y∥ < ǫ∥x∥.
310
+ Proposition 2. Let X, Y be hyperplanes in Rn and unit vectors x ⊥ X, y ⊥ Y . If
311
+ d(X, Y ) < ǫ then min{∥x − y∥, ∥x + y∥} < 6ǫ.
312
+ Proof. Since span{y, Y } = Rn, x = ay + z for some z ∈ Y . By replacing y by −y
313
+ if necessary, we may assume 0 < a. By assumption, there is some w ∈ X with
314
+ ∥w∥ = ∥z∥ so that ∥w − z∥ < ǫ. Now
315
+ a = a∥y∥ = ∥ay∥ = ∥x − z∥ ≥ ∥x − w∥ − ∥w − z∥ ≥ ∥x∥ − ǫ = 1 − ǫ.
316
+ So, 1 − a < ǫ. Also, 1 = ∥x∥2 = a2 + ∥w∥2 implies a < 1. I.e. 0 < 1 − a < ǫ.
317
+ 1 = ∥x∥2 = ∥ay + z∥2 = a2∥y∥2 + ∥z∥2 = a2 + ∥z∥2 ≥ (1 − ǫ)2 + ∥z∥2.
318
+ So
319
+ ∥z∥2 ≤ 1 − (1 − ǫ)2 = 2ǫ − ǫ2 ≤ 2ǫ.
320
+ Finally,
321
+ ∥x − y∥2 = ∥(ay + z) − y∥2
322
+ ≤ (∥(1 − a)y∥ + ∥z∥)2
323
+ ≤ (1 − a)2∥y∥2 + ∥z∥2 + 2(1 − a)∥y∥∥z∥
324
+ < ǫ2 + 2ǫ + 2
325
+
326
+ 2ǫ2
327
+ < 6ǫ.
328
+
329
+ 6
330
+ P. G. CASAZZA AND F. AKRAMI
331
+
332
+ Lemma 1. Let X, Y be hyperplanes in Rn, {xi}n−1
333
+ i=1 be a unit norm basis for X and
334
+ {yi}n−1
335
+ i=1 be a unit norm basis for Y with basis bounds B. If �n−1
336
+ i=1 ∥xi − yi∥ < ǫ
337
+ then d(X, Y ) < 2ǫB.
338
+ Proof. Let 0 < A ≤ B < ∞ be upper and lower basis bounds for the two bases.
339
+ Given a unit vector x = �n−1
340
+ i=1 aixi ∈ X, let y = �n−1
341
+ i=1 aiyi ∈ Y . We have that
342
+ sup1≤i≤n−1|ai| ≤ B. We compute:
343
+ ∥x − y∥ = ∥
344
+ n−1
345
+
346
+ i=1
347
+ ai(xi − yi)∥
348
+
349
+ n−1
350
+
351
+ i=1
352
+ |ai|∥xi − yi∥
353
+ ≤ (sup1≤i≤n−1|ai|)
354
+ n−1
355
+
356
+ i=1
357
+ ∥xi − yi∥ ≤ Bǫ.
358
+ So
359
+ ∥y∥ ≥ ∥x∥ − ∥x − y∥ ≥ 1 − Bǫ.
360
+ ����x −
361
+ y
362
+ ∥y∥
363
+ ���� ≤ ∥x − y∥ +
364
+ ����y −
365
+ y
366
+ ∥y∥
367
+ ����
368
+ ≤ Bǫ +
369
+ 1
370
+ ∥y∥∥(1 − ∥y∥)y∥
371
+ = Bǫ + (1 − ∥y∥)
372
+ ≤ 2Bǫ.
373
+ It follows that d(X, Y ) < 2Bǫ.
374
+
375
+ Lemma 2. Let {xi}n
376
+ i=1 be a basis for Rn with unconditional basis constant B and
377
+ assume yi ∈ Rn satisfies �n
378
+ i=1 ∥xi − yi∥ < ǫ. Then {yi}n
379
+ i=1 is a basis for Rn which
380
+ is 1 + ǫB-equivalent to {xi}n
381
+ i=1 and has unconditional basis constant B(1 + ǫB)2.
382
+ Proof. Fix {ai}n
383
+ i=1 and compute
384
+
385
+ n
386
+
387
+ i=1
388
+ aiyi∥ ≤ ∥
389
+ n
390
+
391
+ i=1
392
+ aixi∥ + ∥
393
+ n
394
+
395
+ i=1
396
+ |ai|(xi − yi)∥
397
+ ≤ ∥
398
+ n
399
+
400
+ i=1
401
+ aixi∥ + (sup1≤i≤n|ai|)
402
+ n
403
+
404
+ i=1
405
+ ∥xi − yi∥
406
+ ≤ ∥
407
+ n
408
+
409
+ i=1
410
+ aixi∥ + (sup1≤i���n|ai|)ǫ
411
+ ≤ ∥
412
+ n
413
+
414
+ i=1
415
+ aixi∥ + ǫB∥
416
+ n
417
+
418
+ i=1
419
+ aixi∥
420
+ = (1 + ǫB)∥
421
+ n
422
+
423
+ i=1
424
+ aixi∥.
425
+
426
+ WEAK PHASE RETRIEVAL
427
+ 7
428
+ Similarly,
429
+
430
+ n
431
+
432
+ i=1
433
+ |ai|yi∥ ≥ (1 − ǫB)∥
434
+ n
435
+
436
+ i=1
437
+ aixi∥.
438
+ So {xi}n
439
+ i=1 is (1 + ǫB)-equivalent to {yi}n
440
+ i=1.
441
+ For ǫi = ±1,
442
+
443
+ n
444
+
445
+ i=1
446
+ ǫiaiyi∥ ≤ (1 + ǫB)∥
447
+ n
448
+
449
+ i=1
450
+ ǫiaixi∥
451
+ ≤ B(1 + ǫB)∥
452
+ n
453
+
454
+ i=1
455
+ aixi∥
456
+ ≤ B(1 + ǫB)2∥
457
+ n
458
+
459
+ i=1
460
+ aiyi∥.
461
+ and so {yi}n
462
+ i=1 is a B(1 + ǫB) unconditional basis.
463
+
464
+ Theorem 6. The family of m-element weak phase retrieval frames are not dense in
465
+ the set of m-element frames in Rn for all m ≥ 2n − 2.
466
+ Proof. We may assume m = 2n−2 since for larger m we just repeat the (2n-2) vec-
467
+ tors over and over until we get m vectors. Let {ei}n
468
+ i=1 be the canonical orthonormal
469
+ basis for Rn and let xi = ei for i = 1, 2, . . . , n. By [10], there is an orthonormal
470
+ sequence {xi}2n−2
471
+ i=n+1 so that {xi}2n−2
472
+ i=1
473
+ is full spark. Let I = {1, 2, . . ., n − 1}. Let
474
+ X = span{xi}n−1
475
+ i=1 and Y = span{xi}2n−2
476
+ i=n .
477
+ Then x = en ⊥ X and there is a
478
+ ∥y∥ = 1 with y ⊥ Y .
479
+ Note that ⟨x − y, en⟩ ̸= 0 ̸= ⟨x + y, en⟩, for otherwise,
480
+ x = ±y ⊥ span{xi}i̸=n, contradicting the fact that the vectors are full spark. So
481
+ there is a j = n and a δ > 0 so that |(x + y)(j)|, |(x − y)(j)| ≥ δ. We will show
482
+ that there exists an 0 < ǫ so that whenever {yi}2n−2
483
+ i=1
484
+ are vectors in Rn satisfying
485
+ �n
486
+ i=1 ∥xi − yi∥ < ǫ, then {yi}n
487
+ i=1 fails weak phase retrieval.
488
+ Fix 0 < ǫ. Assume {yi}2n−2
489
+ i=1
490
+ are vectors so that �2n−2
491
+ i=1
492
+ ∥xi−yi∥ < ǫ. Choose unit
493
+ vectors x′ ⊥ span{yi}i∈I, y′ ⊥ span{yi}i∈Ic. By Proposition 2 and Lemma 1, we
494
+ may choose ǫ small enough (and change signs if necessary) so that ∥x−x′∥, ∥y−y′∥ <
495
+ δ
496
+ 4B . Hence, since the unconditional basis constant is B,
497
+ |[(x + y) − (x′ + y′)](j)|
498
+ ≤ |(x − x′)j| + |(y − y′)(j)|
499
+ < B∥x − x′∥ + B∥y − y′∥
500
+ ≤ 2B δ
501
+ 4B = δ
502
+ 2.
503
+ It follows that
504
+ |(x′ + y′)(j)| ≥ |(x + y)(j)| − |[(x + y) − (x′ + y′)](j)| ≥ δ − 1
505
+ 2δ = δ
506
+ 2.
507
+ Similarly, |(x′ − y′)(j)| > δ
508
+ 2. So x′ + y′, x′ − y′ are not disjointly supported and so
509
+ {yi}2n−2
510
+ i=1
511
+ fails weak phase retrieval by Theorem 2.
512
+
513
+ 6. Classifying Weak Phase Retrieval
514
+ In this section we will give several surprising equivalences and consequences of
515
+ weak phase retrieval. These results give a complete understanding of the difference
516
+ between weak phase retrieval and phase retrieval.
517
+ Now we give a surprising and very strong classification of weak phase retrieval.
518
+
519
+ 8
520
+ P. G. CASAZZA AND F. AKRAMI
521
+ Theorem 7. Let {xi}2n−2
522
+ i=1
523
+ be non-zero vectors in Rn. The following are equivalent:
524
+ (1) The family {xi}2n−2
525
+ i=1
526
+ does weak phase retrieval in Rn.
527
+ (2) If x, y ∈ Rn and
528
+ (6.1)
529
+ |⟨x, xi⟩| = |⟨y, xi⟩| for all i = 1, 2, . . . , 2n − 2,
530
+ then one of the following holds:
531
+ (a) x = ±y.
532
+ (b) x and y are disjointly supported.
533
+ Proof. (1) ⇒ (2): Given the assumption in the theorem, assume (a) fails and we will
534
+ show that (b) holds. Let x = (a1, a2, . . . , an), y = (b1, b2, . . . , bn). Since {xi}2n−2
535
+ i=1
536
+ does weak phase retrieval, replacing y by −y if necessary, Equation 6.1 implies
537
+ aj = bj whenever aj ̸= 0 ̸= bj.
538
+ Let
539
+ I = {1 ≤ i ≤ 2n − 2 : ⟨x, xi⟩ = ⟨y, yi⟩.
540
+ Then
541
+ x + y ⊥ xi for all i ∈ Ic and x − y ⊥ xi for all i ∈ I.
542
+ By Theorem 2,
543
+ x + y
544
+ ∥x + y +
545
+ x − y
546
+ ∥x − y∥ and
547
+ x + y
548
+ ∥x + y∥ −
549
+ x − y
550
+ ∥x − y∥ are disjointly supported.
551
+ Assume there is a 1 ≤ j ≤ n with aj = bj ̸= 0. Then
552
+ (x + y)(j)
553
+ ∥x + y∥
554
+ + (x − y)(j)
555
+ ∥x − y∥
556
+ =
557
+ 2aj
558
+ ∥x + y∥ and (x + y)(j)
559
+ ∥x + y∥
560
+ − (x − y)(j)
561
+ ∥x − y∥
562
+ =
563
+ 2aj
564
+ ∥x + y∥,
565
+ Contradicting Theorem 2.
566
+ (2) ⇒ (1): This is immediate since (a) and (b) give the conditions for weak phase
567
+ retrieval.
568
+
569
+ Phase retrieval is when (a) in the theorem holds for every x, y ∈ Rn. So this the-
570
+ orem shows clearly the difference between weak phase retrieval and phase retrieval:
571
+ namely when (b) holds at least once.
572
+ Corollary 2. If {xi}2n−2
573
+ i=1
574
+ does weak phase retrieval in Rn, then there are disjointly
575
+ supported non-zero vectors x, y ∈ Rn satisfying:
576
+ |⟨x, xi⟩| = |⟨y, xi⟩| for all i = 1, 2, . . . , 2n − 2.
577
+ Proof. Since {xi}2n−2
578
+ i=1
579
+ must fail phase retrieval, (b) of Theorem 7 must hold at least
580
+ once.
581
+
582
+ Definition 10. Let {ei}n
583
+ i=1 be the canonical orthonormal basis of Rn. If J ⊂ [n],
584
+ we define PJ as the projection onto span{ei}i∈J.
585
+ Theorem 8. Let {xi}m
586
+ i=1 be unit vectors in Rn. The following are equivalent:
587
+ (1) Whenever I ⊂ [2n − 2] and 0 ̸= x ⊥ xi for i ∈ I and 0 ̸= y ⊥ xi for i ∈ Ic,
588
+ there is no j ∈ [n] so that ⟨x, ej⟩ = 0 = ⟨y, ej⟩.
589
+ (2) For every J ⊂ [n] with |J| = n − 1, {Pjxi}2n−2
590
+ i=1
591
+ does phase retrieval.
592
+ (3) For every J ⊂ [n] with |J| < n, {PJxi}2n−2
593
+ i=1
594
+ does phase retrieval.
595
+
596
+ WEAK PHASE RETRIEVAL
597
+ 9
598
+ Proof. (1) ⇒ (2): We prove the contrapositive. So assume (2) fails. Then choose
599
+ J ⊂ [n] with |J| = n − 1, J = [n] \ {j}, and {PJxi}2n−2
600
+ i=1
601
+ fails phase retrieval. In
602
+ particular, it fails complement property. That is, there exists I ⊂ [2n− 2] and span
603
+ {PJxi}i∈I ̸= PJRn and span {Pjxi}i∈Ic ̸= PJRn. So there exists norm one vectors
604
+ x, y in PJRn with PJx = x ⊥ PJxi for all i ∈ I and PJy = y ⊥ PJxi for all i ∈ Ic.
605
+ Extend x, y to all of Rn by setting x(j) = y(j) = 0. Hence, x ⊥ xi for i ∈ I and
606
+ y ⊥ xi for i ∈ Ic, proving (1) fails.
607
+ (2) ⇒ (3): This follows from the fact that every projection of a set of vectors
608
+ doing phase retrieval onto a subset of the basis also does phase retrieval.
609
+ (3) ⇒ (2): This is obvious.
610
+ (3) ⇒ (1): We prove the contrapositive. So assume (1) fails. Then there is a
611
+ I ⊂ [2n− 2] and 0 ̸= x ⊥ xi for i ∈ I and 0 ̸= y ⊥ xi for i ∈ Ic and a j ∈ [n] so that
612
+ ⟨x, ej⟩ = ⟨y, ej⟩ = 0. It follows that x = PJx, y = PJy are non zero and x ⊥ Pjxi
613
+ for all i ∈ I and y ⊥ Pjxi for i ∈ Ic, so {PJxi}2n−2
614
+ i=1
615
+ fails phase retrieval.
616
+
617
+ Remark 6.1. The assumptions in the theorem are necessary. That is, in general,
618
+ {xi}m
619
+ i=1 can do weak phase retrieval and {PJxi}m
620
+ i=1 may fail phase retrieval. For
621
+ example, in R3 consider the row vectors {xi}4
622
+ i=1 of:
623
+
624
+ 
625
+ 1
626
+ 1
627
+ 1
628
+ −1
629
+ 1
630
+ 1
631
+ 1
632
+ −1
633
+ 1
634
+ 1
635
+ 1
636
+ −1
637
+
638
+ 
639
+ This set does weak phase retrieval, but if J = {2, 3} then x = (0, 1, −1) ⊥ PJxi for
640
+ i = 1, 2 and y = (0, 1, 1) ⊥ xi for i = 3, 4 and {PJxi}4
641
+ i=1 fails phase retrieval.
642
+ Corollary 3. Assume {xi}2n−2
643
+ i=1
644
+ does weak phase retrieval in Rn and for every J ⊂ [n]
645
+ {PJxi}2n−2
646
+ i=1
647
+ does phase retrieval. Then if x, y ∈ Rn and
648
+ |⟨x, xi⟩| = |⟨y, xi⟩| for all i = 1, 2, . . . , 2n − 2,
649
+ then there is a J ⊂ [n] so that
650
+ x(j) =
651
+
652
+ aj ̸= 0 for j ∈ J
653
+ 0 for j ∈ Jc
654
+ y(j) =
655
+
656
+ 0 for j ∈ J
657
+ bj ̸= 0 for j ∈ Jc
658
+ Proposition 3. Let {ei}n
659
+ i=1 be the unit vector basis of Rn and for I ⊂ [n], let PI be
660
+ the projection onto XI = span{ei}i∈I. For every m ≥ 1, there are vectors {xi}m
661
+ i=1
662
+ so that for every I ⊂ [1, n], {PIxi}m
663
+ i=1 is full spark in XI.
664
+ Proof. We do this by induction on m. For m=1, let x1 = (1, 1, 1, . . ., 1). This
665
+ satisfies the theorem. So assume the theorem holds for {xi}m
666
+ i=1. Choose I ⊂ [1, n]
667
+ with |I| = k. Choose J ⊂ I with |J| = k − 1 and let XJ = span{xi}i∈J ∪ {xi}i∈Ic.
668
+ Then XJ is a hyperplane in Rn for every J. Since there only exist finitely many
669
+ such J′s there is a vector xm+1 /∈ XJ for every J. We will show that {xi}m+1
670
+ i=1
671
+ satisfies the theorem.
672
+ Let I ⊂ [1, n] and J ⊂ I with |J| = |I|. If PIxm+1 /∈ XJ, then {PIxi}i∈J is
673
+ linearly independent by the induction hypothesis. On the other hand, if m + 1 ∈ J
674
+ then xm+1 /∈ XJ. But, if PIxm+1 ∈ span{PIxi}i∈J\m+1, since (I − PI)xm+1 ∈
675
+ span{ei}i∈Ic, it follows that xm+1 ∈ XJ, which is a contradiction.
676
+
677
+
678
+ 10
679
+ P. G. CASAZZA AND F. AKRAMI
680
+ Remark 6.2. In the above proposition, none of the xi can have a zero coordinate.
681
+ Since if it does, projecting the vectors onto that coordinate produces a zero vector
682
+ and so is not full spark.
683
+ References
684
+ [1] F. Akrami, P. G. Casazza, M. A. Hasankhani Fard, A. Rahimi, A note on norm retrievable
685
+ real Hilbert space frames, J. Math. Anal. Appl. 2021. (517)2, (2023) 126620.
686
+ [2] P. G. Casazza, F. Akrami, A. Rahimi, fundamental results on weak phase retrieval, Ann.
687
+ Funct. Anal, arXiv: 2110.06868, 2021.
688
+ [3] S. Bahmanpour, J. Cahill, P.G. Casazza, J. Jasper, and L. M. Woodland, Phase retrieval and
689
+ norm retrieval, arXiv:1409.8266, (2014).
690
+ [4] R. Balan, P. G. Casazza, D. Edidin, On signal reconstruction without phase, Appl. Comput.
691
+ Harmonic Anal. 20, 3, (2006), 345-356.
692
+ [5] S. Botelho-Andrade, Peter G. Casazza, D. Cheng, J. Haas, and Tin T. Tran, Phase retrieval
693
+ in ℓ2(R), arXiv:1804.01139v1, (2018).
694
+ [6] S. Botelho-Andrade, Peter G. Casazza, D. Cheng, J. Haas, and Tin T. Tran, J. C. Tremain,
695
+ and Z. Xu, Phase retrieval by hyperplanes, Am. Math. Soc, comtemp. math. 706, (2018),
696
+ 21-31.
697
+ [7] S. Botelho-Andrade, P. G. Casazza, D. Ghoreishi, S. Jose, J. C. Tremain, Weak phase retrieval
698
+ and phaseless reconstruction, arXiv:1612.08018, (2016).
699
+ [8] S. Botelho-Andrade, P. G. Casazza, H. V. Nguyen, And J. C. Tremain, Phase retrieval versus
700
+ phaseless reconstruction, J. Math. Anal. Appl, 436, 1, (2016), 131-137.
701
+ [9] J. Cahill, P.G. Casazza, and I. Daubechies, Phase retrieval in infinite dimensional Hilbert
702
+ spaces, Transactions of the AMS, Series B, 3, (2016), 63-76.
703
+ [10] J. Cahill, P.G. Casazza, J. Peterson and L. Woodland, Phase retrivial by projections, Houston
704
+ Journal of Mathematics 42. 2, (2016), 537-558.
705
+ [11] P. G. Casazza, D. Ghoreishi, S. Jose, J. C. Tremain, Norm retrieval and phase Retrieval by
706
+ projections, Axioms, 6, (2017), 1-15.
707
+ [12] P. G. Casazza and G. Kutyniok, Finite Frames, Theory and applications, Birkhauser, (2013).
708
+ [13] O. Christensen, An introduction to frames and Riesz bases, Birkhauser, Boston (2003).
709
+ [14] R. J. Duffin, A. C. Schaeffer. A class of nonharmonic Fourier series, Trans. Am. Math. Soc,
710
+ 72, (1952), 341-366.
711
+ Department of Mathematics, University of Missouri, Columbia, USA.
712
+ Email address: [email protected]
713
+ Department of Mathematics, University of Maragheh, Maragheh, Iran.
714
+ Email address: [email protected]
715
+
4tAzT4oBgHgl3EQffvxD/content/tmp_files/2301.01456v1.pdf.txt ADDED
@@ -0,0 +1,1639 @@
1
+ Audio-Visual Efficient Conformer for Robust Speech Recognition
2
+ Maxime Burchi, Radu Timofte
3
+ Computer Vision Lab, CAIDAS, IFI, University of W¨urzburg, Germany
4
+ {maxime.burchi,radu.timofte}@uni-wuerzburg.de
5
+ Abstract
6
+ End-to-end Automatic Speech Recognition (ASR) sys-
7
+ tems based on neural networks have seen large improve-
8
+ ments in recent years. The availability of large scale hand-
9
+ labeled datasets and sufficient computing resources made it
10
+ possible to train powerful deep neural networks, reaching
11
+ very low Word Error Rate (WER) on academic benchmarks.
12
+ However, despite impressive performance on clean audio
13
+ samples, a drop of performance is often observed on noisy
14
+ speech. In this work, we propose to improve the noise ro-
15
+ bustness of the recently proposed Efficient Conformer Con-
16
+ nectionist Temporal Classification (CTC)-based architec-
17
+ ture by processing both audio and visual modalities. We im-
18
+ prove previous lip reading methods using an Efficient Con-
19
+ former back-end on top of a ResNet-18 visual front-end and
20
+ by adding intermediate CTC losses between blocks. We con-
21
+ dition intermediate block features on early predictions us-
22
+ ing Inter CTC residual modules to relax the conditional in-
23
+ dependence assumption of CTC-based models. We also re-
24
+ place the Efficient Conformer grouped attention by a more
25
+ efficient and simpler attention mechanism that we call patch
26
+ attention. We experiment with publicly available Lip Read-
27
+ ing Sentences 2 (LRS2) and Lip Reading Sentences 3 (LRS3)
28
+ datasets. Our experiments show that using audio and visual
29
+ modalities allows to better recognize speech in the presence
30
+ of environmental noise and significantly accelerate training,
31
+ reaching lower WER with 4 times less training steps. Our
32
+ Audio-Visual Efficient Conformer (AVEC) model achieves
33
+ state-of-the-art performance, reaching WER of 2.3% and
34
+ 1.8% on LRS2 and LRS3 test sets. Code and pretrained
35
+ models are available at https://github.com/burchim/AVEC.
36
+ 1. Introduction
37
+ End-to-end Automatic Speech Recognition based on
38
+ deep neural networks has become the standard of state-of-
39
+ the-art approaches in recent years [25, 47, 18, 16, 17, 31, 7].
40
+ The availability of large scale hand-labeled datasets and suf-
41
+ ficient computing resources made it possible to train power-
42
+ [Figure 1 diagram: audio front-end (STFT + Conv2d) and visual front-end (Conv3d + ResNet-18), audio and visual Conformer back-end stages (20/40/80 ms frame rates), an audio-visual fusion module, an audio-visual Conformer stage, and a CTC loss; only the block labels were recoverable from the extracted diagram. See the caption below.]
+ Figure 1: Audio-Visual Efficient Conformer architec-
72
+ ture. The model is trained end-to-end using CTC loss and
73
+ takes raw audio waveforms and lip movements from the
74
+ speaker as inputs.
75
+ ful deep neural networks for ASR, reaching very low WER
76
+ on academic benchmarks like LibriSpeech [34]. Neural ar-
77
+ chitectures like Recurrent Neural Networks (RNN) [15, 19],
78
+ Convolution Neural Networks (CNN) [10, 28] and Trans-
79
+ formers [12, 23] have successfully been trained from raw
80
+ audio waveforms and mel-spectrograms audio features to
81
+ transcribe speech to text.
82
+ Recently, Gulati et al. [16]
83
+ proposed a convolution-augmented transformer architec-
84
+ ture (Conformer) to model both local and global dependen-
85
+ cies using convolution and attention to reach better speech
86
+ recognition performance. Concurrently, Nozaki et al. [33]
87
+
89
+ improved CTC-based speech recognition by conditioning
90
+ intermediate encoder block features on early predictions us-
91
+ ing intermediate CTC losses [14]. Burchi et al. [7] also pro-
92
+ posed an Efficient Conformer architecture using grouped
93
+ attention for speech recognition, lowering the amount of
94
+ computation while achieving better performance. Inspired
95
+ from computer vision backbones, the Efficient Conformer
96
+ encoder is composed of multiple stages where each stage
97
+ comprises a number of Conformer blocks to progressively
98
+ downsample and project the audio sequence to wider fea-
99
+ ture dimensions.
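+ As a rough illustration of this idea (a sketch only; the module and layer names below are ours and the exact formulation in the cited works may differ), an intermediate-CTC residual step can be written in PyTorch as:
+ import torch.nn as nn
+
+ class InterCTCResidual(nn.Module):
+     def __init__(self, d_model: int, vocab_size: int):
+         super().__init__()
+         self.to_vocab = nn.Linear(d_model, vocab_size)   # intermediate CTC head
+         self.to_model = nn.Linear(vocab_size, d_model)   # maps predictions back to features
+
+     def forward(self, x):
+         # x: (batch, time, d_model), output of an intermediate encoder block
+         logits = self.to_vocab(x)           # used for an auxiliary CTC loss
+         probs = logits.softmax(dim=-1)
+         x = x + self.to_model(probs)        # condition later blocks on early predictions
+         return x, logits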
100
+ Yet, even if these audio-only approaches are breaking
101
+ the state-of-the-art, one major pitfall for using them in the
102
+ real-world is the rapid deterioration of performance in the
103
+ presence of ambient noise. In parallel to that, Audio Visual
104
+ Speech Recognition (AVSR) has recently attracted a lot of
105
+ research attention due to its ability to use image process-
106
+ ing techniques to aid speech recognition systems. Preced-
107
+ ing works have shown that including the visual modality of
108
+ lip movements could improve the robustness of ASR sys-
109
+ tems with respect to noise while reaching better recognition
110
+ performance [41, 42, 36, 1, 45, 29]. Xu et al. [45] pro-
111
+ posed a two-stage approach to first separate the target voice
112
+ from background noise using the speaker's lip movements
113
+ and then transcribe the filtered audio signal with the help of
114
+ lip movements. Petridis et al. [36] uses a hybrid architec-
115
+ ture, training an LSTM-based sequence-to-sequence (S2S)
116
+ model with an auxiliary CTC loss using an early fusion
117
+ strategy to reach better performance. Ma et al. [29] uses
118
+ Conformer back-end networks with ResNet-18 [20] front-
119
+ end networks to improve recognition performance.
120
+ Other works focus on Visual Speech Recognition (VSR),
121
+ only using lip movements to transcribe spoken language
122
+ into text [4, 9, 48, 3, 49, 37, 30]. An important line of
123
+ research is the use of cross-modal distillation. Afouras et
124
+ al. [3] and Zhao et al. [49] proposed to improve the lip read-
125
+ ing performance by distilling from an ASR model trained
126
+ on a large-scale audio-only corpus while Ma et al. [30]
127
+ uses prediction-based auxiliary tasks. Prajwal et al. [37]
128
+ also proposed to use sub-word units instead of characters
+ to transcribe sequences, greatly reducing running time and
+ memory requirements while also providing a language prior that
+ reduces the language modelling burden of the model.
132
+ In this work we focus on the design of a noise robust
133
+ speech recognition architecture processing both audio and
134
+ visual modalities.
135
+ We use the recently proposed CTC-
136
+ based Efficient Conformer architecture [7] and show that
137
+ including the visual modality of lip movements can suc-
138
+ cessfully improve noise robustness while significantly ac-
139
+ celerating training. Our Audio-Visual Efficient Conformer
140
+ (AVEC) reaches a lower WER using 4 times fewer training
141
+ steps than its audio-only counterpart.
142
+ Moreover, we are
143
+ the first work to apply intermediate CTC losses between
144
+ blocks [27, 33] to improve visual speech recognition perfor-
145
+ mance. We show that conditioning intermediate features on
146
+ early predictions using Inter CTC residual modules allows us
147
+ to close the gap in WER between autoregressive and non-
148
+ autoregressive AVSR systems based on S2S. This also helps
149
+ to counter a common failure case which is that audio-visual
150
+ models tend to ignore the visual modality. In this way, we
151
+ force pre-fusion layers to learn spatiotemporal features. Fi-
152
+ nally, we replace the Efficient Conformer grouped attention
153
+ by a more efficient and simpler attention mechanism that
154
+ we call patch attention. Patch attention reaches similar per-
155
+ formance to grouped attention while having a lower com-
156
+ plexity. The contributions of this work are as follows:
157
+ • We improve the noise robustness of the recently pro-
158
+ posed Efficient Conformer architecture by processing
159
+ both audio and visual modalities.
160
+ • We condition intermediate Conformer block features
161
+ on early predictions using Inter CTC residual modules
162
+ to relax the conditional independence assumption of
163
+ CTC models. This allows us to close the gap in WER
164
+ between autoregressive and non-autoregressive meth-
165
+ ods based on S2S.
166
+ • We propose to replace the Efficient Conformer
167
+ grouped attention by a more efficient and simpler at-
168
+ tention mechanism that we call patch attention. Patch
169
+ attention reaches similar performance to grouped at-
170
+ tention with a lower complexity.
171
+ • We experiment on publicly available LRS2 and LRS3
172
+ datasets and reach state-of-the-art results using audio
173
+ and visual modalities.
174
+ 2. Method
175
+ In this section, we describe our proposed Audio-Visual
176
+ Efficient Conformer network. The model is composed of
177
+ 4 main components: An audio encoder, a visual encoder,
178
+ an audio-visual fusion module and an audio-visual encoder.
179
+ The audio and visual encoders are separated into modality
180
+ specific front-end networks to transform each input modal-
181
+ ity into temporal sequences and Efficient Conformer back-
182
+ end networks to model local and global temporal relation-
183
+ ships. The full model is trained end-to-end using intermedi-
184
+ ate CTC losses between Conformer blocks in addition to the
185
+ output CTC layer. The complete architecture of the model
186
+ is shown in Figure 1.
187
+ 2.1. Model Architecture
188
+ Audio front-end.
189
+ The audio front-end network first
190
+ transforms raw audio wave-forms into mel-spectrograms
191
+ using a short-time Fourier transform computed over win-
192
+ dows of 20ms with a step size of 10ms. 80-dimensional
193
+
194
+ mel-scale log filter banks are applied to the resulting fre-
195
+ quency features. The mel-spectrograms are processed by
196
+ a 2D convolution stem to extract local temporal-frequency
197
+ features, resulting in a 20ms frame rate signal. The audio
198
+ front-end architecture is shown in Table 1.
199
+ Table 1: Audio Front-end architecture, 1.2 million parameters. Ta denotes the
+ input audio sample length.
+ | Stage          | Layers                                            | Output Shape           |
+ | Fourier Transf | STFT: 400 window length, 160 hop length, 512 FFTs | (257, Ta//160 + 1)     |
+ | Mel Scale      | Mel Scale: 80 mels                                | (80, Ta//160 + 1)      |
+ | Stem           | Conv2d: 3x3 kernel, 180 filters, 2x2 stride       | (180, 40, Ta//320 + 1) |
+ | Proj           | Linear, 180 units                                 | (Ta//320 + 1, 180)     |
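+ As a rough illustration only, the following PyTorch-style sketch (our own, not
+ the authors' code) shows how such an audio front-end could be assembled; layer
+ arguments beyond Table 1 are assumptions.
+ import torch
+ import torch.nn as nn
+ import torchaudio
+
+ class AudioFrontEnd(nn.Module):
+     def __init__(self, d_model=180):
+         super().__init__()
+         # 400-sample window, 160-sample hop (10 ms at 16 kHz), 512-point FFT, 80 mels.
+         self.mel = torchaudio.transforms.MelSpectrogram(
+             sample_rate=16000, n_fft=512, win_length=400, hop_length=160, n_mels=80)
+         # 2D convolution stem; 3x3 kernel and 2x2 stride assumed from Table 1.
+         self.stem = nn.Conv2d(1, d_model, kernel_size=3, stride=2, padding=1)
+         self.proj = nn.Linear(d_model * 40, d_model)
+
+     def forward(self, wav):                       # wav: (batch, samples)
+         x = (self.mel(wav) + 1e-6).log()          # log mel-spectrogram at a 10 ms rate
+         x = self.stem(x.unsqueeze(1))             # (batch, d_model, 40, T//2), 20 ms rate
+         b, c, f, t = x.shape
+         x = x.permute(0, 3, 1, 2).reshape(b, t, c * f)
+         return self.proj(x)                       # (batch, T//2, d_model)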
219
+ Visual front-end.
220
+ The visual front-end network [29]
221
+ transforms input video frames into temporal sequences. A
222
+ 3D convolution stem with kernel size 5 × 7 × 7 is first ap-
223
+ plied to the video. Each video frame is then processed inde-
224
+ pendently using a 2D ResNet-18 [20] with an output spatial
225
+ average pooling. Temporal features are then projected to
226
+ the back-end network input dimension using a linear layer.
227
+ The visual front-end architecture is shown in Table 2.
228
+ Table 2: Visual Front-end architecture, 11.3 million parameters. Tv denotes the
+ number of input video frames.
+ | Stage | Layers                                                   | Output Shape      |
+ | Stem  | Conv3d: 5x7x7 kernel, 64 filters, 1x2x2 stride;          | (64, Tv, 22, 22)  |
+ |       | MaxPool3d: 1x3x3 kernel, 1x2x2 stride                    |                   |
+ | Res 1 | 2 x [Conv2d: 3x3, 64 filters; Conv2d: 3x3, 64 filters]   | (Tv, 64, 22, 22)  |
+ | Res 2 | 2 x [Conv2d: 3x3, 128 filters; Conv2d: 3x3, 128 filters] | (Tv, 128, 11, 11) |
+ | Res 3 | 2 x [Conv2d: 3x3, 256 filters; Conv2d: 3x3, 256 filters] | (Tv, 256, 6, 6)   |
+ | Res 4 | 2 x [Conv2d: 3x3, 512 filters; Conv2d: 3x3, 512 filters] | (Tv, 512, 3, 3)   |
+ | Pool  | Global Average Pooling                                   | (Tv, 512)         |
+ | Proj  | Linear, 256 units                                        | (Tv, 256)         |
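+ A minimal sketch of this front-end, assuming torchvision's ResNet-18 trunk and
+ padding choices of our own (so spatial sizes differ slightly from Table 2), is
+ given below; it is an illustration, not the authors' implementation.
+ import torch
+ import torch.nn as nn
+ import torchvision
+
+ class VisualFrontEnd(nn.Module):
+     def __init__(self, d_out=256):
+         super().__init__()
+         # 3D stem: 5x7x7 kernel, 1x2x2 stride, followed by 1x3x3 max pooling.
+         self.stem = nn.Sequential(
+             nn.Conv3d(1, 64, (5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3), bias=False),
+             nn.BatchNorm3d(64), nn.ReLU(inplace=True),
+             nn.MaxPool3d((1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)))
+         # Per-frame 2D ResNet-18 residual stages, without the ResNet stem and classifier.
+         resnet = torchvision.models.resnet18(weights=None)
+         self.trunk = nn.Sequential(resnet.layer1, resnet.layer2, resnet.layer3,
+                                    resnet.layer4, nn.AdaptiveAvgPool2d(1))
+         self.proj = nn.Linear(512, d_out)
+
+     def forward(self, video):                     # video: (batch, 1, frames, 96, 96)
+         x = self.stem(video)                      # (batch, 64, frames, 24, 24)
+         b, c, t, h, w = x.shape
+         x = x.transpose(1, 2).reshape(b * t, c, h, w)
+         x = self.trunk(x).flatten(1)              # (batch * frames, 512)
+         return self.proj(x).reshape(b, t, -1)     # (batch, frames, d_out)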
271
+ Back-end networks. The back-end networks use an Ef-
272
+ ficient Conformer architecture.
273
+ The Efficient Conformer
274
+ encoder was proposed in [7], it is composed of several
275
+ stages where each stage comprises a number of Conformer
276
+ blocks [16] using grouped attention with relative positional
277
+ encodings. The temporal sequence is progressively down-
278
+ sampled using strided convolutions and projected to wider
279
+ feature dimensions, lowering the amount of computation
280
+ while achieving better performance. We use 3 stages in the
281
+ audio back-end network to downsample the audio signal to
282
+ a 80 milliseconds frame rate. Only 2 stages are necessary
283
+ to downsample the visual signal to the same frame rate. Table 3
+ shows the hyper-parameters of each back-end network.
285
+ Table 3: Back-end network hyper-parameters. InterCTC Blocks indicates the
+ Conformer blocks having a post Inter CTC residual module.
+ | Network           | Visual Back-end | Audio Back-end | Audio-Visual Encoder |
+ | Num Params (M)    | 13.6            | 17.9           | 15.9                 |
+ | Num Stages        | 2               | 3              | 1                    |
+ | Blocks per Stage  | 6, 1            | 5, 6, 1        | 5                    |
+ | Total Num Blocks  | 7               | 12             | 5                    |
+ | Stage Feature Dim | 256, 360        | 180, 256, 360  | 360                  |
+ | Conv Kernel Size  | 15              | 15             | 15                   |
+ | Stage Patch Size  | 1, 1            | 3, 1, 1        | 1                    |
+ | InterCTC Blocks   | 3, 6            | 8, 11          | 2                    |
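+ To make the progressive downsampling concrete, a simplified stage transition
+ could look like the sketch below (our own illustration, not the Efficient
+ Conformer code): a strided 1D convolution halves the frame rate while projecting
+ to the wider feature dimension of the next stage.
+ import torch.nn as nn
+
+ class StageTransition(nn.Module):
+     def __init__(self, d_in=180, d_out=256, stride=2):
+         super().__init__()
+         self.down = nn.Conv1d(d_in, d_out, kernel_size=3, stride=stride, padding=1)
+
+     def forward(self, x):                         # x: (batch, frames, d_in)
+         # e.g. 20 ms -> 40 ms frame rate when stride=2
+         return self.down(x.transpose(1, 2)).transpose(1, 2)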
327
+ Audio-visual fusion module. Similar to [36, 29], we
328
+ use an early fusion strategy to learn audio-visual features
329
+ and reduce model complexity. The acoustic and visual fea-
330
+ tures from the back-end networks are concatenated and fed
331
+ into a joint feed-forward network. The concatenated fea-
332
+ tures of size 2 × dmodel are first expanded using a linear
333
+ layer with output size dff = 4 × dmodel, passed through
334
+ a Swish activation function [38] and projected back to the
335
+ original feature dimension dmodel.
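+ A minimal sketch of this fusion module (our own illustration; nn.SiLU is used
+ for the Swish activation) is:
+ import torch
+ import torch.nn as nn
+
+ class FusionModule(nn.Module):
+     def __init__(self, d_model=360):
+         super().__init__()
+         self.expand = nn.Linear(2 * d_model, 4 * d_model)   # d_ff = 4 x d_model
+         self.swish = nn.SiLU()
+         self.proj = nn.Linear(4 * d_model, d_model)
+
+     def forward(self, audio_feats, visual_feats):
+         # both inputs: (batch, frames, d_model) at the same 80 ms frame rate
+         x = torch.cat([audio_feats, visual_feats], dim=-1)
+         return self.proj(self.swish(self.expand(x)))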
336
+ Audio-visual encoder. The audio-visual encoder is a
337
+ single stage back-end network composed of 5 Conformer
338
+ blocks without downsampling.
339
+ The encoder outputs are
340
+ then projected to a CTC layer to maximize the sum of prob-
341
+ abilities of correct target alignments.
342
+ 2.2. Patch Attention.
343
+ The Efficient Conformer [7] proposed to replace Multi-
344
+ Head Self-Attention (MHSA) [44] in earlier encoder lay-
345
+ ers with grouped attention. Grouped MHSA reduces atten-
346
+ tion complexity by grouping neighbouring temporal ele-
347
+ ments along the feature dimension before applying scaled
348
+ dot-product attention. Since attention has quadratic com-
+ putational complexity with respect to the sequence length,
+ the network otherwise has an asymmetric complexity profile,
+ with earlier attention layers requiring more FLOPs than later
+ layers operating on shorter sequences. In this work, we pro-
353
+ pose to replace grouped attention with a simpler and more
354
+ efficient attention mechanism that we call patch attention
355
+ (Figure 2). Similar to the pooling attention proposed by the
356
+ Multiscale Vision Transformer (MViT) [13] for video and
357
+ image recognition, patch attention performs average
358
+ Table 4: Attention variant complexities including query, key, value and output
+ linear projections. n and d are the sequence length and feature dimension
+ respectively.
+ | Attention Variant | Hyper-Parameter | Full Attention Complexity |
+ | Regular           | -               | O(n·d^2 + n^2·d)          |
+ | Grouped           | Group Size (g)  | O(n·d^2 + (n/g)^2·d·g)    |
+ | Patch             | Patch Size (k)  | O(n/k·d^2 + (n/k)^2·d)    |
376
+
377
+ [Figure 2: Patch Multi-Head Self-Attention. The input sequence is downsampled
+ using average pooling before applying multi-head self-attention. The output
+ sequence is then upsampled via nearest neighbor upsampling, reducing attention
+ complexity from O(n^2·d) to O((n/k)^2·d), where k defines the pooling /
+ upsampling kernel size. Patch attention is equivalent to regular attention when
+ k = 1.]
408
+ pooling on the input sequence before projecting the query,
+ key and value matrices.
410
+ X = AvgPooling1d(X_in)                                             (1)
+ Q, K, V = X W^Q, X W^K, X W^V                                      (2)
+ Where W^Q, W^K, W^V ∈ R^(d×d) are the query, key and value linear projection
+ parameter matrices. MHSA with relative sinusoidal positional encoding is then
+ performed at lower resolution as:
+ MHSA(X) = Concat(O_1, ..., O_H) W^O                                (3)
+ O_h = softmax((Q_h K_h^T + S_h^rel) / sqrt(d_h)) V_h               (4)
+ Where S^rel ∈ R^(n×n) is a relative position score matrix that satisfies
+ S^rel[i, j] = Q_i E_(j−i)^T. E is the linear projection of a standard sinusoidal
+ positional encoding matrix with positions ranging from −(n_max − 1) to
+ (n_max − 1). The attention output sequence is then projected and up-sampled back
+ to the initial resolution using nearest neighbor up-sampling.
+ X_out = UpsampleNearest1d(MHSA(X))                                 (5)
437
+ In consequence, each temporal element of the same patch
438
+ produces the same attention output. Local temporal relation-
439
+ ships are only modeled in the convolution modules while
440
+ global relationships are modeled by patch attention. We
441
+ use 1-dimensional patches in this work but patch attention
442
+ [Figure 3: Audio-only back-end module FLOPs (Billion) per Conformer stage
+ (Stage 1: d=180, n=500; Stage 2: d=256, n=250; Stage 3: d=360, n=125), comparing
+ regular attention, grouped attention (g=3), patch attention (k=3) and
+ feed-forward modules.]
456
+ could also be generalized to image and video data using
457
+ 2D and 3D patches. We leave this to future works. The
458
+ computational complexity of each attention variant is shown
459
+ in Table 4. Patch attention further reduces complexity com-
460
+ pared to grouped attention by decreasing the amount of
461
+ computation needed by Query, Key, Value and Output fully
462
+ connected layers while keeping the feature dimension un-
463
+ changed. Similar to previous work [7], we only use patch
464
+ attention in the first audio back-end stage to reduce com-
465
+ plexity while maintaining model recognition performance.
466
+ Figure 3 shows the amount of FLOPs for each attention
467
+ module variant with respect to encoded sequence length n
468
+ and model feature dimension d. Using patch or grouped at-
469
+ tention variants instead of regular MHSA greatly reduces the
470
+ amount of FLOPs in the first audio back-end stage.
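+ The sketch below illustrates patch attention (Eq. 1-5) using
+ torch.nn.MultiheadAttention as a stand-in; the relative positional encoding is
+ omitted for brevity and this is not the authors' implementation.
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class PatchMHSA(nn.Module):
+     def __init__(self, d_model=180, num_heads=4, patch_size=3):
+         super().__init__()
+         self.k = patch_size
+         self.attn = nn.MultiheadAttention(d_model, num_heads, batch_first=True)
+
+     def forward(self, x):                         # x: (batch, n, d_model)
+         n = x.size(1)
+         # Eq. 1: average pooling over patches of size k.
+         xp = F.avg_pool1d(x.transpose(1, 2), self.k, stride=self.k, ceil_mode=True)
+         xp = xp.transpose(1, 2)                   # (batch, ceil(n/k), d_model)
+         out, _ = self.attn(xp, xp, xp)            # Eq. 3-4 at the reduced resolution
+         # Eq. 5: nearest neighbor upsampling back to the input resolution.
+         out = F.interpolate(out.transpose(1, 2), scale_factor=self.k, mode="nearest")
+         return out.transpose(1, 2)[:, :n]         # elements of a patch share one output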
471
+ 2.3. Intermediate CTC Predictions.
472
+ Inspired by [27] and [33] who proposed to add interme-
473
+ diate CTC losses between encoder blocks to improve CTC-
474
+ based speech recognition performance, we add Inter CTC
475
+ residual modules (Figure 4) in encoder networks. We con-
476
+ dition intermediate block features of both audio, visual and
477
+ audio-visual encoders on early predictions to relax the con-
478
+ ditional independence assumption of CTC models. During
479
+ both training and inference, each intermediate prediction is
480
+ summed to the input of the next layer to help recognition.
481
+ We use the same method proposed in [33] except that we do
482
+ not share layer parameters between losses. The l-th block output X_l^out is
+ passed through a feed-forward network with residual connection and a softmax
+ activation function:
+ Z_l = Softmax(Linear(X_l^out))                                     (6)
+ X_(l+1)^in = X_l^out + Linear(Z_l)                                 (7)
+ Where Z_l ∈ R^(T×V) is a probability distribution over the output vocabulary.
+ The intermediate CTC loss is then computed using the target sequence y as:
+ L_l^inter = −log(P(y|Z_l))                                         (8)
+ P(y|Z_l) = Σ_(π ∈ B_CTC^(−1)(y)) Π_(t=1)^T Z_(t,π_t)               (9)
512
+
513
+ [Figure 4: Inter CTC residual module. Intermediate predictions are summed to the
+ input of the next Conformer block to condition the prediction of the final block
+ on it. Intermediate CTC losses are added to the output CTC loss for the
+ computation of the final loss.]
527
+ Where π ∈ V^T are paths of tokens and B_CTC is a many-to-one map that simply
+ removes all blanks and repeated labels from the paths. The total training
+ objective is defined as follows:
+ L = (1 − λ) L_CTC + λ L_inter                                      (10)
+ L_inter = (1/K) Σ_(k ∈ interblocks) L_k^inter                      (11)
540
+ Where interblocks is the set of blocks having a post Inter
541
+ CTC residual module (Figure 4). Similar to [33], we use
542
+ Inter CTC residual modules every 3 Conformer blocks with
543
+ λ set to 0.5 in all experiments.
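+ A hedged PyTorch-style sketch of an Inter CTC residual module (Eq. 6-7) and of
+ the combined objective (Eq. 10-11) is given below; module and variable names are
+ our own assumptions, not the authors' code.
+ import torch
+ import torch.nn as nn
+
+ class InterCTCResidual(nn.Module):
+     def __init__(self, d_model, vocab_size):
+         super().__init__()
+         self.to_vocab = nn.Linear(d_model, vocab_size)
+         self.back = nn.Linear(vocab_size, d_model)
+
+     def forward(self, x):                         # x: (batch, frames, d_model)
+         z = self.to_vocab(x).softmax(dim=-1)      # Eq. 6: intermediate prediction
+         return x + self.back(z), z                # Eq. 7: condition the next block
+
+ ctc = nn.CTCLoss(blank=0, zero_infinity=True)
+
+ def total_loss(final_logits, inter_probs, targets, in_lens, tgt_lens, lam=0.5):
+     # final_logits: (frames, batch, vocab); inter_probs: list of (batch, frames, vocab)
+     loss_ctc = ctc(final_logits.log_softmax(-1), targets, in_lens, tgt_lens)
+     loss_inter = sum(ctc(z.transpose(0, 1).clamp_min(1e-8).log(),
+                          targets, in_lens, tgt_lens) for z in inter_probs)
+     loss_inter = loss_inter / max(len(inter_probs), 1)               # Eq. 11
+     return (1 - lam) * loss_ctc + lam * loss_inter                   # Eq. 10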
544
+ 3. Experiments
545
+ 3.1. Datasets
546
+ We use 3 publicly available AVSR datasets in this
547
+ work. The Lip Reading in the Wild (LRW) [8] dataset is
548
+ used for visual pre-training and the Lip Reading Sentences
549
+ 2 (LRS2) [1] and Lip Reading Sentences 3 (LRS3) [2]
550
+ datasets are used for training and evaluation.
551
+ LRW dataset. LRW is an audio-visual word recogni-
552
+ tion dataset consisting of short video segments containing a
553
+ single word out of a vocabulary of 500. The dataset com-
554
+ prises 488,766 training samples with at least 800 utterances
+ per class, and validation and test sets of 25,000 samples
556
+ containing 50 utterances per class.
557
+ LRS2 & LRS3 datasets. The LRS2 dataset is composed
558
+ of 224.1 hours with 144,482 video clips from BBC tele-
559
+ vision whereas the LRS3 dataset consists of 438.9 hours
560
+ with 151,819 video clips extracted from TED and TEDx
561
+ talks. Both datasets include corresponding subtitles with
562
+ word alignment boundaries and are composed of a pre-train
563
+ split, train-val split and test split. LRS2 has 96,318 utter-
564
+ ances for pre-training (195 hours), 45,839 for training (28
565
+ hours), 1,082 for validation (0.6 hours), and 1,243 for test-
566
+ ing (0.5 hours). Whereas LRS3 has 118,516 utterances in
567
+ the pre-training set (408 hours), 31,982 utterances in the
568
+ training-validation set (30 hours) and 1,321 utterances in
569
+ the test set (0.9 hours). All videos contain a single speaker,
570
+ have a 224 × 224 pixels resolution and are sampled at 25
571
+ fps with 16kHz audio.
572
+ 3.2. Implementation Details
573
+ Pre-processing Similar to [29], we remove differences
574
+ related to rotation and scale by cropping the lip regions us-
575
+ ing bounding boxes of 96 × 96 pixels to facilitate recog-
576
+ nition. The RetinaFace [11] face detector and Face Align-
577
+ ment Network (FAN) [6] are used to detect 68 facial land-
578
+ marks. The cropped images are then converted to gray-scale
579
+ and normalised between −1 and 1. Facial landmarks of the
580
+ LRW, LRS2 and LRS3 datasets are obtained from previous
581
+ work [30] and reused for pre-processing to get a clean com-
582
+ parison of the methods. A byte-pair encoding tokenizer is
583
+ built from LRS2&3 pre-train and trainval splits transcripts
584
+ using sentencepiece [26]. We use a vocabulary size of 256
585
+ including the CTC blank token following preceding works
586
+ on CTC-based speech recognition [31, 7].
587
+ Data augmentation Spec-Augment [35] is applied on
588
+ the audio mel-spectrograms during training to prevent over-
589
+ fitting with two frequency masks with mask size parameter
590
+ F = 27 and five time masks with adaptive size pS = 0.05.
591
+ Similarly to [30], we mask videos on the time axis using one
592
+ mask per second with the maximum mask duration set to 0.4
593
+ seconds. Random cropping with size 88×88 and horizontal
594
+ flipping are also performed for each video during training.
595
+ We also follow Prajwal et al. [37] using central crop with
596
+ horizontal flipping at test time for visual-only experiments.
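+ For the audio side, the masking described above could be sketched with
+ torchaudio transforms as follows (our own illustration; the time mask width is a
+ placeholder and the adaptive size pS = 0.05 is not reproduced here):
+ import torchaudio
+
+ freq_mask = torchaudio.transforms.FrequencyMasking(freq_mask_param=27)
+ time_mask = torchaudio.transforms.TimeMasking(time_mask_param=100)
+
+ def augment(mel):                                 # mel: (batch, 80, frames)
+     for _ in range(2):                            # two frequency masks, F = 27
+         mel = freq_mask(mel)
+     for _ in range(5):                            # five time masks
+         mel = time_mask(mel)
+     return mel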
597
+ Training Setup We first pre-train the visual encoder on
598
+ the LRW dataset [8] using cross-entropy loss to recognize
599
+ words being spoken. The visual encoder is pre-trained for
600
+ 30 epochs and front-end weights are then used as initializa-
601
+ tion for training. Audio and visual encoders are trained on
602
+ the LRS2&3 datasets using a Noam schedule [44] with 10k
603
+ warmup steps and a peak learning rate of 1e-3. We use the
604
+ Adam optimizer [24] with β1 = 0.9, β2 = 0.98. L2 regular-
605
+ ization with a 1e-6 weight is also added to all the trainable
606
+ weights of the model. We train all models with a global
607
+ batch size of 256 on 4 GPUs, using a batch size of 16 per
608
+ GPU with 4 accumulated steps. Nvidia A100 40GB GPUs
609
+ are used for visual-only and audio-visual experiments while
610
+ RTX 2080 Ti are used for audio-only experiments. The
611
+ audio-only models are trained for 200 epochs while visual-
612
+ only and audio-visual models are trained for 100 and 70
613
+ epochs respectively. Note that we only keep videos shorter
614
+ than 400 frames (16 seconds) during training. Finally, we
615
+ average model weights over the last 10 epoch checkpoints
616
+ using Stochastic Weight Averaging [22] before evaluation.
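+ The learning-rate schedule can be sketched as follows (our own illustration; the
+ placeholder model stands in for the full network):
+ import torch
+
+ def noam_lr(step, warmup=10_000, peak=1e-3):
+     step = max(step, 1)
+     return peak * min(step / warmup, (warmup / step) ** 0.5)
+
+ model = torch.nn.Linear(10, 10)                   # placeholder model
+ opt = torch.optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.98),
+                        weight_decay=1e-6)
+ sched = torch.optim.lr_scheduler.LambdaLR(opt, lambda s: noam_lr(s + 1) / 1e-3)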
617
+
618
+ Table 5: Comparison of WER (%) on LRS2 / LRS3 test sets with recently published methods using publicly and non-publicly
619
+ available datasets for Audio-Only (AO), Visual-Only (VO) and Audio-Visual (AV) models.
620
+ Method
621
+ Model
622
+ Criterion
623
+ Training
624
+ Datasets
625
+ Total
626
+ Hours
627
+ test WER
628
+ AO
629
+ VO
630
+ AV
631
+ (↓) Using Publicly Available Datasets (↓)
632
+ Petridis et al. [36]
633
+ CTC+S2S
634
+ LRW, LRS2
635
+ 381
636
+ 8.3 / -
637
+ 63.5 / -
638
+ 7.0 / -
639
+ Zhang et al. [48]
640
+ S2S
641
+ LRW, LRS2&3
642
+ 788 / 790
643
+ -
644
+ 51.7 / 60.1
645
+ -
646
+ Afouras et al. [3]
647
+ CTC
648
+ VoxCeleb2clean, LRS2&3
649
+ 1,032 / 808
650
+ -
651
+ 51.3 / 59.8
652
+ -
653
+ Xu et al. [45]
654
+ S2S
655
+ LRW, LRS3
656
+ 595
657
+ - / 7.2
658
+ - / 57.8
659
+ - / 6.8
660
+ Yu et al.[46]
661
+ LF-MMI
662
+ LRS2
663
+ 224
664
+ 6.7 / -
665
+ 48.9 / -
666
+ 5.9 / -
667
+ Ma et al. [29]
668
+ CTC+S2S
669
+ LRW, LRS2&3
670
+ 381 / 595
671
+ 3.9 / 2.3
672
+ 37.9 / 43.3
673
+ 3.7 / 2.3
674
+ Prajwal et al. [37]
675
+ S2S
676
+ LRS2&3
677
+ 698
678
+ -
679
+ 28.9 / 40.6
680
+ -
681
+ Ma et al. [30]
682
+ CTC+S2S
683
+ LRW, LRS2&3
684
+ 818
685
+ -
686
+ 27.3 / 34.7
687
+ -
688
+ Ours
689
+ CTC
690
+ LRW, LRS2&3
691
+ 818
692
+ 2.8 / 2.1
693
+ 32.6 / 39.2
694
+ 2.5 / 1.9
695
+ + Neural LM
696
+ CTC
697
+ LRW, LRS2&3
698
+ 818
699
+ 2.4 / 2.0
700
+ 29.8 / 37.5
701
+ 2.3 / 1.8
702
+ (↓) Using Non-Publicly Available Datasets (↓)
703
+ Afouras et al. [1]
704
+ S2S
705
+ MVLRS, LRS2&3
706
+ 1,395
707
+ 9.7 / 8.3
708
+ 48.3 / 58.9
709
+ 8.5 / 7.2
710
+ Zhao et al. [49]
711
+ S2S
712
+ MVLRS, LRS2
713
+ 954
714
+ -
715
+ 65.3 / -
716
+ -
717
+ Shillingford et al. [40]
718
+ CTC
719
+ LRVSR
720
+ 3,886
721
+ -
722
+ - / 55.1
723
+ -
724
+ Makino et al. [32]
725
+ Transducer
726
+ YouTube-31k
727
+ 31,000
728
+ - / 4.8
729
+ - / 33.6
730
+ - / 4.5
731
+ Serdyuk et al. [39]
732
+ Transducer
733
+ YouTube-90k
734
+ 91,000
735
+ -
736
+ - / 25.9
737
+ - / 2.3
738
+ Prajwal et al. [37]
739
+ S2S
740
+ MVLRS, TEDxext, LRS2&3
741
+ 2,676
742
+ -
743
+ 22.6 / 30.7
744
+ -
745
+ Ma et al. [30]
746
+ CTC+S2S
747
+ LRW, AVSpeech, LRS2&3
748
+ 1,459
749
+ -
750
+ 25.5 / 31.5
751
+ -
752
+ Language Models. Similarly to [28], we experiment
753
+ with a N-gram [21] statistical language model (LM) and a
754
+ Transformer neural language model. A 6-gram LM is used
755
+ to generate a list of hypotheses using beam search and an
756
+ external Transformer LM is used to rescore the final list.
757
+ The 6-gram LM is trained on the LRS2&3 pre-train and
758
+ train-val transcriptions. Concerning the neural LM, we pre-
759
+ train a 12 layer GPT-3 Small [5] on the LibriSpeech LM
760
+ corpus for 0.5M steps using a batch size of 0.1M tokens
761
+ and finetune it for 10 epochs on the LRS2&3 transcriptions.
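+ The two-pass decoding can be summarized by the following sketch (the hypothesis
+ format, the weights alpha/beta and the transformer_lm_score callable are
+ assumptions of ours):
+ def rescore(hypotheses, transformer_lm_score, alpha=0.5, beta=1.0):
+     # hypotheses: list of (text, ctc_score, ngram_score) from the first-pass beam search
+     best, best_score = None, float("-inf")
+     for text, ctc_score, ngram_score in hypotheses:
+         score = ctc_score + alpha * ngram_score + beta * transformer_lm_score(text)
+         if score > best_score:
+             best, best_score = text, score
+     return best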
762
+ 3.3. Results
763
+ Table 5 compares WERs of our Audio-Visual Effi-
764
+ cient Conformer with state-of-the-art methods on the LRS2
765
+ and LRS3 test sets.
766
+ Our Audio-Visual Efficient Con-
767
+ former achieves state-of-the-art performances with WER of
768
+ 2.3%/1.8%. On the visual-only track, our CTC model com-
769
+ petes with most recent autoregressive methods using S2S
770
+ criterion. We were able to recover similar results but still
771
+ lag behind Ma et al. [30], which uses auxiliary losses with
772
+ pre-trained audio-only and visual-only networks. We found
773
+ our audio-visual network to converge faster than audio-only
774
+ experiments, reaching better performance using 4 times less
775
+ training steps. The intermediate CTC losses of the visual
776
+ encoder could reach lower levels than in visual-only experi-
777
+ ments showing that optimizing audio-visual layers can help
778
+ pre-fusion layers to learn better representations.
779
+ 3.4. Ablation Studies
780
+ We propose a detailed ablation study to better understand
781
+ the improvements in terms of complexity and WER brought
782
+ by each architectural modification. We report the number
783
+ of operations measured in FLOPs (number of multiply-and-
784
+ add operations) for the network to process a ten second au-
785
+ dio/video clip. Inverse Real Time Factor (Inv RTF) is also
786
+ measured on the LRS3 test set by decoding with a batch
787
+ size 1 on a single Intel Core i7-12700 CPU thread. All abla-
788
+ tions were performed by training audio-only models for 200
789
+ epochs and visual-only / audio-visual models for 50 epochs.
790
+ Efficient Conformer Visual Back-end. We improve the
791
+ recently proposed visual Conformer encoder [29] using an
792
+ Efficient Conformer back-end network. The use of byte pair
793
+ encodings for tokenization instead of characters allows us to
794
+ further downsample temporal sequences without impacting
795
+ the computation of CTC loss. Table 6 shows that using an
796
+ Efficient Conformer back-end network for our visual-only
797
+ model leads to better performances while reducing model
798
+ complexity and training time. The number of model param-
799
+ eters is also slightly decreased.
800
+ Table 6: Ablation study on visual back-end network.
+ | Visual Back-end | #Params (Million) | LRS2 test | LRS3 test | #FLOPs (Billion) | Inv RTF |
+ | Conformer       | 43.0              | 39.53     | 47.14     | 87.94            | 5.17    |
+ | Eff Conf        | 40.4              | 37.39     | 44.96     | 84.52            | 5.26    |
825
+
826
+ Reference
827
+ the authors looked at papers written over a 10 year period and hundreds had to be thrown out
828
+ Outputs
829
+ Block 3: the otho looing pa people we over s any your per and conndries that aboutent threghow
830
+ Block 6: the autthherss looking paperss we overai year paiod and hundreds that about thrououtow
831
+ Block 9: the authors looked at papers witen over ainght year period and hundreds that to been throw out
832
+ Block 12: the authors looked at papers written over 10 year period and hundreds had to be thrown out
833
+ Figure 5: Output example of our Visual-only model using greedy search decoding on the LRS3 test set with intermediate
834
+ CTC prediction every 3 blocks. The sentence is almost correctly transcribed except for the missing ’a’ before ’10 year’.
835
+ Inter CTC residual modules. Similar to [33], we exper-
836
+ iment adding Inter CTC residual modules between blocks
837
+ to relax the conditional independence assumption of CTC.
838
+ Table 7 shows that using intermediate CTC losses every 3
839
+ Conformer blocks greatly helps to reduce WER, except for
840
+ the audio-only setting where this does not improve perfor-
841
+ mance. Figure 5 gives an example of intermediate block
842
+ predictions decoded using greedy search without an exter-
843
+ nal language model on the test set of LRS3. We can see
844
+ that the output is being refined in the encoder layers by con-
845
+ ditioning on the intermediate predictions of previous lay-
846
+ ers. Since our model refines the output over the frame-level
847
+ predictions, it can correct insertion and deletion errors in
848
+ addition to substitution errors. We further study the im-
849
+ pact of Inter CTC on multi-modal learning by measuring
850
+ the performance of our audio-visual model when one of
851
+ the two modalities is masked. As pointed out by preced-
852
+ ing works [8, 1, 32], networks with multi-modal inputs can
853
+ often be dominated by one of the modes. In our case speech
854
+ recognition is a significantly easier problem than lip reading
855
+ which can cause the model to ignore visual information. Ta-
856
+ ble 8 shows that Inter CTC can help to counter this problem
857
+ by forcing pre-fusion layers to transcribe the input signal.
858
+ Table 7: Ablation study on Inter CTC residual modules.
+ | Model Back-end | #Params (Million) | LRS2 test | LRS3 test | #FLOPs (Billion) | Inv RTF |
+ Audio-only (↓):
+ | Eff Conf       | 31.5              | 2.83      | 2.13      | 7.54             | 51.98   |
+ | + Inter CTC    | 32.1              | 2.84      | 2.11      | 7.67             | 50.30   |
+ Visual-only (↓):
+ | Eff Conf       | 40.4              | 37.39     | 44.96     | 84.52            | 5.26    |
+ | + Inter CTC    | 40.9              | 33.82     | 40.63     | 84.60            | 5.26    |
+ Audio-visual (↓):
+ | Eff Conf       | 60.9              | 2.87      | 2.54      | 90.53            | 4.84    |
+ | + Inter CTC    | 61.7              | 2.58      | 1.99      | 90.66            | 4.82    |
910
+ Table 8: Impact of Inter CTC on audio-visual model WER (%) for LRS2 / LRS3 test
+ sets in a masked modality setting.
+ | Inter CTC | masked video | masked audio  | no mask     |
+ | No        | 4.48 / 3.22  | 52.77 / 59.10 | 2.87 / 2.54 |
+ | Yes       | 3.39 / 2.38  | 37.62 / 46.55 | 2.58 / 1.99 |
925
+ Patch multi-head self-attention.
926
+ We experiment re-
927
+ placing grouped attention by patch attention in the first
928
+ audio encoder stage. Our objective being to increase the
929
+ model efficiency and simplicity without harming perfor-
930
+ mance. Grouped attention was proposed in [7] to reduce
931
+ attention complexity for long sequences in the first encoder
932
+ stage. Table 9 shows the impact of each attention variant
933
+ on our audio-only model performance and complexity. We
934
+ start with an Efficient Conformer (M) [7] and replace the
935
+ attention mechanism. We find that grouped attention can be
936
+ replaced by patch attention without a loss of performance
937
+ using a patch size of 3 in the first back-end stage.
938
+ Table 9: Ablation study on audio back-end attention.
+ | Attention Type | Group / Patch Size | LRS2 test | LRS3 test | #FLOPs (Billion) | Inv RTF |
+ | Regular        | -                  | 2.85      | 2.12      | 8.66             | 49.86   |
+ | Grouped        | 3, 1, 1            | 2.82      | 2.13      | 8.06             | 50.27   |
+ | Patch          | 3, 1, 1            | 2.83      | 2.13      | 7.54             | 51.98   |
969
+ 3.5. Noise Robustness
970
+ We measure model noise robustness using various types
971
+ of noise and compare our Audio-Visual Efficient Conformer
972
+ with recently published methods. Figure 6 shows the WER
973
+ evolution of audio-only (AO), visual-only (VO) and audio-
974
+ visual (AV) models with respect to multiple Signal to Noise
975
+ Ratio (SNR) using white noise and babble noise from the
976
+ NoiseX corpus [43]. We find that processing both audio and
977
+ visual modalities can help to significantly improve speech
978
+ recognition robustness with respect to babble noise. More-
979
+ over, we also experiment adding babble noise during train-
980
+ ing as done in previous works [36, 29] and find that it can
981
+ further improve noise robustness at test time.
982
+ Robustness to various types of noise. We gather var-
983
+ ious types of recorded audio noise including sounds and
984
+ music. In Table 10, we observe that the Audio-Visual Ef-
985
+ ficient Conformer consistently achieves better performance
986
+ than its audio-only counterpart in the presence of various
987
+ noise types. This confirms our hypothesis that the audio-
988
+ visual model is able to use the visual modality to aid speech
989
+ recognition when audio noise is present in the input.
990
+
991
+ [Figure 6: LRS2 and LRS3 test WER (%) as a function of SNR (dB) for the VO, AO,
+ AV and AV* models. (a) Babble noise. (b) White noise. * indicates experiments
+ trained with babble noise. We measure noise robustness by evaluating our models
+ in the presence of babble and white noise.]
1041
+ Table 10: LRS3 test WER (%) as a function of SNR (dB).
1042
+ Noise
1043
+ Mode
1044
+ SNR (dB)
1045
+ -5
1046
+ 0
1047
+ 5
1048
+ 10
1049
+ 15
1050
+ 20
1051
+ babble
1052
+ AO
1053
+ 75.9
1054
+ 32.4
1055
+ 9.3
1056
+ 4.1
1057
+ 2.7
1058
+ 2.3
1059
+ AV
1060
+ 33.5
1061
+ 14.8
1062
+ 5.4
1063
+ 3.0
1064
+ 2.3
1065
+ 2.0
1066
+ AV*
1067
+ 11.2
1068
+ 4.9
1069
+ 3.1
1070
+ 2.5
1071
+ 2.2
1072
+ 2.0
1073
+ white
1074
+ AO
1075
+ 77.6
1076
+ 34.0
1077
+ 15.5
1078
+ 7.3
1079
+ 4.1
1080
+ 2.8
1081
+ AV
1082
+ 28.9
1083
+ 14.7
1084
+ 5.5
1085
+ 3.0
1086
+ 2.3
1087
+ 2.0
1088
+ AV*
1089
+ 17.4
1090
+ 8.9
1091
+ 3.6
1092
+ 2.8
1093
+ 2.3
1094
+ 2.0
1095
+ birds
1096
+ AO
1097
+ 51.8
1098
+ 23.9
1099
+ 10.9
1100
+ 5.9
1101
+ 3.7
1102
+ 2.8
1103
+ AV
1104
+ 21.6
1105
+ 11.5
1106
+ 6.2
1107
+ 4.1
1108
+ 2.9
1109
+ 2.4
1110
+ AV*
1111
+ 15.9
1112
+ 8.3
1113
+ 4.9
1114
+ 3.4
1115
+ 2.7
1116
+ 2.4
1117
+ chainsaw
1118
+ AO
1119
+ 82.9
1120
+ 41.2
1121
+ 14.8
1122
+ 5.5
1123
+ 3.7
1124
+ 2.7
1125
+ AV
1126
+ 37.8
1127
+ 17.3
1128
+ 7.6
1129
+ 3.9
1130
+ 2.6
1131
+ 2.3
1132
+ AV*
1133
+ 25.8
1134
+ 10.8
1135
+ 5.0
1136
+ 3.2
1137
+ 2.4
1138
+ 2.3
1139
+ jazz
1140
+ AO
1141
+ 25.3
1142
+ 9.7
1143
+ 4.1
1144
+ 3.1
1145
+ 2.6
1146
+ 2.3
1147
+ AV
1148
+ 13.9
1149
+ 6.0
1150
+ 3.2
1151
+ 2.4
1152
+ 2.3
1153
+ 2.0
1154
+ AV*
1155
+ 10.6
1156
+ 4.2
1157
+ 2.8
1158
+ 2.4
1159
+ 2.2
1160
+ 2.0
1161
+ street
1162
+ raining
1163
+ AO
1164
+ 58.4
1165
+ 23.8
1166
+ 8.9
1167
+ 4.6
1168
+ 3.0
1169
+ 2.5
1170
+ AV
1171
+ 27.12
1172
+ 10.8
1173
+ 5.7
1174
+ 3.1
1175
+ 2.7
1176
+ 2.3
1177
+ AV*
1178
+ 15.9
1179
+ 6.9
1180
+ 3.8
1181
+ 2.7
1182
+ 2.3
1183
+ 2.2
1184
+ washing
1185
+ dishes
1186
+ AO
1187
+ 47.8
1188
+ 24.5
1189
+ 11.5
1190
+ 6.0
1191
+ 3.7
1192
+ 2.8
1193
+ AV
1194
+ 21.3
1195
+ 11.5
1196
+ 6.1
1197
+ 3.6
1198
+ 2.8
1199
+ 2.3
1200
+ AV*
1201
+ 14.2
1202
+ 7.3
1203
+ 4.3
1204
+ 2.2
1205
+ 2.6
1206
+ 2.3
1207
+ train
1208
+ AO
1209
+ 51.3
1210
+ 18.6
1211
+ 7.0
1212
+ 4.0
1213
+ 2.9
1214
+ 2.5
1215
+ AV
1216
+ 23.1
1217
+ 10.1
1218
+ 4.7
1219
+ 3.0
1220
+ 2.4
1221
+ 2.2
1222
+ AV*
1223
+ 14.5
1224
+ 6.2
1225
+ 3.5
1226
+ 2.6
1227
+ 2.3
1228
+ 2.2
1229
+ Comparison with other methods.
1230
+ We compare our
1231
+ method with results provided by Ma et al. [29] and
1232
+ Petridis et al. [36] on the LRS2 test set. Table 11 shows that
1233
+ our audio-visual model achieves lower WER in the pres-
1234
+ ence of babble noise, reaching WER of 9.7% at -5 dB SNR
1235
+ against 16.3% for Ma et al. [29].
1236
+ Table 11: Comparison with Ma et al. [29]. LRS2 test WER
1237
+ (%) as a function of SNR (dB) using babble noise.
1238
+ Method
1239
+ Mode
1240
+ SNR (dB)
1241
+ -5
1242
+ 0
1243
+ 5
1244
+ 10
1245
+ 15
1246
+ 20
1247
+ Ma et al. [29]
1248
+ VO
1249
+ 37.9
1250
+ 37.9
1251
+ 37.9
1252
+ 37.9
1253
+ 37.9
1254
+ 37.9
1255
+ AO*
1256
+ 28.8
1257
+ 9.8
1258
+ 7
1259
+ 5.2
1260
+ 4.5
1261
+ 4.2
1262
+ AV*
1263
+ 16.3
1264
+ 7.5
1265
+ 6.1
1266
+ 4.7
1267
+ 4.4
1268
+ 4.2
1269
+ Ours
1270
+ VO
1271
+ 32.6
1272
+ 32.6
1273
+ 32.6
1274
+ 32.6
1275
+ 32.6
1276
+ 32.6
1277
+ AO
1278
+ 70.5
1279
+ 27
1280
+ 8.6
1281
+ 4.7
1282
+ 3.4
1283
+ 3.1
1284
+ AV
1285
+ 25
1286
+ 11.2
1287
+ 5.1
1288
+ 3.2
1289
+ 2.8
1290
+ 2.6
1291
+ AV*
1292
+ 9.7
1293
+ 5
1294
+ 3.4
1295
+ 2.9
1296
+ 2.8
1297
+ 2.6
1298
+ Table 12: Comparison with Petridis et al. [36]. LRS2 test
1299
+ WER (%) as a function of SNR (dB) using white noise.
1300
+ Method
1301
+ Mode
1302
+ SNR (dB)
1303
+ -5
1304
+ 0
1305
+ 5
1306
+ 10
1307
+ 15
1308
+ 20
1309
+ Petridis et al. [36]
1310
+ VO
1311
+ 63.5
1312
+ 63.5
1313
+ 63.5
1314
+ 63.5
1315
+ 63.5
1316
+ 63.5
1317
+ AO*
1318
+ 85.0
1319
+ 45.4
1320
+ 19.6
1321
+ 11.7
1322
+ 9.4
1323
+ 8.4
1324
+ AV*
1325
+ 55.0
1326
+ 26.1
1327
+ 13.2
1328
+ 9.4
1329
+ 8.0
1330
+ 7.3
1331
+ Ours
1332
+ VO
1333
+ 32.6
1334
+ 32.6
1335
+ 32.6
1336
+ 32.6
1337
+ 32.6
1338
+ 32.6
1339
+ AO
1340
+ 73.1
1341
+ 32.3
1342
+ 14.3
1343
+ 7.2
1344
+ 4.4
1345
+ 3.5
1346
+ AV
1347
+ 22.5
1348
+ 11.5
1349
+ 6.2
1350
+ 4.1
1351
+ 3.2
1352
+ 2.9
1353
+ AV*
1354
+ 14.4
1355
+ 8.0
1356
+ 5.1
1357
+ 3.9
1358
+ 3.1
1359
+ 2.9
1360
+ 4. Conclusion
1361
+ In this paper, we proposed to improve the noise robust-
1362
+ ness of the recently proposed Efficient Conformer CTC-
1363
+ based architecture by processing both audio and visual
1364
+ modalities. We showed that incorporating multi-scale CTC
1365
+ losses between blocks could help to improve recognition
1366
+ performance, reaching comparable results to most recent
1367
+ autoregressive lip reading methods. We also proposed patch
1368
+ attention, a simpler and more efficient attention mechanism
1369
+ to replace grouped attention in the first audio encoder stage.
1370
+ Our Audio-Visual Efficient Conformer achieves state-of-
1371
+ the-art performance of 2.3% and 1.8% on the LRS2 and
1372
+ LRS3 test sets.
1373
+ In the future, we would like to explore
1374
+ other techniques to further improve the noise robustness
1375
+ of our model and close the gap with recent lip reading
1376
+ methods. This includes adding various audio noises during
1377
+ training and using cross-modal distillation with pre-trained
1378
+ models. We also wish to reduce the visual front-end net-
1379
+ work complexity without harming recognition performance
1380
+ and experiment with the RNN-Transducer learning objec-
1381
+ tive for streaming applications.
1382
+ Acknowledgments
1383
+ This work was partly supported by The Alexander von
1384
+ Humboldt Foundation (AvH).
1385
+
1386
+ References
1387
+ [1] Triantafyllos Afouras, Joon Son Chung, Andrew Senior,
1388
+ Oriol Vinyals, and Andrew Zisserman. Deep audio-visual
1389
+ speech recognition. IEEE transactions on pattern analysis
1390
+ and machine intelligence, 2018.
1391
+ [2] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisser-
1392
+ man. Lrs3-ted: a large-scale dataset for visual speech recog-
1393
+ nition. arXiv preprint arXiv:1809.00496, 2018.
1394
+ [3] Triantafyllos Afouras, Joon Son Chung, and Andrew Zisser-
1395
+ man. Asr is all you need: Cross-modal distillation for lip
1396
+ reading. In ICASSP 2020-2020 IEEE International Confer-
1397
+ ence on Acoustics, Speech and Signal Processing (ICASSP),
1398
+ pages 2143–2147. IEEE, 2020.
1399
+ [4] Yannis M Assael, Brendan Shillingford, Shimon Whiteson,
1400
+ and Nando De Freitas. Lipnet: End-to-end sentence-level
1401
+ lipreading. arXiv preprint arXiv:1611.01599, 2016.
1402
+ [5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Sub-
1403
+ biah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakan-
1404
+ tan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Lan-
1405
+ guage models are few-shot learners. Advances in neural in-
1406
+ formation processing systems, 33:1877–1901, 2020.
1407
+ [6] Adrian Bulat and Georgios Tzimiropoulos. How far are we
1408
+ from solving the 2d & 3d face alignment problem?(and a
1409
+ dataset of 230,000 3d facial landmarks).
1410
+ In Proceedings
1411
+ of the IEEE International Conference on Computer Vision,
1412
+ pages 1021–1030, 2017.
1413
+ [7] Maxime Burchi and Valentin Vielzeuf. Efficient conformer:
1414
+ Progressive downsampling and grouped attention for auto-
1415
+ matic speech recognition. In 2021 IEEE Automatic Speech
1416
+ Recognition and Understanding Workshop (ASRU), pages 8–
1417
+ 15. IEEE, 2021.
1418
+ [8] Joon Son Chung and Andrew Zisserman. Lip reading in the
1419
+ wild. In Asian conference on computer vision, pages 87–103.
1420
+ Springer, 2016.
1421
+ [9] Joon Son Chung and AP Zisserman. Lip reading in profile.
1422
+ 2017.
1423
+ [10] Ronan Collobert, Christian Puhrsch, and Gabriel Synnaeve.
1424
+ Wav2letter: an end-to-end convnet-based speech recognition
1425
+ system. arXiv preprint arXiv:1609.03193, 2016.
1426
+ [11] Jiankang Deng, Jia Guo, Evangelos Ververas, Irene Kot-
1427
+ sia, and Stefanos Zafeiriou. Retinaface: Single-shot multi-
1428
+ level face localisation in the wild.
1429
+ In Proceedings of
1430
+ the IEEE/CVF conference on computer vision and pattern
1431
+ recognition, pages 5203–5212, 2020.
1432
+ [12] Linhao Dong, Shuang Xu, and Bo Xu. Speech-transformer:
1433
+ a no-recurrence sequence-to-sequence model for speech
1434
+ recognition.
1435
+ In 2018 IEEE International Conference on
1436
+ Acoustics, Speech and Signal Processing (ICASSP), pages
1437
+ 5884–5888. IEEE, 2018.
1438
+ [13] Haoqi Fan, Bo Xiong, Karttikeya Mangalam, Yanghao Li,
1439
+ Zhicheng Yan, Jitendra Malik, and Christoph Feichten-
1440
+ hofer.
1441
+ Multiscale vision transformers.
1442
+ In Proceedings of
1443
+ the IEEE/CVF International Conference on Computer Vi-
1444
+ sion, pages 6824–6835, 2021.
1445
+ [14] Alex Graves, Santiago Fern´andez, Faustino Gomez, and
1446
+ J¨urgen Schmidhuber. Connectionist temporal classification:
1447
+ labelling unsegmented sequence data with recurrent neural
1448
+ networks. In ICML, pages 369–376, 2006.
1449
+ [15] Alex Graves, Abdel-rahman Mohamed, and Geoffrey Hin-
1450
+ ton. Speech recognition with deep recurrent neural networks.
1451
+ In 2013 IEEE international conference on acoustics, speech
1452
+ and signal processing, pages 6645–6649. Ieee, 2013.
1453
+ [16] Anmol Gulati, James Qin, Chung-Cheng Chiu, Niki Par-
1454
+ mar, Yu Zhang, Jiahui Yu, Wei Han, Shibo Wang, Zheng-
1455
+ dong Zhang, Yonghui Wu, et al. Conformer: Convolution-
1456
+ augmented transformer for speech recognition.
1457
+ arXiv
1458
+ preprint arXiv:2005.08100, 2020.
1459
+ [17] Pengcheng Guo, Florian Boyer, Xuankai Chang, Tomoki
1460
+ Hayashi, Yosuke Higuchi, Hirofumi Inaguma, Naoyuki
1461
+ Kamo, Chenda Li, Daniel Garcia-Romero, Jiatong Shi, et al.
1462
+ Recent developments on espnet toolkit boosted by con-
1463
+ former. In ICASSP 2021-2021 IEEE International Confer-
1464
+ ence on Acoustics, Speech and Signal Processing (ICASSP),
1465
+ pages 5874–5878. IEEE, 2021.
1466
+ [18] Wei Han, Zhengdong Zhang, Yu Zhang, Jiahui Yu, Chung-
1467
+ Cheng Chiu, James Qin, Anmol Gulati, Ruoming Pang, and
1468
+ Yonghui Wu. Contextnet: Improving convolutional neural
1469
+ networks for automatic speech recognition with global con-
1470
+ text. arXiv preprint arXiv:2005.03191, 2020.
1471
+ [19] Awni Hannun, Carl Case, Jared Casper, Bryan Catanzaro,
1472
+ Greg Diamos, Erich Elsen, Ryan Prenger, Sanjeev Satheesh,
1473
+ Shubho Sengupta, Adam Coates, et al.
1474
+ Deep speech:
1475
+ Scaling up end-to-end speech recognition.
1476
+ arXiv preprint
1477
+ arXiv:1412.5567, 2014.
1478
+ [20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.
1479
+ Deep residual learning for image recognition. In Proceed-
1480
+ ings of the IEEE conference on computer vision and pattern
1481
+ recognition, pages 770–778, 2016.
1482
+ [21] Kenneth Heafield.
1483
+ Kenlm: Faster and smaller language
1484
+ model queries. In Proceedings of the sixth workshop on sta-
1485
+ tistical machine translation, pages 187–197, 2011.
1486
+ [22] Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry
1487
+ Vetrov, and Andrew Gordon Wilson.
1488
+ Averaging weights
1489
+ leads to wider optima and better generalization.
1490
+ arXiv
1491
+ preprint arXiv:1803.05407, 2018.
1492
+ [23] Shigeki Karita, Nanxin Chen, Tomoki Hayashi, Takaaki
1493
+ Hori, Hirofumi Inaguma, Ziyan Jiang, Masao Someki,
1494
+ Nelson Enrique Yalta Soplin, Ryuichi Yamamoto, Xiaofei
1495
+ Wang, et al. A comparative study on transformer vs rnn in
1496
+ speech applications. In 2019 IEEE Automatic Speech Recog-
1497
+ nition and Understanding Workshop (ASRU), pages 449–
1498
+ 456. IEEE, 2019.
1499
+ [24] Diederik P Kingma and Jimmy Ba. Adam: A method for
1500
+ stochastic optimization.
1501
+ arXiv preprint arXiv:1412.6980,
1502
+ 2014.
1503
+ [25] Samuel Kriman, Stanislav Beliaev, Boris Ginsburg, Joce-
1504
+ lyn Huang, Oleksii Kuchaiev, Vitaly Lavrukhin, Ryan Leary,
1505
+ Jason Li, and Yang Zhang.
1506
+ Quartznet: Deep automatic
1507
+ speech recognition with 1d time-channel separable convolu-
1508
+ tions. In ICASSP 2020-2020 IEEE International Conference
1509
+ on Acoustics, Speech and Signal Processing (ICASSP), pages
1510
+ 6124–6128. IEEE, 2020.
1511
+ [26] Taku Kudo and John Richardson. Sentencepiece: A sim-
1512
+ ple and language independent subword tokenizer and detok-
1513
+
1514
+ enizer for neural text processing. In EMNLP, pages 66–71,
1515
+ 2018.
1516
+ [27] Jaesong Lee and Shinji Watanabe. Intermediate loss regular-
1517
+ ization for ctc-based speech recognition. In ICASSP 2021-
1518
+ 2021 IEEE International Conference on Acoustics, Speech
1519
+ and Signal Processing (ICASSP), pages 6224–6228. IEEE,
1520
+ 2021.
1521
+ [28] Jason Li, Vitaly Lavrukhin, Boris Ginsburg, Ryan Leary,
1522
+ Oleksii Kuchaiev, Jonathan M Cohen, Huyen Nguyen, and
1523
+ Ravi Teja Gadde. Jasper: An end-to-end convolutional neu-
1524
+ ral acoustic model. arXiv preprint arXiv:1904.03288, 2019.
1525
+ [29] Pingchuan Ma, Stavros Petridis, and Maja Pantic.
1526
+ End-
1527
+ to-end audio-visual speech recognition with conformers.
1528
+ In ICASSP 2021-2021 IEEE International Conference on
1529
+ Acoustics, Speech and Signal Processing (ICASSP), pages
1530
+ 7613–7617. IEEE, 2021.
1531
+ [30] Pingchuan Ma, Stavros Petridis, and Maja Pantic.
1532
+ Visual
1533
+ speech recognition for multiple languages in the wild. arXiv
1534
+ preprint arXiv:2202.13084, 2022.
1535
+ [31] Somshubra Majumdar, Jagadeesh Balam, Oleksii Hrinchuk,
1536
+ Vitaly Lavrukhin, Vahid Noroozi, and Boris Ginsburg. Cit-
1537
+ rinet: Closing the gap between non-autoregressive and au-
1538
+ toregressive end-to-end models for automatic speech recog-
1539
+ nition. arXiv preprint arXiv:2104.01721, 2021.
1540
+ [32] Takaki Makino,
1541
+ Hank Liao,
1542
+ Yannis Assael,
1543
+ Brendan
1544
+ Shillingford, Basilio Garcia, Otavio Braga, and Olivier Sio-
1545
+ han. Recurrent neural network transducer for audio-visual
1546
+ speech recognition. In 2019 IEEE automatic speech recog-
1547
+ nition and understanding workshop (ASRU), pages 905–912.
1548
+ IEEE, 2019.
1549
+ [33] Jumon Nozaki and Tatsuya Komatsu.
1550
+ Relaxing the con-
1551
+ ditional independence assumption of ctc-based asr by con-
1552
+ ditioning on intermediate predictions.
1553
+ arXiv preprint
1554
+ arXiv:2104.02724, 2021.
1555
+ [34] Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev
1556
+ Khudanpur. Librispeech: an asr corpus based on public do-
1557
+ main audio books. In 2015 IEEE international conference
1558
+ on acoustics, speech and signal processing (ICASSP), pages
1559
+ 5206–5210. IEEE, 2015.
1560
+ [35] Daniel S Park, Yu Zhang, Chung-Cheng Chiu, Youzheng
1561
+ Chen, Bo Li, William Chan, Quoc V Le, and Yonghui Wu.
1562
+ Specaugment on large scale datasets.
1563
+ In ICASSP, pages
1564
+ 6879–6883, 2020.
1565
+ [36] Stavros Petridis, Themos Stafylakis, Pingchuan Ma, Geor-
1566
+ gios Tzimiropoulos, and Maja Pantic. Audio-visual speech
1567
+ recognition with a hybrid ctc/attention architecture. In 2018
1568
+ IEEE Spoken Language Technology Workshop (SLT), pages
1569
+ 513–520. IEEE, 2018.
1570
+ [37] KR Prajwal, Triantafyllos Afouras, and Andrew Zisserman.
1571
+ Sub-word level lip reading with visual attention. In Proceed-
1572
+ ings of the IEEE/CVF Conference on Computer Vision and
1573
+ Pattern Recognition, pages 5162–5172, 2022.
1574
+ [38] Prajit Ramachandran,
1575
+ Barret Zoph,
1576
+ and Quoc V Le.
1577
+ Searching
1578
+ for
1579
+ activation
1580
+ functions.
1581
+ arXiv
1582
+ preprint
1583
+ arXiv:1710.05941, 2017.
1584
+ [39] Dmitriy Serdyuk, Otavio Braga, and Olivier Siohan. Audio-
1585
+ visual speech recognition is worth 32x32x8 voxels.
1586
+ In
1587
+ 2021 IEEE Automatic Speech Recognition and Understand-
1588
+ ing Workshop (ASRU), pages 796–802. IEEE, 2021.
1589
+ [40] Brendan Shillingford, Yannis Assael, Matthew W Hoff-
1590
+ man, Thomas Paine, C´ıan Hughes, Utsav Prabhu, Hank
1591
+ Liao, Hasim Sak, Kanishka Rao, Lorrayne Bennett, et al.
1592
+ Large-scale visual speech recognition.
1593
+ arXiv preprint
1594
+ arXiv:1807.05162, 2018.
1595
+ [41] Joon Son Chung, Andrew Senior, Oriol Vinyals, and Andrew
1596
+ Zisserman. Lip reading sentences in the wild. In Proceed-
1597
+ ings of the IEEE conference on computer vision and pattern
1598
+ recognition, pages 6447–6456, 2017.
1599
+ [42] George Sterpu, Christian Saam, and Naomi Harte. Attention-
1600
+ based audio-visual fusion for robust automatic speech recog-
1601
+ nition. In Proceedings of the 20th ACM International Con-
1602
+ ference on Multimodal Interaction, pages 111–115, 2018.
1603
+ [43] Andrew Varga and Herman JM Steeneken. Assessment for
1604
+ automatic speech recognition: Ii. noisex-92: A database and
1605
+ an experiment to study the effect of additive noise on speech
1606
+ recognition systems.
1607
+ Speech communication, 12(3):247–
1608
+ 251, 1993.
1609
+ [44] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszko-
1610
+ reit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia
1611
+ Polosukhin. Attention is all you need. Advances in neural
1612
+ information processing systems, 30, 2017.
1613
+ [45] Bo Xu, Cheng Lu, Yandong Guo, and Jacob Wang. Discrim-
1614
+ inative multi-modality speech recognition. In Proceedings of
1615
+ the IEEE/CVF Conference on Computer Vision and Pattern
1616
+ Recognition, pages 14433–14442, 2020.
1617
+ [46] Jianwei Yu, Shi-Xiong Zhang, Jian Wu, Shahram Ghorbani,
1618
+ Bo Wu, Shiyin Kang, Shansong Liu, Xunying Liu, Helen
1619
+ Meng, and Dong Yu. Audio-visual recognition of overlapped
1620
+ speech for the lrs2 dataset. In ICASSP 2020-2020 IEEE In-
1621
+ ternational Conference on Acoustics, Speech and Signal Pro-
1622
+ cessing (ICASSP), pages 6984–6988. IEEE, 2020.
1623
+ [47] Qian Zhang, Han Lu, Hasim Sak, Anshuman Tripathi, Erik
1624
+ McDermott, Stephen Koo, and Shankar Kumar. Transformer
1625
+ transducer: A streamable speech recognition model with
1626
+ transformer encoders and rnn-t loss. In ICASSP 2020-2020
1627
+ IEEE International Conference on Acoustics, Speech and
1628
+ Signal Processing (ICASSP), pages 7829–7833. IEEE, 2020.
1629
+ [48] Xingxuan Zhang, Feng Cheng, and Shilin Wang.
1630
+ Spatio-
1631
+ temporal fusion based convolutional sequence learning for
1632
+ lip reading. In Proceedings of the IEEE/CVF International
1633
+ Conference on Computer Vision, pages 713–722, 2019.
1634
+ [49] Ya Zhao, Rui Xu, Xinchao Wang, Peng Hou, Haihong Tang,
1635
+ and Mingli Song. Hearing lips: Improving lip reading by dis-
1636
+ tilling speech recognizers. In Proceedings of the AAAI Con-
1637
+ ference on Artificial Intelligence, volume 34, pages 6917–
1638
+ 6924, 2020.
1639
+
4tAzT4oBgHgl3EQffvxD/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
7tAyT4oBgHgl3EQf2_kh/content/2301.00759v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7cb062df0dd7a524b9318b32f476a4edc0e8ac2df34ee081a8feff2f7af3da3
3
+ size 3251879
7tAyT4oBgHgl3EQf2_kh/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d14c9a80467d6115ec1b86d4d670b702f47deb1e129cb11c4a2302ae2d626e54
3
+ size 4587565
7tAyT4oBgHgl3EQf2_kh/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d785fa621d073574f78a94ab19df8174a3ae3f9fa8cd65430b96dd7fdede3e0f
3
+ size 171099
7tE1T4oBgHgl3EQfTwM_/content/tmp_files/2301.03081v1.pdf.txt ADDED
@@ -0,0 +1,1214 @@
 
 
1
+
2
+ 1
3
+
4
+ Abstract— Objective: The objective of this study is to develop a
5
+ deep-learning based detection and diagnosis technique for carotid
6
+ atherosclerosis using a portable freehand 3D ultrasound (US)
7
+ imaging system. Methods: A total of 127 3D carotid artery datasets
8
+ were acquired using a portable 3D US imaging system. A U-Net
9
+ segmentation network was firstly applied to extract the carotid
10
+ artery on 2D transverse frame, then a novel 3D reconstruction
11
+ algorithm using fast dot projection (FDP) method with position
12
+ regularization was proposed to reconstruct the carotid artery
13
+ volume. Furthermore, a convolutional neural network was used
14
+ to classify the healthy case and diseased case qualitatively. 3D
15
+ volume analysis including longitudinal reprojection algorithm and
16
+ stenosis grade measurement algorithm was developed to obtain the
17
+ clinical metrics quantitatively. Results: The proposed system
18
+ achieved sensitivity of 0.714, specificity of 0.851 and accuracy of
19
+ 0.803 respectively in diagnosis of carotid atherosclerosis. The
20
+ automatically measured stenosis grade illustrated good correlation (r=0.762)
+ with the experienced expert measurement.
27
+ Conclusion: the developed technique based on 3D US imaging can
28
+ be applied to the automatic diagnosis of carotid atherosclerosis.
29
+ Significance: The proposed deep-learning based technique was
30
+ specially designed for a portable 3D freehand US system, which
31
+ can
32
+ provide
33
+ carotid
34
+ atherosclerosis
35
+ examination
36
+ more
37
+ conveniently and decrease the dependence on clinician’s
38
+ experience.
39
+ Index Terms—3D ultrasound imaging, automatic carotid atherosclerosis diagnosis,
+ carotid artery segmentation, reconstruction with regularization.
46
+ I. INTRODUCTION
47
+ Carotid atherosclerosis is one of the major causes of
+ stroke, which is the world's second leading cause of death
49
+ [1]. The prevalence rate of carotid atherosclerosis is 36.2% in
50
+ Chinese people over 40 years old [2]. The pathological features
51
+ of carotid atherosclerosis are increase of intima-media
52
+ thickness and appearance of atherosclerosis plaque. Magnetic
53
+ resonance imaging (MRI), computed tomography angiography
54
+
55
+ This work was sponsored by Natural Science Foundation of China (NSFC)
56
+ under Grant No.12074258. (Jiawen Li and Yunqian Huang are co-first authors.)
57
+ (Corresponding authors: Rui Zheng, Man Chen.)
58
+ Jiawen Li, Sheng Song, Duo Xu and Haibin Zhang are with School of
59
+ Information Science and Technology, ShanghaiTech University, Shanghai,
60
+ China.
61
+ Hongbo Chen is with School of Information Science and Technology,
62
+ ShanghaiTech University, Shanghai 201210, China, also with Shanghai
63
+ Advanced Research Institute, Chinese Academy of Sciences, Shanghai 200050,
64
+ (CTA) and digital subtraction angiography (DSA) are several
65
+ commonly used methods for visualizing and characterizing
66
+ carotid artery features [3]–[5]. However, these methods still
67
+ have some limitations during application due to invasiveness,
68
+ ionizing radiation, heavy equipment etc.; and the approaches
69
+ are very time-consuming and expensive which can’t satisfy the
70
+ need of large scale of examinations in different environments
71
+ especially for community and countryside areas. 2D Ultrasound
72
+ (US), as a non-invasive and low-cost method, is widely used in
73
+ the examination of carotid plaque. However, there are several
74
+ disadvantages of traditional 2D US in the current ultrasound
75
+ examination of carotid atherosclerosis. (1) It is mainly carried
76
+ out by experienced sonographers in hospital, and becomes a
77
+ huge burden for health care system. (2) Routine health check is
78
+ difficult for carotid atherosclerosis patients especially in rural
79
+ or undeveloped area. (3) Routine ultrasound examination is a
80
+ tedious, laborious, experience-dependent work for
85
+ sonographers. (4) Clinically, some metrics such as intima-
86
+ media thickness (IMT), plaque thickness, plaque area, usually
87
+ assess the severity of the carotid atherosclerosis in 2D US
88
+ images, which is prone to variability and lack of 3D
89
+ morphology of carotid plaque [6], [7]. 3D US carotid artery
90
+ imaging approaches mainly include mechanical scanning and
91
+ tracked freehand scanning using various sensors e.g., magnetic
92
+ tracked sensor, optical tracked sensor, etc. [8], which can
93
+ provide plaque volume estimation, 3D morphology of plaque
94
+ and other 3D metrics for carotid atherosclerosis diagnosis. The
95
+ 3D techniques are found to be more accurate to evaluate the
96
+ progress of carotid atherosclerosis [9]–[12]. Therefore, it is of
97
+ great importance to develop a portable, reliable and cost-
98
+ effective automatic ultrasound diagnostic technique for carotid
99
+ atherosclerosis screening.
100
+ The automatic diagnosis of carotid atherosclerosis focuses on
101
+ finding the biomarkers on the ultrasound images, for example
102
+ China, and also with University of Chinese Academy of Sciences, Beijing
103
+ 100049, China.
104
+ Yunqian Huang and Junni Shi are with Tongren Hospital, Shanghai Jiao
105
+ Tong University School of Medicine, Shanghai, China.
106
+ Man Chen is with Tongren Hospital, Shanghai Jiao Tong University School
107
+ of Medicine, Shanghai, China (e-mail: [email protected])
108
+ Dr. Rui Zheng is with School of Information Science and Technology,
109
+ Shanghai Engineering Research Center of Energy Efficient and Custom AI IC,
110
+ ShanghaiTech University, Shanghai, China (phone: 86 21-2068 4452, e-mail:
111
112
+ Automatic Diagnosis of Carotid Atherosclerosis
113
+ Using a Portable Freehand 3D Ultrasound
114
+ Imaging System
115
+ Jiawen Li, Yunqian Huang, Sheng Song, Hongbo Chen, Junni Shi, Duo Xu, Haibin Zhang, Man
116
+ Chen*, Rui Zheng*
117
118
+
119
+
120
+ 2
121
+ vessel wall area, vessel wall volume or total plaque volume
122
+ [13]–[15]. These biomarkers are all bounded by the two
123
+ boundaries of vessels, the media-adventitia boundary (MAB)
124
+ and the lumen-intima boundary (LIB), thus identifying these
125
+ two boundaries is an important issue during the carotid
126
+ atherosclerosis diagnosis. In recent years, deep learning
127
+ methods have achieved excellent performance in medical image
+ processing. Jiang et al. [16]–[18] designed a novel adaptive
129
+ triple loss for carotid artery segmentation. To utilize 3D
130
+ information in 3D volume of carotid artery, Jiang et al. [19]
131
+ introduced a fusion module to the U-Net segmentation network
132
+ and yielded promising performance on carotid segmentation
133
+ task. Zhou et al.[20] proposed a deep learning-based MAB and
134
+ LIB segmentation method, and a dynamic convolutional neural
135
+ network (CNN) were applied to image patches in every slice of
136
+ the 3D US images. LIB segmentation was performed by U-Net
137
+ based on the masks of the MAB since the LIB is inside the
138
+ MAB. The method achieved high accuracy but initial anchor
139
+ points were still manually placed. Ruijter et al. [21] created a
140
+ generalized method to segment LIB using CNN. Several U-
141
+ Nets were compared and the experiments showed that the
142
+ combination of various vessels such as radial, ulnar artery, or
143
+ cephalic vein improved the segmentation performance of
144
+ carotid artery. After segmentation, a 3D-geometry can be
145
+ obtained for further therapy. Van Knippenberg et al [22]
146
+ proposed an unsupervised learning method to solve the lack of
147
+ data in carotid segmentation task. Azzopardi et al. [23]
148
+ designed a novel geometrically constrained loss functions and
149
+ received improved segmentation results. Zhou et al.[24]
150
+ proposed a voxel based 3D segmentation neural network to
151
+ segment the MAB and LIB in 3D volume directly. Although the
152
+ proposed algorithm achieved high accuracy with fast process,
153
+ user’s interaction is yet required to identify ROI in the first and
154
+ last slice of the volume.
155
+ After region of interest (ROI) i.e., carotid artery is identified,
156
+ further analysis needs to be performed to get significant clinical
157
+ information for carotid atherosclerosis diagnosis such as the
158
+ existence of plaque, carotid stenosis grade, type of the plaque,
159
+ etc. Zhou et al.[25],[26] applied 8 different backbone and
160
+ UNet++ segmentation algorithm trained on 2D longitudinal US
161
+ images to segment the plaque region and calculate the total
162
+ plaque area. Xia et al. [27] employed a CNN to categorize
163
+ segmented carotid images into normal cases, thickening vessel
164
+ wall cases and plaque cases. Ma et al.[28] proposed a multilevel
165
+ strip pooling-based convolutional neural network to investigate
166
+ the echogenicity of plaque which was found to be closely
167
+ correlated with the risk of stroke. Shen et al. [29] proposed a
168
+ multi task learning method, the authors combined ultrasound
169
+ reports and plaque type label to train a CNN to classify four
170
+ different plaque type. Zhao et al. [30] utilized a novel vessel
171
+ wall thickness mapping algorithm to evaluate the therapeutical
172
+ performance on carotid atherosclerosis. Zhou et al. [31] utilized
173
+ the unsupervised pretrained parameters of U-Net to train a
174
+ plaque segmentation network with a small 3D carotid artery
175
+ ultrasound dataset. Saba et al. [32] used a deep learning based
176
+ method to measure the carotid stenosis, three deep learning
177
+ based systems were evaluated on 407 US dataset, and achieved
178
+ AUC of 0.90, 0.94 and 0.86 on the longitudinal US images
179
+ respectively. Biswas et al. [33] proposed a two-stage artificial
180
+ intelligence model for jointly measurement of atherosclerotic
181
+ wall thickness and plaque burden in longitudinal US images.
182
+ The results showed that the proposed method achieved the
183
+ lowest error compared to previous method.
184
+ The current 3D carotid imaging device was mainly based
185
+ on mechanical system and hard to transport which was almost
186
+ impossible to apply in community or rural area, therefore the
187
+ portable freehand 3D ultrasound imaging system was required
188
+ which can be easily applied for various scenarios. However, for
189
+ the freehand 3D ultrasound reconstruction, the requested small
190
+ voxel size and various noise would lead to reconstruction
191
+ artifacts[34], [35]. On the other hand, the clinicians in different
192
+ scenarios were usually inexperienced so that the diagnosis
193
+ results might be inaccurate and hard to reproduce compared
194
+ with sonographers in clinical ultrasound department. In this
195
+ paper, we developed a new detection and classification
196
+ technique based on deep-learning algorithms for carotid
197
+ atherosclerosis diagnosis which can be employed to a portable
198
+ freehand 3D US imaging system for fast screening. Compared
199
+ to other 3D ultrasound carotid artery imaging methods mainly
200
+ focusing on carotid vessel wall segmentation [18], [20], [21],
201
+ [24], the proposed method aimed at exploring an automatic and
202
+ experience-independent technique and framework for fast
203
+ carotid arteriosclerosis diagnosis.
204
+ The main contributions are outlined as follows. Firstly, a
205
+ portable freehand 3D US carotid imaging and diagnosis
206
+ framework including deep-learning based segmentation, 3D
207
+ reconstruction and automatic volume analysis was developed
208
+ for fast carotid atherosclerosis diagnosis. Secondly, a novel
209
+ position regularization algorithm was designed to reduce the
210
+ reconstruction error caused by freehand scan. Lastly, post
211
+ analysis including automatic reprojection and stenosis
212
+ measurement from 3D volume data provided visible qualitative
213
+ results and quantitative results for atherosclerosis diagnosis.
214
+ II. METHODS
215
+ Fig. 1 showed the overview of data processing procedure
216
+ including transverse image segmentation, 3D volume
217
+ reconstruction, detection of carotid atherosclerosis and 3D
218
+ carotid volume analysis.
219
+ A. MAB and LIB Segmentation
220
+ Three consecutive frames were concatenated in channel
221
+ dimension which is proved to be useful to improve the
222
+ segmentation accuracy [36].
223
+ Since the adjacent frames contained lots of redundant
224
+ information, the pseudo labels were generated using pseudo-
225
+ labeling method to reduce the work load [37]. One of every 5
226
+ neighbor frames were selected to be manually labeled by
227
+ experienced sonographers and the other four frames were
228
+ inferred by the network which was trained using the labeled
229
+ frames. All generated pseudo labels were checked visually, the
230
+ labels would be corrected if the segmentation is incorrect.
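As a concrete illustration of the pseudo-labeling scheme described above (not code from the paper), the sketch below assumes a trained segmentation model with a predict method and a manual_label callback standing in for the sonographer's annotation:

```python
# Hypothetical sketch of the 1-in-5 pseudo-labeling workflow: every fifth
# frame is labeled manually, the remaining frames are inferred by the
# network trained on the manual labels, then reviewed visually.
def build_pseudo_labels(frames, manual_label, segmentation_model, stride=5):
    labels = [None] * len(frames)
    # Manually labeled subset (every `stride`-th frame).
    for i in range(0, len(frames), stride):
        labels[i] = manual_label(frames[i])
    # Remaining frames receive pseudo labels from the trained network,
    # to be checked and corrected afterwards if incorrect.
    for i, frame in enumerate(frames):
        if labels[i] is None:
            labels[i] = segmentation_model.predict(frame)
    return labels
```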
231
+ The intensity of the image was normalized to [0,1] as follows:
+ I = (I − I_min) / (I_max − I_min)    (1)
237
+
238
+
239
+ 3
240
+ where I represented the intensity of the image. Imax and Imin
241
+ represent the max and minimum value of the intensity in the US
242
+ image. All images and corresponding labels were resized to
243
+ 224*224 for segmentation network training.
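A minimal sketch of this preprocessing step (Eq. 1 plus resizing to 224×224); the use of OpenCV's cv2.resize and the small epsilon guard are assumptions of this example, not details from the paper:

```python
import numpy as np
import cv2  # assumption: any image resize routine could be used instead

def preprocess(image: np.ndarray) -> np.ndarray:
    """Min-max normalize to [0, 1] (Eq. 1) and resize to 224x224."""
    image = image.astype(np.float32)
    i_min, i_max = image.min(), image.max()
    normalized = (image - i_min) / (i_max - i_min + 1e-8)  # guard against flat images
    return cv2.resize(normalized, (224, 224))
```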
244
+ U-Net was employed to segment the MAB and LIB in the
245
+ transverse US image sequence [38]. The architecture of the
246
+ network was illustrated in Fig. 2. The segmentation module
247
+ consisted of two symmetrical sub-module which were encoder
248
+ and decoder. The number of channels for each convolutional
249
+ layer were set to (64, 128, 256, 512, 512). Each convolutional
250
+ layer was followed by a batch normalization module and a
251
+ rectification linear unit (ReLU) module. The two modules were
252
+ connected using skip connection to exploit all resolution
253
+ features. The loss function of the segmentation module was the
254
+ combination of DSC loss and cross-entropy loss:
255
+ 𝐿𝑜𝑠𝑠 = 𝐿𝑜𝑠𝑠𝑑𝑖𝑐𝑒 + 𝐿𝑜𝑠𝑠𝑐𝑒
256
+ (2)
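A hedged PyTorch sketch of a combined Dice + cross-entropy loss of this form; the softmax/one-hot handling and the smoothing constant are illustrative assumptions, not the authors' implementation:

```python
import torch
import torch.nn.functional as F

def dice_ce_loss(logits, target, eps=1e-6):
    """Loss = Loss_dice + Loss_ce (Eq. 2).
    logits: (N, C, H, W) raw scores, target: (N, H, W) integer class map."""
    ce = F.cross_entropy(logits, target)
    probs = torch.softmax(logits, dim=1)
    one_hot = F.one_hot(target, num_classes=logits.shape[1]).permute(0, 3, 1, 2).float()
    intersection = (probs * one_hot).sum(dim=(2, 3))
    denom = probs.sum(dim=(2, 3)) + one_hot.sum(dim=(2, 3))
    dice = (2 * intersection + eps) / (denom + eps)
    return ce + (1 - dice.mean())
```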
257
+ B. 3D Reconstruction with Regularization
258
+ After the MAB and LIB were identified in every slice of US
259
+ image sequence, the 3D carotid artery volume was
260
+ reconstructed using the Fast Dot Projection (FDP) method [39].
261
+ However, some disturbances caused by the low precision of the
262
+ magnetic sensor, inevitable hand shaking and breathing
263
+ movement during carotid swept, would lead to the
264
+ reconstruction errors and artifacts. The major problem was the
265
+ repeated acquisition at the same or very close positions, and it
266
+ caused large uncertainty at volume voxels and discontinuity in
267
+ the reconstructed volume [40]. To improve the image quality
268
+ and decrease the uncertainty of 3D reconstructed volume, a
269
+ total variation regularization [41] method was integrated with
270
+ FDP reconstruction algorithm.
271
+ (1) For all the position information obtained from 3DUS
272
+ device, it could be formulated as a set of rotation matrix 𝑅 and
273
+ a translation 𝑡. The tuple (𝑹, 𝒕) consisting of all 𝑅 and 𝑡 formed
274
+ the special Euclidean group 𝑆𝐸(3) which was the semi-direct
275
+ product of the rotation group 𝑆𝑂(3) and the translation group.
276
+ Therefore, the SE(3) group can be formulated as:
+ SE(3) = { [R, t; 0, 1] : R ∈ SO(3), t ∈ ℝ³ }    (3)
280
+
281
+ Fig. 2. The architecture of the segmentation module.
282
+
283
+ Fig. 1. The pipeline of the proposed system and corresponding algorithm. The top row demonstrated the process of the data acquisition, extraction of ROI and
284
+ 3D reconstruction. The bottom row represented the process of 3D carotid volume analysis. The original image sequence and corresponding position
285
+ information were firstly obtained by the 3D US device. U-Net segmentation algorithm and regularized Fast-Dot Projection algorithm was applied to extract
286
+ the ROI and 3D carotid volume. Then 3D carotid volume analysis included automatic stenosis grade measurement, longitudinal image reprojection and
287
+ healthy/diseased cases classification was conducted based on the reconstructed volume.
288
+
289
+
290
+ [Fig. 2 legend: Conv+BatchNorm+ReLU ×2, Maxpooling, Upsampling, Concatenate, 1×1 Conv. Fig. 1 block labels (data collection, image segmentation, regularized Fast-Dot Projection 3D reconstruction, stenosis grade measurement, longitudinal reprojection, plaque diagnosis) omitted.]
331
+ (2) The position signal obtained by the 3DUS system could
332
+ be considered as a set of entries which forms a vector 𝑷 =
333
+ (𝒑𝟏, 𝒑𝟐, … , 𝒑𝒌) ∈ 𝑴𝒌, where 𝑘 was the number of entries and
334
+ 𝑘 ∈ 𝑁, 𝑀𝑘 was a manifold and 𝑀 = 𝑆𝐸(3). Another signal
335
+ set X were considered to be found when the following formula
336
+ is minimal.
337
+ E(𝐱) = D(𝐱, 𝐩) + αR(𝐱), α > 0
338
+ (4)
339
+ Where 𝐷(𝑥, 𝑝) was the penalizing term to reduce the variation
340
+ between original signal P and resulted signal X. 𝑅(𝒙) was a
341
+ regularized term to penalize the position saltation in the signal
342
+ X.
343
+ (3) The deviation penalized term D(x, p) could be defined as:
+ D(x, p) = Σ_{i=1..k} (h ∘ d)(x_i, p_i)    (5)
349
+ Where d(xi,pi) was the length of the geodesic which was
350
+ defined as a shortest path on M between two poses p and q [41].
+ h was defined as follows:
+ h(s) = s²  if s < 1/√2,   √2·s − 1/2  otherwise    (6)
356
+ Which was the Huber-Norm.
357
+ (4) For the regularized term, it could be defined as the
358
+ following:
+ R(x) = Σ_{i=1..k−1} (h ∘ d)(x_i, x_{i+1})    (7)
364
+ Where d(xi,xi+1) could be considered as the first-order forward
365
+ difference. The optimize problem in (4) could be solved using
366
+ a cyclic proximal point algorithm.
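To make the shape of this objective concrete, the sketch below evaluates E(x) = D(x, p) + αR(x) with the Huber penalty, but simplifies the poses to their translation vectors and uses Euclidean instead of SE(3) geodesic distances; it is an illustrative reading, not the published cyclic proximal point solver:

```python
import numpy as np

def huber(s):
    """Huber norm h(s) of Eq. (6)."""
    s = np.abs(s)
    return np.where(s < 1 / np.sqrt(2), s ** 2, np.sqrt(2) * s - 0.5)

def regularized_energy(x, p, alpha):
    """E(x) = D(x, p) + alpha * R(x) (Eqs. 4, 5, 7).
    x, p: (N, 3) arrays of translation vectors standing in for SE(3) poses."""
    data_term = huber(np.linalg.norm(x - p, axis=1)).sum()                 # D(x, p)
    smooth_term = huber(np.linalg.norm(np.diff(x, axis=0), axis=1)).sum()  # R(x)
    return data_term + alpha * smooth_term
```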
367
+ However, the original regularized algorithm couldn’t handle
368
+ the scanning positions with large backward movements. In this
369
+ case, the position array was not sequential according to the
370
+ coordinates, therefore pose re-rank algorithm was proposed.
371
+ Concretely, considering the centroid point of every frame from
372
+ the 2D segmented image sequence as 𝑪𝒌 = (𝒄𝟏, 𝒄𝟐, … , 𝒄𝒌) , the
373
+ PCA (principal components analysis) algorithm was conducted
374
+ in 𝐶𝑘 and a new matrix 𝐷𝑘 was obtained. The first column of
375
+ the matrix was the principal vector 𝑣𝑘, then a set of vectors 𝑐𝑑
376
+ could be acquired by projecting every centroid vector c_k onto v_k:
+ c_d = c_k − ((c_k · v_k) / (v_k · v_k)) v_k    (8)
381
+ The new position sequence was finally obtained by sorting the
382
+ l2-norm of the 𝒄𝒅 set.
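An illustrative NumPy sketch of this re-ranking idea (a simplified reading of Eq. 8): the frame centroids are projected onto the principal direction obtained by PCA and the frames are sorted by the length of that projection:

```python
import numpy as np

def rerank_frames(centroids):
    """Re-order frames along the scan direction: project each frame centroid
    onto the principal direction of all centroids and sort by the projection
    length. `centroids` is an (N, 3) array; returns the new frame order."""
    c = np.asarray(centroids, dtype=float)
    centered = c - c.mean(axis=0)
    # First right-singular vector = principal direction (PCA).
    _, _, vt = np.linalg.svd(centered, full_matrices=False)
    v = vt[0]
    projection_lengths = centered @ v
    return np.argsort(projection_lengths)
```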
383
+ C. Carotid Atherosclerosis Diagnosis
384
+ The US scans including the segmented and reconstructed
385
+ volume were classified into healthy case and carotid
386
+ atherosclerosis case using a diagnosis network. As illustrated in
387
+ Fig. 3, there were two inputs for the diagnosis module. It had
388
+ been proved that the morphological information was helpful for
389
+ the network to classify the normal or abnormal (diseased) image
390
+ [42], therefore the mask of LIB and MAB extracted from each
391
+ slice of the reconstructed volume was used as one input. The
392
+ other input was the cropped ROI which was determined by the
393
+ max bounding rectangular of the mask, and in the cropped
394
+ image, the intensity in the region between LIB and MAB was
395
+ set to the original value, while the region inside lumen and
396
+ outside vessel wall were set to 0. For each input stream, it
397
+ consisted of three repeated blocks, each block consisted of two
398
+ consequent basic convolutional sub-block and a max pooling
399
+ layer. The basic convolutional sub-block was composed of a
400
+ convolutional layer, a batch normalization module and a linear
401
+ rectification unit. The number of channels for each repeated
402
+ block were set to (24, 48, 96). The fusion block concatenated
403
+ the high-level features of two streams and combined
404
+ information by introducing a basic convolutional sub-block.
405
+ After fusion block, the remaining layers were global average
406
+ pooling (GAP) layers and a fully connected layer to output the
407
+ diagnosis result. We used focal loss in the diagnosis module.
408
+ The scan would be diagnosed as a carotid atherosclerosis
409
+ case if the consecutive 5 transverse slices from the
410
+ reconstructed volume were classified as existing plaque.
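A minimal sketch of this scan-level decision rule; the function name and the 0/1 per-slice predictions are assumptions for illustration:

```python
def diagnose_scan(slice_predictions, min_consecutive=5):
    """Flag a scan as carotid atherosclerosis if at least `min_consecutive`
    consecutive transverse slices are classified as containing plaque.
    `slice_predictions` is an iterable of 0/1 (or False/True) values."""
    run = 0
    for has_plaque in slice_predictions:
        run = run + 1 if has_plaque else 0
        if run >= min_consecutive:
            return True
    return False
```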
411
+ D. 3D Carotid Volume Analysis
412
+ The clinical diagnostic parameters such as plaque thickness,
413
+ plaque length, stenosis grade, plaque area, plaque type, etc. can
414
+ be directly calculated from the reconstructed carotid artery
415
+ volume. To validate accuracy of the proposed method, the
416
+ longitudinal US images of carotid artery were obtained by
417
+ projecting the volume in different angles, and the stenosis grade
418
+ was calculated.
419
+ Stenosis rate was usually used to evaluate the stenosis grade.
420
+ For the slices which were diagnosed as atherosclerosis, stenosis
421
+ degree can be evaluated using the LIB and MAB masks. The
422
+ diameter stenosis rate was usually calculated to evaluate the
423
+ stenosis grade in clinic. We denote it as
+ S_diameter = L_wall / (L_wall + L_lumen)    (9)
428
+ where 𝐿 represented the length of respective area. The metric
429
+ was ranged from 0 to 1, the greater number indicated the more
430
+ severe stenosis. The length of vessel wall 𝐿𝑤𝑎𝑙𝑙 and length of
431
+
432
+ Fig. 3. The architecture of the diagnosis module.
433
+
434
+ Fig. 4 The illustration of the approach to calculate the diameter stenosis.
435
+
436
+ [Fig. 3 legend: two input streams (128×128) with feature maps 24×128×128, 48×64×64, 96×32×32, 96×16×16; blocks: Conv+BatchNorm+ReLU, Maxpooling, Global average pooling, Concatenate, Fully connect. Fig. 4 labels: radially sampling, lumen, vessel wall.]
457
+ 5
458
+ lumen 𝐿𝑙𝑢𝑚𝑒𝑛 were illustrated as Fig. 4. The diameter stenosis
459
+ rate was the max 𝑆𝑑𝑖𝑎𝑚𝑒𝑡𝑒𝑟 which was calculated using all
460
+ points in MAB boundary.
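For illustration only, a simplified radial-sampling estimate of the diameter stenosis rate of Eq. (9) from boolean MAB and LIB masks; the sampling resolution and boundary handling are assumptions of this sketch:

```python
import numpy as np

def diameter_stenosis(mab_mask, lib_mask, n_angles=180):
    """Largest S_diameter = L_wall / (L_wall + L_lumen) over rays through the
    lumen centroid. `mab_mask`, `lib_mask` are boolean 2-D arrays."""
    ys, xs = np.nonzero(lib_mask)
    cy, cx = ys.mean(), xs.mean()                      # lumen centroid
    h, w = mab_mask.shape
    best = 0.0
    for theta in np.linspace(0, np.pi, n_angles, endpoint=False):
        dy, dx = np.sin(theta), np.cos(theta)
        wall = lumen = 0
        for direction in (+1, -1):                     # both sides of the centroid
            for r in range(1, max(h, w)):
                y = int(round(cy + direction * r * dy))
                x = int(round(cx + direction * r * dx))
                if not (0 <= y < h and 0 <= x < w) or not mab_mask[y, x]:
                    break
                if lib_mask[y, x]:
                    lumen += 1                         # lumen pixels along the ray
                else:
                    wall += 1                          # vessel-wall pixels along the ray
        if wall + lumen > 0:
            best = max(best, wall / (wall + lumen))
    return best
```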
461
+ The longitudinal carotid US images were usually used to
462
+ calculate plaque size and evaluate the morphology of plaque.
463
+ Since the carotid artery is curved volume, the direct projection
464
+ along a fixed axis may lead to missing of some structure.
465
+ Therefore, centroid points of carotid artery in transverse slices
466
+ were selected to determine the projection plane. Specifically, as
467
+ illustrated in Fig. 5, denoting the centroid point of i-th slice in
468
+ the volume as 𝐶𝑖, the line which was 𝜃 degree angled with y-
469
+ axis through the centroid point 𝐶𝑖 , was sampled as the i-th
470
+ column of projected longitude image. In our experiment, the
471
+ longitudinal US images were obtained by reprojecting the 3D
472
+ carotid volume at the angles of 0°, ±15° and ±30°.
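An illustrative sketch of such a centroid-guided reprojection; the interpolation routine (SciPy's map_coordinates) and the half-length of the sampled line are assumptions, not reported implementation details:

```python
import numpy as np
from scipy.ndimage import map_coordinates  # assumption: any interpolator works

def reproject_longitudinal(volume, centroids, theta_deg, half_len=64):
    """Build a longitudinal image: for slice i of `volume`, sample the line
    through its centroid (cy, cx), tilted theta_deg from the y-axis, and use
    it as column i of the output image."""
    theta = np.deg2rad(theta_deg)
    offsets = np.arange(-half_len, half_len)
    columns = []
    for i, (cy, cx) in enumerate(centroids):
        ys = cy + offsets * np.cos(theta)
        xs = cx + offsets * np.sin(theta)
        columns.append(map_coordinates(volume[i], [ys, xs], order=1))
    return np.stack(columns, axis=1)
```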
473
+ III. EXPERIMENTAL SETUP
474
+ A. Data Acquisition and 3D Ultrasound Scan
475
+ A portable freehand 3D ultrasound imaging system was used
476
+ to obtain three-dimensional images of carotid artery as shown
477
+ in Fig. 6. The system consisted of a 2D linear probe (Clarius,
478
+ L738-K, Canada), an electromagnetic tracking system
479
+ (Polhemus, G4 unit, U.S.A) and a host laptop computer (Intel
480
+ i7-8700k CPU @ 3.70GHz, 32GB RAM) [43]. The 2D
481
+ transverse US images were acquired by the probe while the
482
+ corresponding position and orientation information were
483
+ captured by the magnetic sensor. The images and orientation
484
+ were acquired with a frame rate of 24 Hz.
485
+ During the acquisition, the subjects took the supine position
486
+ and was scanned as shown in Fig. 6 (d), and the probe swept
487
+ consistently along the long axis of common carotid artery from
488
+ the proximal end to the distal end at the speed of approximate
489
+ 10-15 seconds per scan. To reduce the reconstruction artifacts,
490
+ fallback along the swept direction and large movement normal
491
+ to the swept direction should be avoided. The inclusion criteria
492
+ were based on visible plaques which was identified by expert.
493
+ Scans with a stenosis grade larger than 70% were excluded from the dataset.
494
+ A total of 127 3D carotid artery scans from 83 subjects with
495
+ stenosis range from 0% to 70% were obtained from local
496
+ hospital, and all subjects consented to participate in this
497
+ experiment, which was approved by the local ethics committee.
498
+ The age of the subjects was ranged from 51 to 86 years old
499
+ (Male: 38, Female: 45).
500
+ Each scan contained 122-250 2D transverse US images with
501
+ resolution of 640*480. 7596 2D images from 40 scans were
502
+ manually delineated for MAB and LIB and labeled for healthy
503
+ or diseased (with plaque) by experienced sonographers for
504
+ further training of segmentation and classification network. All
505
+ Fig. 5. The illustration of the reprojection process. The centroid point was calculated by the segmented MAB mask for each slice in the volume. Then the line
506
+ segment crosses the centroid point was set to conduct the reprojection. The red and green line segment represent the different resample angle respectively.
507
+
508
+
509
+ (a) (b)
510
+
511
+
512
+ (c) (d)
513
+ Fig. 6. Ultrasound scan using the freehand imaging system. (a) a handheld
514
+ US scanner (left), a host laptop computer (middle) and an iPhone SE2
515
+ (right). (b) Tracking system including a hub (left) and a RF/USB module
516
+ (right). (c) The sensor (left) and the magnetic source (right). (d) Ultrasound
517
+ scan using the freehand imaging system.
518
+
519
+
520
+ Get Centroid
521
+ 0
522
+ Theindexofthecolumn
523
+ n
524
+ GetCentroid
525
+ i-1
526
+ Reprojection longitude image, reprojection angle=0o
527
+ i
528
+ i+1
529
+ Get Centroid
530
+ Theindexofthecolumn
531
+ n
532
+ The index of the slice
533
+ 6
534
+ 127 scans were labeled for healthy or diseased (with plaque) by
535
+ the same raters examining 2D images. In addition, stenosis
536
+ grade and plaque size of randomly selected 20 scans were
537
+ manually measured by expert using clinical 2D US device for
538
+ verification of the proposed system and algorithm.
539
+ B. Training Methods
540
+ 25 scans (4694 2D images) were randomly chosen for CNN
541
+ training and 15 scans (2362 2D images) for validation in order
542
+ to build and verify the segmentation module. The original
543
+ images were resized to 224*224. All training process were
544
+ performed using Pytorch 1.5.1 and Python 3.7 on a NVIDIA
545
+ RTX 4000 GPU. The two networks were trained separately. For
546
+ the segmentation module, the applied data augmentation
547
+ strategies including gamma transformation, rotation, zoom,
548
+ horizontal and vertical flip, and Adam optimizer were used. The
549
+ network was trained for 100 epochs with learning rate and batch
550
+ size set to 0.005 and 8 respectively. For the diagnosis module,
551
+ the cropped and resized 2D US image segmented with the mask
552
+ and the corresponding vessel wall mask were used for network
553
+ training. Gamma transformation and horizontal & vertical flip
554
+ were applied for data augmentation. The diagnosis network was
555
+ trained for 50 epochs using Adam optimizer with learning rate
556
+ and batch size set to 0.005 and 64 respectively.
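A sketch of a training configuration consistent with the reported settings; the torchvision transforms chosen to approximate the described augmentations are assumptions (and, for segmentation, the image and its mask must be transformed jointly in practice):

```python
import torch
from torchvision import transforms  # assumption: torchvision used for augmentation

# Illustrative augmentation approximating the reported strategies
# (rotation, zoom, horizontal/vertical flip); gamma transformation omitted here.
augment = transforms.Compose([
    transforms.RandomRotation(degrees=10),
    transforms.RandomResizedCrop(224, scale=(0.8, 1.0)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
])

def make_optimizer(model, lr=0.005):
    """Adam optimizer with the learning rate reported for both modules."""
    return torch.optim.Adam(model.parameters(), lr=lr)
```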
557
+ C. Diagnosis parameter measurement
558
+ To verify the regularized reconstruction and longitudinal
559
+ images reprojection algorithm, the longitudinal images from 20
560
+ clinical patients with and without regularization were compared
561
+ with clinical images acquired by experienced sonographers
562
+ visually, and the projection angles were set as 𝜃 =
563
+ −30°, −15°, 0°, 15°, 30°.
564
+ The plaque length and thickness were manually measured on
565
+ the 3D pseudo volume, the reconstructed 3D volume and the
566
+ clinical images acquired by experienced sonographers
567
+ respectively, where 3D pseudo volume was the volume which
568
+ were stacked directly by the 2D US images sequence. The
569
+ manual measurement of plaque length and thickness was
570
+ conducted on the reprojected longitudinal images, among
571
+ which the reprojection angle was chosen based on the carotid
572
+ structural integrity and maximum stenosis grade. For plaque
573
+ size measurement in reprojected image of reconstructed 3D
574
+ volume, the pixel size was 0.2 × 0.2𝑚𝑚2. For the pseudo 3D
575
+ volume, the velocity of the swept was assumed constant,
576
+ therefore the pixel size of reprojected image was determined by
577
+ the distance of the swept which could be calculated by the
578
+ magnetic sensor.
579
+ The whole system in clinical metric measurement was also
580
+ verified by comparing stenosis rate automatically measured by
581
+ the system and manually measured by experienced
588
+ sonographers using clinical US device on 20 random clinical
589
+ atherosclerosis patients according to formula (9).
590
+ D. Evaluation Metrics and Statistic Analysis
591
+ The dice similarity coefficient (DSC) and 95% hausdorff
592
+ distance (HD95) were used to evaluate the performance of the
593
+ carotid sequence segmentation. DSC indicated the quantitative
594
+ metric of the overlap region between the ground truth and
595
+ prediction mask which was defined as follows:
+ DSC = 2(P ∩ L) / (P ∪ L)    (10)
599
+ where P, L were the prediction mask and ground truth. The
600
+ hausdorff distance was defined as Eq (11), which indicated the
601
+ largest point-wise matching discrepancy:
602
+ 𝐻𝐷(𝐴, 𝐵) = 𝑚𝑎𝑥(ℎ𝑑(𝐴, 𝐵), ℎ𝑑(𝐵, 𝐴))
603
+ (11)
604
+ where
605
+ ℎ𝑑(𝐴, 𝐵) = 𝑚𝑎𝑥𝑎∈𝐴(𝑚𝑖𝑛𝑏∈𝐵||𝑎 − 𝑏||)
606
+ (12)
607
+ ℎ𝑑(𝐵, 𝐴) = 𝑚𝑎𝑥𝑏∈𝐵(𝑚𝑖𝑛𝑎∈𝐴||𝑏 − 𝑎||)
608
+ (13)
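A hedged NumPy sketch of these metrics; it uses the standard Dice formulation 2|P∩L|/(|P|+|L|) and SciPy's cdist for the pairwise boundary distances, both assumptions of this example:

```python
import numpy as np
from scipy.spatial.distance import cdist  # assumption: used for pairwise distances

def dsc(pred, label):
    """Dice similarity coefficient (Eq. 10) for boolean masks."""
    inter = np.logical_and(pred, label).sum()
    return 2.0 * inter / (pred.sum() + label.sum() + 1e-8)

def hd95(pred_points, label_points):
    """95% Hausdorff distance (Eqs. 11-13) between two (N, 2) point sets."""
    d = cdist(pred_points, label_points)
    hd_ab = np.percentile(d.min(axis=1), 95)   # prediction -> ground truth
    hd_ba = np.percentile(d.min(axis=0), 95)   # ground truth -> prediction
    return max(hd_ab, hd_ba)
```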
609
+ For the evaluation of the diagnosis module, the specificity,
610
+ sensitivity and accuracy were calculated for both 2D US image
611
+ and scans.
612
+ The mean absolute difference (MAD) and standard deviation
613
+ (SD) between results from the pseudo/reconstructed 3D
614
+ volumes and results from experienced sonographers were
615
+ investigated. The metrics in verification of the system, i.e., the
616
+ stenosis grade, were compared between manual or automatic
617
+ approach using the proposed
618
+ technique and manual
619
+ measurement using the clinical US device with the Pearson
620
+ correlation analysis.
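A minimal sketch of this agreement analysis using SciPy's pearsonr:

```python
from scipy.stats import pearsonr

def stenosis_agreement(auto_grades, expert_grades):
    """Pearson correlation between automatic and expert stenosis grades."""
    r, p_value = pearsonr(auto_grades, expert_grades)
    return r, p_value
```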
621
+ IV. RESULTS
622
+ A. Segmentation and Diagnosis Accuracy
623
+ The comparison between nine typical segmented images
624
+ from U-Net and experienced sonographers was illustrated as
625
+ Fig. 7, and the images were selected from different scans at
626
+ some specific locations. Table I showed the average DSC and
627
+ HD95 between the ground truth and prediction results.
628
+ TABLE I.
629
+ THE RESULTS OF VESSEL SEGMENTATION
630
+ Metrics        MAB       Lumen
+ DSC            95.00%    93.30%
+ HD95 (pixel)   4.34      4.65
640
+ Table II showed the contingency table of the validation set of
641
+ 2362 2D transverse images, and the sensitivity, specificity and
642
+ accuracy were 0.73, 0.97 and 0.91 respectively. Table III
643
+ showed the diagnostic results of carotid atherosclerosis for all
644
+
645
+
646
+
647
+
648
+
649
+
650
+
651
+
652
+
653
+
654
+
655
+ Fig. 7. Comparison of the auto segmentation from U-net (red) and manual
656
+ segmentation from ground truth (green).
657
+
658
+
659
+ 7
660
+ scans, and the sensitivity, specificity and accuracy of carotid
661
+ atherosclerosis detection was 0.71, 0.85 and 0.80 respectively.
662
+ TABLE II.
663
+ THE RESULTS OF DETECTION TEST FOR 2D IMAGES
664
+ Labels \ Predictions    Positive (plaque)   Negative
+ Positive (plaque)       454                 171
+ Negative                50                  1687
675
+ TABLE III.
676
+ THE DETECTION RESULTS OF CA FOR SCANS
677
+ Labels \ Predictions    Positive (plaque)   Negative
+ Positive (plaque)       25                  10
+ Negative                10                  57
688
+ B. Reconstruction and Reprojection Accuracy
689
+ Fig. 8 illustrated three representative examples of the
690
+ longitudinal images without regularization, with regularization
691
+ clinical US images acquired by experienced sonographers, and
692
+ the corresponding orientation information. The results revealed
693
+ that the regularized reconstructed volume was smoother with
694
+ less image artifacts. Fig. 9 demonstrated an example with large
695
+ fallback of trajectory, the results showed there were still
696
+ artifacts if directly apply the regularized algorithm and the
697
+ proposed re-rank algorithm could remove the reconstruction
698
+ artifacts. Fig. 10 illustrated the 3D volumes reconstructed from
699
+ the auto-segmentation and ground truth respectively. The
700
+ volumes were rendered by 3D-slicer (www.slicer.org). The
701
+ results showed that the segmentation module achieved good
702
+ agreement with human label. Furthermore, the sunken of the
703
+ lumen area indicated the existence of the plaque. Fig. 11
704
+
705
+
706
+ Fig. 8. Illustration of the US longitudinal images and the corresponding orientation information from three carotid atherosclerosis patients (by rows). The
707
+ images in the first column were reconstructed without regularization algorithm while the images in the third column were reconstructed with regularization
708
+ algorithm. The second column demonstrated the smoother results of the proposed algorithm. The fourth column represents the images acquired by
709
+ sonographers using clinical US devices. The images in fifth column illustrate the corresponding original position information and the images in sixth column
710
+ show the regularized position information.
711
+
712
+ Fig. 9. Illustration of the proposed re-rank algorithm, the first row
713
+ demonstrated the longitudinal image and corresponding position
714
+ information without regularized algorithm. The second row represented
715
+ the images which applied regularized algorithm and the third row showed
716
+ the images which used re-rank and regularized algorithm.
717
+
718
+
719
+ 8
720
+ demonstrated comparison among 5 projected images in
721
+ different angles ( 𝜃 = −30°, −15°, 0°, 15°, 30° ), the image
722
+ directly projected to sagittal plane and the manually acquired
723
+ image by expert from the same atherosclerosis patient. The
724
+ results showed that the projected images in different angles
725
+ could reveal more structures of the carotid than the images only
726
+ projected to sagittal plane. On the other hand, in Fig. 11, the
727
+ image in 15° projection angle was most consistent with the
728
+ clinical image obtained by expert using clinical US device,
729
+ which indicated that the reprojection of 3D volume could
730
+ simulate the different scan angles operated by expert to locate
731
+ the best observation view.
732
+ The plaque size (length and thickness) measured from the
733
+ pseudo volume, reconstructed volume and images acquired by
734
+ expert were compared in Table IV. The results showed good
735
+ agreement between the automatic measurement from the
736
+ reconstructed volume and the manual method, while the plaque
737
+ size measured by pseudo volume showed large difference with
738
+ the expert measurement. The results indicated that the 3D
739
+ reconstruction could reveal the true geometry and clinical
740
+ metric of the carotid artery.
741
+ TABLE IV.
742
+ MAD MEASUREMENTS (N=20) BETWEEN CLINICAL US DEVICE
743
+ AND THE PROPOSED TECHNIQUES
744
+
745
+
746
+
747
+
748
+
749
+
750
+
751
+
752
+
753
+ Fig. 10. The 3D volumes from the auto-segmentation (the first row) and ground truth (the second row). The translucent outer wall represents the vessel wall
754
+ area, the inside red 3D volume represents the lumen area. The sunken of the lumen area indicated the existence of the plaque. The resolution of reconstruction
755
+ is set to 0.2×0.2×0.2 𝑚𝑚3.
756
+
757
+
758
+
759
+
760
+
761
+
762
+
763
+
764
+
765
+ Fig. 11. Illustration of the projected
766
+ images in different angles, from left
767
+ top to right bottom were 5 projected
768
+ images (𝜃 = −30°, −15°, 0°, 15°,
769
+ 30° ), direct sagittal image and
770
+ clinical image respectively. It could
771
+ be observed the sagittal image
772
+ missed the part of vessel wall (in the
773
+ red box) and the reprojected image
774
+ with 𝜃 = 15 ° showed the most
775
+ consistent structure of plaque with
776
+ clinical image (in the green boxes
777
+ shows).
778
+
779
+
780
+ 9
781
+
782
+                                 Plaque length (mm)   Plaque thickness (mm)   Plaque length (relative error)   Plaque thickness (relative error)
+ 3D reconstructed volume         2.65±2.36            0.842±0.617             15.4%±13.6%                      26.0%±13.2%
+ Direct stacked pseudo volume    6.54±7.23            0.976±0.648             40.0%±48.0%                      29.4%±14.0%
811
+ C. Stenosis Measurement Accuracy
812
+ Fig. 12 demonstrated the linear correlation (r=0.762) of the
813
+ stenosis grade measured by the system and experienced
814
+ sonographers using the clinical US device on 20 carotid
815
+ atherosclerosis patients, which indicated the proposed
816
+ technique had the strong consistency with expert manual
817
+ approach in carotid atherosclerosis diagnosis.
818
+ V. DISCUSSION
819
+ In this study, we proposed a portable freehand 3D US
820
+ imaging technique for carotid artery diagnosis which could
821
+ achieve real 3D geometry of carotid artery, and the method
822
+ showed good agreements with manual measurement of stenosis
823
+ rate and classification of diseased and healthy case. The system
824
+ was transportable and less dependent on operator’s experience,
825
+ which make it possible for routine health check in different
826
+ environments such as community or rural area. In addition, the
827
+ 3D reconstructed geometry could provide visualized carotid
828
+ artery structure for further atherosclerosis evaluation.
829
+ Since the large position variation or fallback movement
830
+ during scan would cause reconstruction artifacts, we designed
831
+ a standard scan protocol for 3D carotid US data acquisition and
832
+ analysis. The whole processing steps included automatic 3D US
833
+ data acquisition, MAB and LIB segmentation, 3D
840
+ reconstruction, automatic classification and measurement. In
841
+ practice, the intermediate results of each step could be reviewed
842
+ and manually corrected by operator if necessary to ensure the
843
+ accurate final results. The diagnosis result was based on two
844
+ key points: one was the accurate segmentation of vessel area,
845
+ and the other was the correct reconstruction volume. The
846
+ segmentation determined the region of interest (ROI) used for
847
+ following analysis including automatic stenosis evaluation,
848
+ plaque size measurement and 3D geometry visualization. The
849
+ wrong mask might crop regions out of the carotid artery,
850
+ mislead the diagnosis network and cause confusing diagnosis
851
+ results. However, if the 3D volume was directly reconstructed
852
+ from original 2D frames before segmentation, the
859
+ reconstruction artifacts around MAB and LIB such as
860
+ misplacement or severe blurring could lead to segmentation
861
+ error of vessels, especially for some cases with large position
862
+ variation as Fig. 13. showed. Therefore, we conducted
863
+ segmentation on the original 2D US image sequence before 3D
864
+ reconstruction to extract the vessel area firstly to reduce the
865
+ influence of reconstruction artifacts.
866
+ For the reconstruction process, the failure reconstruction
867
+ caused by large position variation could result in severe image
868
+ artifacts which totally deviated the structure of the carotid artery
869
+ as shown in Fig. 14 For the freehand US scan, theoretically, the
870
+ position information recorded by magnetic sensor would be
871
+ consistent with US probe motion i.e., the position of every US
872
+
873
+ Fig. 12. correlation of stenosis grade between the manual measurement by
874
+ expert using the clinical US device and the automatic measurement from
875
+ the proposed technique on 20 carotid atherosclerosis patients.
876
+
877
+
878
+
879
+
880
+
881
+ Fig. 14. Severe reconstruction artifacts caused by the large position
882
+ variation. The image in first row represented the reconstructed volume and
883
+ the orientation information with regularized algorithm while the images in
884
+ the second row represented the results without regularized algorithm. The
885
+ left image shows the transverse image of the locations in the reconstructed
886
+ volume marked in red boxes in the right image, the large distortion could
887
+ be observed in the image while the distortion was alleviated using the
888
+ regularized algorithm.
889
+
890
+
891
+ Fig. 13. Segmentation results on a transverse image collected from the 3D
892
+ volume reconstructed by the original image sequence (left) and on an
893
+ original transverse frame data (right). It could be observed that the severe
894
+ artifacts on the left image led to wrong segmentation result.
895
+
896
+ [Fig. 12 scatter plot; x-axis: stenosis grade measured by expert using clinical US device, y-axis: stenosis grade measured automatically by the proposed technique.]
912
+ 10
913
+ image. However, the low precision of the sensor and inevitable
914
+ hand jitter would lead to the noticeable measurement
915
+ uncertainty of the position information along the scan direction
916
+ and influence the reconstruction accuracy. Therefore, we
917
+ adopted a novel total variation regularization algorithm to
918
+ smooth the track of the position information and decrease
919
+ distortion and disconnection of the image volume. The position
920
+ of the freehand scan can be regarded as continuous and
921
+ sequential array; therefore, the proposed regularization
922
+ algorithm could reduce the uncertainty by constructing and
923
+ minimize a regularized formulate in the manifold of Euclidean
924
+ transformations. Meanwhile, a re-rank strategy was designed to
925
+ solve the unordered image sequence caused by fallback
926
+ movement during scan. In the future, the reconstruction
927
+ accuracy could be further improved using the neural network.
928
+ After segmentation and reconstruction, the carotid artery
929
+ volume could be obtained for further analysis such as healthy
930
+ or diseased case diagnosis, plaque thickness, length area
931
+ measurement, plaque type identification and stenosis
937
+ measurement etc. In the diagnosis module, the cropped and
938
+ resized images instead of the whole US images were used as the
939
+ input. Since the plaque was only located inside vessel wall area,
940
+ removing useless information outside the vessel wall could
941
+ accelerate network training and improve the detection accuracy.
942
+ On the other hand, there may be low intensity area in the vessel
943
+ region which could mislead the network and result in wrong
944
+ classification since negative sample (no plaque) usually had
945
+ low intensity in lumen area. Therefore, the MAB and LIB mask
946
+ were introduced to combine the morphological information
947
+ with original image information to improve the detection
948
+ accuracy. However, the proposed approach just utilized the
949
+ consecutive 2D reconstructed transverse US images to detect
950
+ plaque cases, thus some cases with small plaque size or severe
951
+ artifacts were wrongly classified as no plaque. In the future, we
952
+ will take the z axis information into account and use the whole
953
+ 3D volume as input instead of detecting plaque by limited
954
+ consecutive transverse slices to improve the accuracy of the
955
+ diagnosis module.
956
+ We utilized a reprojection algorithm to project the carotid
957
+ artery volume to longitudinal planes, so that the clinical metric
958
+ such plaque length, thickness could be directly measured from
959
+ the 3D volume with no need of new acquisition in sagittal
960
+ direction. The traditional clinical carotid artery US examination
961
+ required appropriate positioning and angle between transducer
962
+ and neck, which greatly relies on the operator’s experience to
963
+ localize the plaque and obtain a high-quality US image, the
964
+ proposed reprojection approach in our method was not only
965
+ relatively convenient but could reveal the complete structure of
966
+ the carotid artery with only one scan, and the images obtained
967
+ by our automatic method achieved great agreement with the
968
+ images obtained by expert using clinical US device.
969
+ In segmentation module, we used U-Net to segment the
970
+ MAB and LIB in 2D US image sequence. Every image in the
971
+ sequence was treated as a single image for the segmentation
972
+ network. However, this approach didn’t exploit the context
973
+ information in the adjacent frames. In addition, some cases with
974
+ severe noise or shadowing would result in wrong segmentation
975
+ as Fig. 15 showed. In the future, 3D convolution will be
976
+ considered to correct the segmentation mistake by utilizing the
977
+ context information of the adjacent frames, and sample size will
978
+ be enlarged to improve the accuracy and robustness of the
979
+ segmentation algorithm. More 3D metrics such as total plaque
980
+ volume, vessel wall volume, etc. would be evaluated for more
981
+ accurate validation. On the other hand, the learning-based 3D
982
+ reconstruction algorithm would be taken into account to
983
+ improve the performance of reconstruction.
984
+ VI. CONCLUSION
985
+ We have proposed an automatic 3D carotid artery imaging
986
+ and diagnosis technique specially designed for the portable
987
+ freehand ultrasound device. The technique applied a novel 3D
988
+ reconstructed algorithm and a robust segmentation algorithm
989
+ for automatic carotid atherosclerosis analysis. The results
990
+ demonstrated that the technique achieved good agreement with
991
+ manual expert examination on plaque diagnosis and stenosis
992
+ grade measurement, which showed the potential application on
993
+ fast carotid atherosclerosis examination and the follow-ups,
994
+ especially for those scenarios where professional medical
995
+ device and experienced clinicians are hard to acquire such as
996
+ rural area or community with large population.
997
+ ACKNOWLEDGEMENT
998
+ This work was sponsored by Natural Science Foundation of
999
+ China (NSFC) under Grant No.12074258.
1000
+ REFERENCES
1001
+ [1]
1002
+ M. L. Flaherty et al., “Carotid artery stenosis as a cause of stroke,”
1003
+ Neuroepidemiology, vol. 40, no. 1, pp. 36–41, 2013.
1004
+ [2]
1005
+ L.-Y. Ma et al., “China cardiovascular diseases report 2018: an
1006
+ updated summary,” J. Geriatr. Cardiol. JGC, vol. 17, no. 1, p. 1,
1007
+ 2020.
1008
+ [3]
1009
+ H. R. Underhill, T. S. Hatsukami, Z. A. Fayad, V. Fuster, and C.
1010
+ Yuan, “MRI of carotid atherosclerosis: clinical implications and future
1011
+ directions,” Nat. Rev. Cardiol., vol. 7, no. 3, pp. 165–173, 2010.
1012
+ [4]
1013
+ M. Wintermark et al., “High-resolution CT imaging of carotid artery
1014
+ atherosclerotic plaques,” Am. J. Neuroradiol., vol. 29, no. 5, pp. 875–
1015
+ 882, 2008.
1016
+ [5]
1017
+ P. J. Nederkoorn, Y. van der Graaf, and M. M. Hunink, “Duplex
1018
+ ultrasound and magnetic resonance angiography compared with
1019
+ digital subtraction angiography in carotid artery stenosis: a systematic
1020
+ review,” Stroke, vol. 34, no. 5, pp. 1324–1331, 2003.
1021
+ [6]
1022
+ Y. Inaba, J. A. Chen, and S. R. Bergmann, “Carotid plaque, compared
1023
+ with carotid intima-media thickness, more accurately predicts
1024
+ coronary artery disease events: a meta-analysis,” Atherosclerosis, vol.
1025
+ 220, no. 1, pp. 128–133, 2012.
1026
+ [7]
1027
+ M. W. Lorenz, H. S. Markus, M. L. Bots, M. Rosvall, and M. Sitzer,
1028
+ “Prediction of clinical cardiovascular events with carotid intima-
1029
+
1030
+
1031
+ Fig. 15. Two representative wrong segmentation examples. The red line
1032
+ represented the automatic segmentation results by the segmentation
1033
+ module and the green line represented the human labels. The plaque was
1034
+ identified as the adventitia in the first case. In the second case, the vessel
1035
+ wall structure was disappeared and the segmentation network resulted in
1036
+ wrong segmentation.
1037
+
1038
+
1039
+ 11
1040
+ media thickness: a systematic review and meta-analysis,” Circulation,
1041
+ vol. 115, no. 4, pp. 459–467, 2007.
1042
+ [8]
1043
+ A. Fenster, G. Parraga, and J. Bax, “Three-dimensional ultrasound
1044
+ scanning,” Interface Focus, vol. 1, no. 4, pp. 503–519, 2011.
1045
+ [9]
1046
+ T. Wannarong et al., “Progression of carotid plaque volume predicts
1047
+ cardiovascular events,” Stroke, vol. 44, no. 7, pp. 1859–1865, 2013.
1048
+ [10]
1049
+ A. Fenster, C. Blake, I. Gyacskov, A. Landry, and J. Spence, “3D
1050
+ ultrasound analysis of carotid plaque volume and surface
1051
+ morphology,” Ultrasonics, vol. 44, pp. e153–e157, 2006.
1052
+ [11]
1053
+ G. C. Makris, A. Lavida, M. Griffin, G. Geroulakos, and A. N.
1054
+ Nicolaides, “Three-dimensional ultrasound imaging for the evaluation
1055
+ of carotid atherosclerosis,” Atherosclerosis, vol. 219, no. 2, pp. 377–
1056
+ 383, 2011.
1057
+ [12]
1058
+ K. AlMuhanna et al., “Carotid plaque morphometric assessment with
1059
+ three-dimensional ultrasound imaging,” J. Vasc. Surg., vol. 61, no. 3,
1060
+ pp. 690–697, 2015.
1061
+ [13]
1062
+ R. M. Botnar, M. Stuber, K. V. Kissinger, W. Y. Kim, E. Spuentrup,
1063
+ and W. J. Manning, “Noninvasive coronary vessel wall and plaque
1064
+ imaging with magnetic resonance imaging,” Circulation, vol. 102, no.
1065
+ 21, pp. 2582–2587, 2000.
1066
+ [14]
1067
+ M. Herder, S. H. Johnsen, K. A. Arntzen, and E. B. Mathiesen, “Risk
1068
+ factors for progression of carotid intima-media thickness and total
1069
+ plaque area: a 13-year follow-up study: the Tromsø Study,” Stroke,
1070
+ vol. 43, no. 7, pp. 1818–1823, 2012.
1071
+ [15]
1072
+ B. Chiu, M. Egger, J. D. Spence, G. Parraga, and A. Fenster,
1073
+ “Quantification of carotid vessel wall and plaque thickness change
1074
+ using 3D ultrasound images,” Med. Phys., vol. 35, no. 8, pp. 3691–
1075
+ 3710, 2008.
1076
+ [16]
1077
+ X. Yang, J. Jin, W. He, M. Yuchi, and M. Ding, “Segmentation of the
1078
+ common carotid artery with active shape models from 3D ultrasound
1079
+ images,” in Medical Imaging 2012: Computer-Aided Diagnosis, 2012,
1080
+ vol. 8315, p. 83152H.
1081
+ [17]
1082
+ A. M. A. Lorza et al., “Carotid artery lumen segmentation in 3D free-
1083
+ hand ultrasound images using surface graph cuts,” in International
1084
+ conference on medical image computing and computer-assisted
1085
+ intervention, 2013, pp. 542–549.
1086
+ [18]
1087
+ J. de Ruijter, M. van Sambeek, F. van de Vosse, and R. Lopata,
1088
+ “Automated 3D geometry segmentation of the healthy and diseased
1089
+ carotid artery in free-hand, probe tracked ultrasound images,” Med.
1090
+ Phys., vol. 47, no. 3, pp. 1034–1047, 2020.
1091
+ [19]
1092
+ M. Jiang, J. D. Spence, and B. Chiu, “Segmentation of 3D ultrasound
1093
+ carotid vessel wall using U-Net and segmentation average network,”
1094
+ in 2020 42nd Annual International Conference of the IEEE
1095
+ Engineering in Medicine & Biology Society (EMBC), 2020, pp. 2043–
1096
+ 2046.
1097
+ [20]
1098
+ R. Zhou, A. Fenster, Y. Xia, J. D. Spence, and M. Ding, “Deep
1099
+ learning-based carotid media-adventitia and lumen-intima boundary
1100
+ segmentation from three-dimensional ultrasound images,” Med. Phys.,
1101
+ vol. 46, no. 7, pp. 3180–3193, 2019.
1102
+ [21]
1103
+ J. De Ruijter, J. J. Muijsers, F. N. Van de Vosse, M. R. Van Sambeek,
1104
+ and R. G. Lopata, “A generalized approach for automatic 3-D
1105
+ geometry assessment of blood vessels in transverse ultrasound images
1106
+ using convolutional neural networks,” IEEE Trans. Ultrason.
1107
+ Ferroelectr. Freq. Control, vol. 68, no. 11, pp. 3326–3335, 2021.
1108
+ [22]
1109
+ L. van Knippenberg, R. J. van Sloun, M. Mischi, J. de Ruijter, R.
1110
+ Lopata, and R. A. Bouwman, “Unsupervised domain adaptation
1111
+ method for segmenting cross-sectional CCA images,” Comput.
1112
+ Methods Programs Biomed., vol. 225, p. 107037, 2022.
1113
+ [23]
1114
+ C. Azzopardi, K. P. Camilleri, and Y. A. Hicks, “Bimodal automated
1115
+ carotid ultrasound segmentation using geometrically constrained deep
1116
+ neural networks,” IEEE J. Biomed. Health Inform., vol. 24, no. 4, pp.
1117
+ 1004–1015, 2020.
1118
+ [24]
1119
+ R. Zhou et al., “A voxel-based fully convolution network and
1120
+ continuous max-flow for carotid vessel-wall-volume segmentation
1121
+ from 3D ultrasound images,” IEEE Trans. Med. Imaging, vol. 39, no.
1122
+ 9, pp. 2844–2855, 2020.
1123
+ [25]
1124
+ R. Zhou et al., “Deep learning-based measurement of total plaque area
1125
+ in B-mode ultrasound images,” IEEE J. Biomed. Health Inform., vol.
1126
+ 25, no. 8, pp. 2967–2977, 2021.
1127
+ [26]
1128
+ R. Zhou et al., “Deep learning-based carotid plaque segmentation
1129
+ from B-mode ultrasound images,” Ultrasound Med. Biol., vol. 47, no.
1130
+ 9, pp. 2723–2733, 2021.
1131
+ [27]
1132
+ Y. Xia, X. Cheng, A. Fenster, and M. Ding, “Automatic classification
1133
+ of carotid ultrasound images based on convolutional neural network,”
1134
+ in Medical Imaging 2020: Computer-Aided Diagnosis, 2020, vol.
1135
+ 11314, p. 1131441.
1136
+ [28]
1137
+ W. Ma et al., “Multilevel strip pooling-based convolutional neural
1138
+ network for the classification of carotid plaque echogenicity,”
1139
+ Comput. Math. Methods Med., vol. 2021, 2021.
1140
+ [29]
1141
+ H. Shen, W. Zhang, H. Wang, G. Ding, and J. Xie, “NDDR-LCS: A
1142
+ Multi-Task Learning Method for Classification of Carotid Plaques,” in
1143
+ 2020 IEEE International Conference on Image Processing (ICIP),
1144
+ 2020, pp. 2461–2465.
1145
+ [30]
1146
+ Y. Zhao, J. D. Spence, and B. Chiu, “Three-dimensional ultrasound
1147
+ assessment of effects of therapies on carotid atherosclerosis using
1148
+ vessel wall thickness maps,” Ultrasound Med. Biol., vol. 47, no. 9, pp.
1149
+ 2502–2513, 2021.
1150
+ [31]
1151
+ R. Zhou, W. Ma, A. Fenster, and M. Ding, “U-Net based automatic
1152
+ carotid plaque segmentation from 3D ultrasound images,” in Medical
1153
+ Imaging 2019: Computer-Aided Diagnosis, 2019, vol. 10950, pp.
1154
+ 1119–1125.
1155
+ [32]
1156
+ L. Saba et al., “Ultrasound-based carotid stenosis measurement and
1157
+ risk stratification in diabetic cohort: a deep learning paradigm,”
1158
+ Cardiovasc. Diagn. Ther., vol. 9, no. 5, p. 439, 2019.
1159
+ [33]
1160
+ M. Biswas et al., “Two-stage artificial intelligence model for jointly
1161
+ measurement of atherosclerotic wall thickness and plaque burden in
1162
+ carotid ultrasound: A screening tool for cardiovascular/stroke risk
1163
+ assessment,” Comput. Biol. Med., vol. 123, p. 103847, 2020.
1164
+ [34]
1165
+ T. Wen et al., “An accurate and effective FMM-based approach for
1166
+ freehand 3D ultrasound reconstruction,” Biomed. Signal Process.
1167
+ Control, vol. 8, no. 6, pp. 645–656, 2013.
1168
+ [35]
1169
+ O. V. Solberg, F. Lindseth, H. Torp, R. E. Blake, and T. A. N. Hernes,
1170
+ “Freehand 3D ultrasound reconstruction algorithms—a review,”
1171
+ Ultrasound Med. Biol., vol. 33, no. 7, pp. 991–1009, 2007.
1172
+ [36]
1173
+ H. R. Roth et al., “A new 2.5 D representation for lymph node
1174
+ detection using random sets of deep convolutional neural network
1175
+ observations,” in International conference on medical image
1176
+ computing and computer-assisted intervention, 2014, pp. 520–527.
1177
+ [37]
1178
+ D.-H. Lee and others, “Pseudo-label: The simple and efficient semi-
1179
+ supervised learning method for deep neural networks,” in Workshop
1180
+ on challenges in representation learning, ICML, 2013, vol. 3, no. 2, p.
1181
+ 896.
1182
+ [38]
1183
+ O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional
1184
+ networks for biomedical image segmentation,” in International
1185
+ Conference on Medical image computing and computer-assisted
1186
+ intervention, 2015, pp. 234–241.
1187
+ [39]
1188
+ H.-B. Chen, R. Zheng, L.-Y. Qian, F.-Y. Liu, S. Song, and H.-Y.
1189
+ Zeng, “Improvement of 3-D Ultrasound Spine Imaging Technique
1190
+ Using Fast Reconstruction Algorithm,” IEEE Trans. Ultrason.
1191
+ Ferroelectr. Freq. Control, vol. 68, no. 10, pp. 3104–3113, 2021.
1192
+ [40]
1193
+ S. Song, Y. Huang, J. Li, M. Chen, and R. Zheng, “Development of
1194
+ Implicit Representation Method for Freehand 3D Ultrasound Image
1195
+ Reconstruction of Carotid Vessel,” in 2022 IEEE International
1196
+ Ultrasonics Symposium (IUS), 2022, pp. 1–4.
1197
+ [41]
1198
+ M. Esposito et al., “Total variation regularization of pose signals with
1199
+ an application to 3D freehand ultrasound,” IEEE Trans. Med.
1200
+ Imaging, vol. 38, no. 10, pp. 2245–2258, 2019.
1201
+ [42]
1202
+ J. Wu et al., “Deep morphology aided diagnosis network for
1203
+ segmentation of carotid artery vessel wall and diagnosis of carotid
1204
+ atherosclerosis on black-blood vessel wall MRI,” Med. Phys., vol. 46,
1205
+ no. 12, pp. 5544–5561, 2019.
1206
+ [43]
1207
+ H. Chen, R. Zheng, E. Lou, and L. H. Le, “Compact and Wireless
1208
+ Freehand 3D Ultrasound Real-time Spine Imaging System: A pilot
1209
+ study.,” in Annual International Conference of the IEEE Engineering
1210
+ in Medicine and Biology Society. IEEE Engineering in Medicine and
1211
+ Biology Society. Annual International Conference, 2020, vol. 2020,
1212
+ pp. 2105–2108.
1213
+
1214
+
7tE1T4oBgHgl3EQfTwM_/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
89E3T4oBgHgl3EQfSAmG/content/2301.04428v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bee50fb8413e8930cc506f0f8ee5f77087e9bf87e26605d53bcb542341f3056a
3
+ size 309197
89E3T4oBgHgl3EQfSAmG/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4505dbc4643dc1f819d9b2dcff01dbe3d313b5c5defca6c57c51e69b9d2ad7e
3
+ size 4587565
89E3T4oBgHgl3EQfSAmG/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5583f983c65b6592e61bf826ad68d82857932a64c3732602187e675f40792182
3
+ size 156234
8dE3T4oBgHgl3EQfqgpk/content/2301.04652v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ff047208e99a0e02aecce35e0cf8470d7f44540564e6fdd8cd245b1ed45ceab
3
+ size 2887720
8dFLT4oBgHgl3EQfBS4r/content/tmp_files/2301.11969v1.pdf.txt ADDED
@@ -0,0 +1,759 @@
 
1
+ Noise Resistant Phase Imaging with Intensity Correlation
2
+ Jerzy Szuniewicz1, Stanisław Kurdziałek1, Sanjukta Kundu1, Wojciech Zwolinski1,
3
+ Radosław Chrapkiewicz2, Mayukh Lahiri3, Radek Lapkiewicz1∗
4
+ 1Institute of Experimental Physics, Faculty of Physics, University of Warsaw,
5
+ ul. Pasteura 5, 02-093 Warszawa, Poland,
6
+ 2CNC Program, Stanford University, Palo Alto, CA 94304, United States
7
+ 3Oklahoma State University, Stillwater, OK 74078-3072, United States
8
9
+ Interferometric methods, renowned for their reliability and precision, play a vital role
10
+ in phase imaging. Interferometry typically requires high coherence and stability be-
11
+ tween the measured and the reference beam. The presence of rapid phase fluctua-
12
+ tions averages out the interferogram, erasing the spatial phase information. This diffi-
13
+ culty can be circumvented by shortening the measurement time. However, shortening
14
+ the measurement time results in smaller photon counting rates precluding its applica-
15
+ bility to low-intensity phase imaging. We introduce and experimentally demonstrate
16
+ a phase imaging technique that is immune to position-independent, time-dependent
17
+ phase fluctuation. We accomplish this by measuring intensity correlation instead of
18
+ intensity. Our method enables using long measurement times and is therefore advan-
19
+ tageous when the photon flux is very low. We use a Fisher information-based approach
20
+ to show that the precision of phase reconstruction achieved using our method is in fact
21
+ the best achievable precision in the scenario when two photons are detected per phase
22
+ stability time.
23
+ Introduction
24
+ Phase imaging is important for applications spanning many diverse fields, including biological imaging
25
+ (1), and phase microscopy (2,3). Measurements of the phase shifts within samples can yield information
26
+ about the refractive index, thickness, and structure of an object. Interferometry (4) is a very powerful tool
27
+ that is often used in phase imaging of an object (5). Interferometric measurements allow the detection
28
+ of small variations in optical paths. There are numerous interferometric techniques such as the ones
29
+ regularly used in optical coherence tomography (6,7) or quantitative phase microscopy (8). Some of the
30
+ techniques, especially those related to biology, require very low photon fluxes. For an interferometric
31
+ measurement a wave field that has interacted with an object is superposed with a reference field and the
32
+ resulting interference pattern is detected by a camera. If the object field (probe field) and the reference
33
+ 1
34
+ arXiv:2301.11969v1 [physics.optics] 27 Jan 2023
35
+
36
+ field are mutually coherent, the time-averaged intensity on camera is given by (9,10):
37
+ I(x, y) = Ir + Io + 2√(Ir Io) cos[φin + φ(x, y)],    (1)
41
+ where Ir and Io are the averaged intensity of the reference and the object fields, respectively, φin is the
42
+ interferometric phase that can be changed by introducing spatial or temporal delays between the two
43
+ fields, and φ(x, y) is the phase map of the object. Standard interferometric phase imaging techniques are
44
+ based on the signature of φ(x, y) left in the detected intensity pattern. However, for any such method
45
+ to be applicable, the object field and the reference field need to be mutually coherent. Time-dependent,
46
+ uncontrollable phase fluctuations introduce incoherence between the object and reference fields; the
+ method is therefore vulnerable to such fluctuations.
49
+ When the phase fluctuates much faster compared to the detection time, the coherence between the
50
+ object and image fields is practically lost and no interference will be observed, i.e.,
51
+ I(x, y) = Ir + Io.
52
+ (2)
53
+ Since there is no information of φ(x, y) in this intensity pattern, the standard phase imaging scheme
54
+ becomes inapplicable to this case. One way to avoid the effect of this time-dependent phase fluctuation
55
+ is to shorten the duration of measurement (11). A short measurement time, however, reduces the amount
56
+ of detected light and is therefore impractical for imaging photo-sensitive biological specimens, which
57
+ require low-intensity light. Furthermore, for interferometric fluorescence super-resolution microscopy
58
+ (12), often very low-intensity light (13) needs to be superposed. In such cases, any time-dependent
59
+ phase fluctuations must be avoided due to the relatively long detection time requirement.
60
+ Here, we introduce a method of phase imaging that is fully resistant to time-dependent phase fluctu-
61
+ ations as long as it is possible to measure at least two photons per phase stability time. Our method is
62
+ fundamentally different from the standard phase imaging techniques (14), as we do not need interfero-
63
+ metric phase stability due to the fact that we measure intensity correlation instead of intensity.
64
+ The scheme of our experiment is illustrated in Fig. 1. A wave field that has interacted with an object
65
+ (object field) is superposed with a reference field and the resulting interference pattern is detected by
66
+ a camera. A time-dependent phase fluctuation Θ(t) is introduced in the reference field. Under these
67
+ circumstances, no information on φ(x, y) can be retrieved from the intensity pattern given by Eq. (2),
68
+ and therefore the standard phase imaging techniques become inapplicable. In the present article, we
69
+ introduce a method of phase imaging that is resistant to time-dependent phase fluctuations, provided that
70
+ phase change is uniform throughout the entire sample (15). Our method relies on measuring intensity
71
+ correlations of light and is inspired by the intensity interferometry technique introduced by Hanbury
72
+ Brown and Twiss (HBT) (16). The HBT method and its generalizations were applied to a variety of light
73
+ sources (17–25) and similarly our technique might be applied in various scenarios including laser and
74
+ thermal light as important examples.
75
+ We determine the correlation function between the intensities measured at a pair of points (x, y) and
76
+ (x′, y′):
+ ⟨˜I(x, y; t) ˜I(x′, y′; t)⟩ ∝ 1 ± (1/2) cos[φ(x, y) − φ(x′, y′)],    (3)
82
+ where ˜I(x, y; t) is the instantaneous intensity measured at a point (x, y) at time t. On the right hand side
83
+ 2
84
+
85
+ Figure 1: (a) Simplified schematic of the experiment: we divide input light into two paths, an object
86
+ path(φ(x)), and a reference path. In the object path, we introduce a spatially varying phase that we want
87
+ to image. A time-fluctuating interferometric phase can be introduced to the system (Θ(t)) with no loss
88
+ in the quality of the phase retrieval. For slowly fluctuating phase Θ(t), we can measure high visibility
89
+ interference fringes (b), but no interferogram can be recorded due to insufficient photon statistics and
90
+ rapid fluctuations of (Θ(t)) - depicted in the image (c) - where fringes average to the intensity profile of
91
+ the beam having no phase information. Images (b) and (c) depict normalized one photon interference
92
+ fringes for slowly and highly fluctuating cases respectively. We also depict second-order correlation
93
+ interferograms (d) for the same photons constituting the interferograms in image (c). Even for this
94
+ highly fluctuating case, where we record only a few photons within the stability time of the phase Θ(t),
95
+ we can retrieve high visibility second-order correlation interferograms preserving full phase information
96
+ about the measured phase φ(x).
97
+ of Eq. (3), the plus (+) and minus (−) signs apply when the two points of measurement are in the same
98
+ and different beam splitter outputs, respectively. We also assume, Ir = Io. Note that the information
99
+ about the phase map of the object, which was lost in the intensity pattern [Eq. (2)], reappears in the
100
+ intensity correlation [Eq. (3)].
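As an illustration of this point (an editor's sketch in Python/NumPy, not the authors' code; the fringe model, array sizes and the quadratic phase profile are assumptions), the frame-averaged intensity loses the fringes when the global phase is redrawn in every frame, while the two-point intensity correlation retains cos[φ(x) − φ(x′)] as in Eq. (3):

    import numpy as np

    rng = np.random.default_rng(0)
    npix, nframes = 64, 20000
    x = np.linspace(0.0, 1.0, npix)
    phi = 4.0 * np.pi * x**2                   # assumed 1D quadratic object phase

    frames = np.empty((nframes, npix))
    for k in range(nframes):
        theta = rng.uniform(0.0, 2.0 * np.pi)  # global phase, new value every frame
        frames[k] = 1.0 + np.cos(phi + theta)  # ideal fringes, Ir = Io

    mean_intensity = frames.mean(axis=0)       # ~flat: fringes average out, cf. Eq. (2)
    corr = frames.T @ frames / nframes         # corr[i, j] ~ 1 + 0.5*cos(phi[i] - phi[j]), cf. Eq. (3)
    phase_difference = np.arccos(np.clip(2.0 * (corr - 1.0), -1.0, 1.0))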
101
+ 3
102
+
103
+ The 2nd-order intensity correlations map contains the full information required to optimally recon-
104
+ struct φ(x, y) in the extreme case when only two photons are detected during the phase stability time.
105
+ Our strategy of reconstructing the actual phase distribution in this scenario is optimal, which we prove
106
+ rigorously using estimation theory tools, namely Fisher Information and Cram´er-Rao bound (see Sup-
107
+ plementary S1 for detail).
108
123
+ Figure 2: Experimental setup for noise-resistant phase imaging. The incoming beam of Laser after pass-
124
+ ing through a λ/2 half-wave plate, a λ/4 quarter-wave plate, a PBS (polarizing beam splitter), and another
+ λ/2 plate, the beam enters a Michelson-type interferometer. Each of the two paths in the interferometer is
129
+ encoded with orthogonal polarization. In one arm the spatial phase φ(x) is introduced next to the surface
130
+ of the interferometer mirror. The interferometric mirror in the other arm is given a phase fluctuation by
131
+ attaching it to a piezoelectric actuator. The two beams of the interferometric arms after combining at the
132
+ PBS pass through L1, and L2 lenses. The calcite polarizer acts as a 50/50 Beamsplitter. The I-sCMOS
133
+ - Intensified sCMOS camera records single photons at both outputs of the interferometer. The use of
134
+ short exposure time of the I-sCMOS, in the single nanosecond timescale, gives it stability and resistance
135
+ against fluctuations up to tens of MHz.
136
+ Experimental setup
137
+ The experimental setup is depicted in Fig.2. Light from a polarized, coherent source (780 nm laser) is
138
+ attenuated, spatially filtered, and directed to two arms of a polarization-based Michelson interferometer.
139
+ In order to prepare the object beam, in one of the arms, we place a phase mask to imprint spatially varying
140
+ phase φ(x) to the beam. We perform experiments with three kinds of different phase masks applied to
141
+ our object beam. We imprint a 1D quadratic local phase profile to the beam by placing a cylindrical lens
142
+ of focal length, f = 1000 mm in proximity to the mirror (Fig. 2). Additionally, we also use a spatial
143
+ light modulator (SLM) as a phase mask, as it can display any arbitrary phase profile. We imprint 1D
144
+ 4
145
+
146
+ exponential and sinusoidal phases to our object beam by the SLM display (see supplementary S2 for
147
+ detail).
148
+ A time-dependent phase fluctuation is introduced in the other arm (the reference beam) to make
149
+ it incoherent with the object beam. This is realized with a piezoelectric actuator driven by a RAMP.
150
+ Light is combined on the PBS. Object and the reference planes are imaged onto two regions of an
151
+ Intensified sCMOS (I-sCMOS) (26) camera with a 4f system using lenses L1 and L2. After the PBS,
152
+ the object and the reference beams are distinguishable due to their orthogonal polarization. In order to
153
+ observe interference we rotate their polarization by 45 degrees with a half-wave plate and we perform the
154
+ projective measurement in the original bases with a calcite crystal. This mixes the light from both outputs
155
+ and allows us to observe interference in both outputs of the beam splitter. The visibility is reduced due
156
+ to imperfect imaging because of the path length difference in the calcite. In order to register very low
157
+ photon flux and to minimize exposure time to circumvent fluctuations, we use an Intensified-sCMOS
158
+ camera. We collect the data with a frame rate of 200 Hz. Choosing a low exposure time Texp ∼ ns
159
+ allows performing measurement under phase fluctuations with frequency up to (fn ∼ 1/Texp) tens of
160
+ MHz.
161
+ Results
162
+ Data measured in our experiment consist of an average of 15 photons at both outputs of the interferome-
163
+ ter per frame. We remove temporal correlations between subsequent frames by randomly permuting the
164
+ order of frames before further processing—this process does not change the performance of our method
165
+ but allows us to simulate the conditions, in which the global phase fluctuates faster than the camera
166
+ frame rate. In such conditions, it is impossible to retrieve phases using standard interferometric methods.
167
+ Averaging recorded intensities over multiple frames or increasing measurement time would result in a
168
+ loss of the visibility of the interference fringes. In contrast, we average correlations of detected pho-
169
+ tons’ positions without any loss of the phase information. Such averaging over multiple frames results
170
+ in the reproduction of the correlation function (Eq.3), from which we can retrieve the phase profile us-
171
+ ing the standard digital holography method, Fourier off-axis holography (27). The correlation function
172
+ is measured from the coincidence map of the detected photons’ positions. This analyzing mechanism
173
+ is the essence of our noise-resistant phase imaging technique. 1D quadratic phase measurement intro-
174
+ duced by the cylindrical lens is shown in Fig. 3. The measured coincidence map (Fig. 3(a)) consists of
175
+ approximately 107 registered photon pairs with the mean number of coincidences per pixel as 100. We
176
+ estimate the phase profile shape using the collected data, and compute the Mean Squared Error (MSE)
177
+ between the measured and real value. As we show in Fig. 3(c), the MSE drops down with the total
178
+ number of measured photons, and eventually reaches the theoretical minimum, obtained with the help of
179
+ the Cram´er-Rao bound (see Supplement 2 for details). This proves, that our method of phase estimation
180
+ is optimal when at most two photons are measured during the phase stability time—notice, that this is
181
+ the most extreme limit in which one can gain any information about the phase profile.
182
+ SLM-encoded phase measurements shown in Fig. 4(a), (b), and (c) represent the measured hologram,
183
+ the retrieved phase, and the error per pixel respectively when the sinusoidal phase is applied. Similarly,
184
+ Figs. 4(e), (f), and (g) represent the measured hologram, the retrieved phase, and the error per pixel re-
185
+ spectively when the 1D exponential phase is applied. Errors in the retrieved respective phases (Fig. 4(c),
186
+ Fig. 4(g)) are due to a finite number of pixels on the SLM and discreet values of the displayed phases.
187
+ 5
188
+
189
+ a
190
+ b
191
+ c
192
+ Figure 3:
193
+ (a) represents the measured coincidence map for a 1D quadratic phase profile, plotted with
194
+ a solid line in (b). The reconstructed phase with error bars is also shown in (b). The visibility of
195
+ the fringes in the correlation map (a) is equal to 0.62/2 (theoretical maximum with classical light is
196
+ 1/2). The total number of coincidences detected in the experiment is ∼ 107. By randomly removing
197
+ a part of the collected signal, we can check how the Mean Squared Error (MSE) associated with the
198
+ phase reconstruction scales with the mean number of photons detected in one pixel during the whole
199
+ experiment (c). The MSE from the experiment is then compared with the MSE obtained using simulated
200
+ hologram, with the same parameters as in the experiment. We calculate the fundamental Cram´er-Rao
201
+ (C-R) lower bound on the MSE, assuming the visibility of hologram fringes to be equal to 0.62/2 (as in
202
+ our experiment). When no noise apart from shot noise is present (as in simulation), our method allows to
203
+ saturate this fundamental limit for large enough (∼ 5 · 104) number of photons detected per pixel. Other
204
+ possible sources of noise (e.g.) dark counts may slightly affect the MSE obtained experimentally.
205
+ Here we show that it is possible to retrieve complete phase profiles only with an average of two photons
206
+ detected per frame which is an absolute minimum of detected photons per frame.
207
+ Conclusion and Discussion
208
+ In conclusion, we demonstrate a complete retrieval of phase patterns in the presence of high-frequency
209
+ random phase fluctuations up to the order of tens of MHz when standard phase imaging techniques
210
+ fail due to the scarcity of photons within a stable phase time interval. Our method is applicable to light
211
+ sources described with different statistics, such as for example thermal light sources, and can be extended
212
+ to interference between independent sources (21,28).
213
+ 6
214
+
215
+ Figure 4: Experimental measurement of the spatial phases with the SLM - spatial light modulator. Mea-
216
+ sured coincidence maps (correlation functions) between outputs of the interferometer for (a) sinusoidal,
217
+ and (d) exponential phases set on SLM. Each axis of coincidence maps represents the positions of pho-
218
+ tons detected along one output of the interferometer. (b) and (e) represent the aforementioned recon-
219
+ structed phases. (c) and (f) show errors and square-root of the intensities.
220
+ 7
221
+
222
+ We want to highlight, that the presented method optimality is proven using the Cramer-Rao bound –
223
+ all the spatial phase information stored in the detected photons is retrieved (29).
224
+ High temporal resolution (short gating time) is necessary for overcoming the problem of the rapidly
225
+ fluctuating temporal phases. Such high temporal resolution in our experiment was obtained using an
226
+ image-intensified camera, which allows us to collect data with short exposure times down to a few
227
+ nanoseconds. However, our method is not limited to this camera type and can be implemented using
228
+ various high-temporal resolution detection platforms. Because of high quantum efficiency, temporal
229
+ resolution, and low noise level in recent single-photon avalanche diode (SPAD) array technology (30)
230
+ development, our method can also be implemented by SPAD arrays in the future. We stress that the tech-
231
+ nique can be implemented both in the photon counting regime and by employing less accurate intensity
232
+ measurements, yet it is the most remarkable for cases where registering more than two photons per phase
233
+ stability time is rare. Our method can be readily generalized to two-dimensional spatial phase profiles
234
+ by creating higher-dimensional correlation maps. It also allows for implementation in different degrees
235
+ of freedom, such as temporal or spectral, allowing the creation of joint probability maps both for photon
236
+ detection times or their detected wavelengths. It is also possible to incorporate an additional degree of
237
+ freedom to a measurement, measuring for instance joint temporal-spatial correlations maps.
238
+ Additionally, this method could be expanded for different situations, in which multiple photons are
239
+ detected or photons are registered at the same output. Each pair of photons can be treated as a separate
240
+ coincidence, so the number of coincidences scales with a number of detected photons n as
241
+ �n
242
+ 2
243
+ �. We can
244
+ also create such coincidence maps for multiple photons within each of the interferometer outputs as well
245
+ as between them. Such holograms can build up much faster and shorten measurement time while the
246
+ physics behind them is the same.
247
+ This method is only valid when all values of the global phase Θ have the same probability of ap-
248
+ pearing during the time interval in which the whole measurement is performed. To satisfy this condition
249
+ for arbitrary temporal phase noise, it is enough to add random uniformly distributed signal oscillating
250
+ between 0 and 2π to the unknown global phase fluctuations Θ(t). In fact, the added noise can be much
251
+ slower than the rate of phase global phase fluctuations Θ(t).
252
+ Our method opens up possible applications in wavefront sensing under low light conditions for mi-
253
+ croscopy as well as fundamental research. Unbalanced interferometers, such as ones used in the time–
254
+ bin encoding could be of particular interest, as our method enables using additional degrees of freedom
255
+ (multi-dimensional information encoding) while filtering out phase fluctuations arising, for instance,
256
+ from unmatched optical paths. In addition, because of the shorter wavelengths of X-rays (also of neu-
257
+ trons or electrons), X-ray interferometry (31,32) requires much tighter alignment and better mechanical
258
+ stability of the interferometer. We emphasize that because our technique is phase noise resistant, it holds
259
+ a potential for phase-sensitive imaging using X-ray interferometry. In addition, analogous techniques
260
+ might also find applications in matter-wave interferometry (33,34).
261
+ Acknowledgments
262
+ We acknowledge discussions with Piotr Wegrzyn, Lukasz Zinkiewicz, Michal Jachura, Wojciech Wasilewski,
263
+ and Marek Zukowski. This work was supported by the Foundation for Polish Science under the FIRST
264
+ TEAM project ’Spatiotemporal photon correlation measurements for quantum metrology and super-
265
+ resolution microscopy’ co-financed by the European Union under the European Regional Development
266
+ 8
267
+
268
+ Fund (POIR.04.04.00-00-3004/17-00), and by the National Laboratory for Photonics and Quantum Tech-
269
+ nologies—NLPQT (POIR.04.02.00.00-B003/18).
270
+ Supplementary materials
271
+ S1 - Fundamental precision limits of phase imaging with fluctuating reference arm
272
+ S2 - Experimental setup details
273
+ References
274
+ 1. Y. Park, C. Depeursinge, G. Popescu, Nature Photonics 12, 578 (2018).
275
+ 2. G. Popescu, T. Ikeda, R. R. Dasari, M. S. Feld, Optics Letters 31, 775 (2006).
276
+ 3. Z. Wang, et al., Optics Express 19, 1016 (2011).
277
+ 4. P. Hariharan, Ed. 2, Optical Interferometry (Academic Press, 2003).
278
+ 5. T. Ikeda, G. Popescu, R. R. Dasari, M. S. Feld, Optics Letters 30, 1165 (2005).
279
+ 6. D. Huang, et al., Science 254, 1178 (1991).
280
+ 7. M. Sticker, C. K. Hitzenberger, R. Leitgeb, A. F. Fercher, Optics Letters 26, 518 (2001).
281
+ 8. E. Cuche, F. Bevilacqua, C. Depeursinge, Optics Letters 24, 291 (1999).
282
+ 9. E. Hecht, Ed. 5, Chapter 9, Optics (Pearson Education Limited, 2017).
283
+ 10. G. Popescu, Quantitative Phase Imaging of Cells and Tissues (McGraw-Hill, New York, 2011).
284
+ 11. G. Magyar, L. Mandel, Nature 198, 255 (1963).
285
+ 12. L. A. Rozema, et al., Phys. Rev. Lett. 112, 223602 (2014).
286
+ 13. P. A. Morris, R. S. Aspden, J. E. C. Bell, R. W. Boyd, M. J. Padgett, Nature Communications 6
287
+ (2015).
288
+ 14. R. J. Collier, C. B. Burckhardt, L. H. Lin, Optical Holography (Academic, 1971).
289
+ 15. J. Szuniewicz, et al., Rochester Conference on Coherence and Quantum Optics (CQO-11) (OSA,
290
+ 2019).
291
+ 16. R. Hanbury Brown, R. Q. Twiss, Nature 177, 27 (1956).
292
+ 17. C. K. Hong, Z. Y. Ou, L. Mandel, Physical Review Letters 59, 2044 (1987).
293
+ 18. Z. Y. Ou, E. C. Gage, B. E. Magill, L. Mandel, Journal of the Optical Society of America B 6, 100
294
+ (1989).
295
+ 9
296
+
297
+ 19. T. B. Pittman, J. D. Franson, Physical Review Letters 90 (2003).
298
+ 20. R. L. Pfleegor, L. Mandel, Physical Review 159, 1084 (1967).
299
+ 21. J. G. Rarity, P. R. Tapster, R. Loudon, Journal of Optics B: Quantum and Semiclassical Optics 7,
300
+ S171 (2005).
301
+ 22. X. Li, L. Yang, L. Cui, Z. Y. Ou, D. Yu, Optics Express 16, 12505 (2008).
302
+ 23. A. J. Bennett, R. B. Patel, C. A. Nicoll, D. A. Ritchie, A. J. Shields, Nature Physics 5, 715 (2009).
303
+ 24. Y.-S. Kim, O. Slattery, P. S. Kuo, X. Tang, Physical Review A 87 (2013).
304
+ 25. R. Chrapkiewicz, M. Jachura, K. Banaszek, W. Wasilewski, Nature Photonics 10, 576 (2016).
305
+ 26. R. Chrapkiewicz, W. Wasilewski, K. Banaszek, Optics Letters 39, 5090 (2014).
306
+ 27. J. Mertz, Introduction to Optical Microscopy (Roberts and Company Publishers, 2009.).
307
+ 28. H. Paul, Rev. Mod. Phys. 58, 209 (1986).
308
+ 29. H. Cram´er, Mathematical methods of statistics, vol. 26 (Princeton university press, 1999).
309
+ 30. I. M. Antolovic, C. Bruschini, E. Charbon, Optics Express 26, 22234 (2018).
310
+ 31. I. Zanette, T. Weitkamp, T. Donath, S. Rutishauser, C. David, Physical Review Letters 105 (2010).
311
+ 32. T. Weitkamp, B. N¨ohammer, A. Diaz, C. David, E. Ziegler, Applied Physics Letters 86, 054101
312
+ (2005).
313
+ 33. E. M. Rasel, M. K. Oberthaler, H. Batelaan, J. Schmiedmayer, A. Zeilinger, Physical Review Letters
314
+ 75, 2633 (1995).
315
+ 34. M. Arndt, A. Ekers, W. von Klitzing, H. Ulbricht, New Journal of Physics 14, 125006 (2012).
316
+ 10
317
+
318
+ Supplementary materials and methods
319
+ 1
320
+ S1: Fundamental precision limits of phase imaging with
321
+ fluctuating reference arm
322
+ 1.1
323
+ The measurement model
324
+ Two cameras are set on the two outputs of the interferometer, each of them consists of
325
+ the same number of pixels npix. The sample area giving the additional phase φi is imaged
326
+ to the pixel number i on both cameras. Only two photons are received per the stability
327
+ time of the interferometer phase. A single measurement consists of a detection of these
328
+ two photons. The output of the single measurement is a pair (i+/−, j+/−). Numbers i, j
329
+ stand for the numbers of pixels in which photons were detected, whereas indices + or
330
+ − indicates in which of the two outputs the corresponding photon was measured. The
331
+ probability of measuring a single photon in a pixel i+/− is:
332
+ p(i+/−) = ˜N Ii (1/2)(1 ± v cos(φi + θ)),    (1)
336
+ where ˜N is a normalization factor, v is interferometer visibility, θ is an extra, global,
337
+ fluctuating phase, and Ii is the intensity of the beam illuminating the phase mask in the area
338
+ corresponding to pixel i. Phase θ is stable during the detection of each photon pair,
339
+ its value for each pair is independently drawn from the continuous uniform probability
340
+ distribution U(0, 2π). There is no information about θ value in each experiment, so the
341
+ observed probability of obtaining pair (i+/−, j+/−) in every single frame is:
342
+ p(i+/−, j+/−) = ∫₀^{2π} p(i+/−, j+/−, θ) dθ    (2)
347
+ p(i+/−, j+/−, θ) is a joint probability distribution of measuring pair (i+/−, j+/−) with the
348
+ fixed value of θ. From equation 2 we obtain the formulas:
349
+ p(i+, j+) = p(i−, j−) = NIiIj(2 + v2 cos(φi − φj))
350
+ (3)
351
+ p(i+, j−) = p(i−, j+) = NIiIj(2 − v2 cos(φi − φj))
352
+ (4)
353
+ N is a new normalization factor. The above equations are our starting point to further
354
+ inference about the maximal precision of the measurement. Full information about each
355
+ single measurement is included in the dependence of the probability p of the specific
356
+ result of a measurement (i±, j±) on the estimated parameters φi.
357
+ 1
358
+ arXiv:2301.11969v1 [physics.optics] 27 Jan 2023
359
+
360
+ 1.2
361
+ Cramér-Rao bound
362
+ In order to calculate maximal precision of estimation of the parameters φi, Fisher Infor-
363
+ mation (FI) matrix will be calculated. There are 4 different types of events, which can
364
+ occur during one experiment - two photons may be detected in one output (+ or −) or
365
+ in different outputs ( we distinguish between +− and −+). We can distinguish between
366
+ these 4 types, so the FI is the sum of FI matrices for all events’ types:
367
+ Ftot = F++ + F−− + F+− + F−+
368
+ (5)
369
+ From equations 3 and 4 we can simply conclude, that F++ = F−− and F+− = F−+. In
370
+ the next part of the article F++ matrix will be calculated.
371
+ In order to simplify the formulas, the following notation will be used:
372
+ p(i+, j+) ≡ p(i, j),
373
+
374
+ ∂φk
375
+ ≡ ∂k,
376
+ F ≡ F++
377
+ The element of the FI matrix can be written in the following form:
378
+ Fkl = Σ_{i,j=1}^{npix} ∂k p(i, j) ∂l p(i, j) / p(i, j),    (6)
386
+ Subsequently:
387
+ ∂kp(i, j) = NIiIjv2(δjk − δik) sin(φi − φj)
388
+ (7)
389
+ ∂k p(i, j) ∂l p(i, j) = (δjk − δik)(δjl − δil) N² Ii² Ij² v⁴ sin²(φi − φj)    (8)
393
+ Consequently, the matrix element is:
394
+ Fkl = Σ_{i,j=1}^{npix} (δjk − δik)(δjl − δil) N Ii Ij v⁴ sin²(φi − φj) / [2 + v² cos(φi − φj)]    (9)
401
+ If k ̸= l, then for any m we have δmkδml = 0, so (δjk − δik)(δjl − δil) = −δjkδil − δikδjl.
402
+ That means, that non-diagonal matrix elements are:
403
+ Fkl = −2N Ik Il v⁴ sin²(φk − φl) / [2 + v² cos(φk − φl)],   k ≠ l    (10)
408
+ With the help of the equality (δjk − δik)2 = δjk + δik − 2δikδjk we can obtain diagonal
409
+ terms of F:
410
+ Fkk = 2N Ik v⁴ Σ_{i=1}^{npix} Ii sin²(φi − φk) / [2 + v² cos(φi − φk)]    (11)
417
+ For any function f:
418
+ Σ_{i=1}^{npix} f(φi, Ii) = npix ⟨f(φi, Ii)⟩i,    (12)
423
+ 2
424
+
425
+ where ⟨f(φi, Ii)⟩i is the mean value of the function over all pixels. In the next steps,
426
+ the number of pixels is assumed to be big and each phase in the sample occurs with the
427
+ same frequency. What is more, intensity of illuminating beam Ii is assumed to change
428
+ slowly compared to the change of phase φi. In other words, many different phases occur
429
+ in the region with approximately constant intensity. From these assumptions we obtain
430
+ the equality:
431
+ ⟨f(φi, Ii)⟩i = (1/2π) ∫₀^{2π} f(φ, ⟨I⟩) dφ,    (13)
437
+ where ⟨I⟩ stands for the mean intensity of the illuminating beam.
438
+ Using the above
439
+ assumptions, we can rewrite equation 11 as:
440
+ Fkk = 2N Ik ⟨I⟩ v⁴ (npix/2π) ∫₀^{2π} sin²(φ − φk) / [2 + v² cos(φ − φk)] dφ    (14)
447
+ Consequently all diagonal terms of F are the same:
448
+ Fkk = 2N ⟨I⟩ Ik npix (2 − √(4 − v⁴))    (15)
452
+ Now we need to calculate the value of a normalization factor N. We will use the fact,
453
+ that the sum of probabilities of all events must be equal to one:
454
+ Σ_{i,j=1}^{npix} [p(i+, j+) + p(i+, j−) + p(i−, j+) + p(i−, j−)] = 1    (16)
459
+ Using equations 3 and 4 we obtain:
460
+ 8N Σ_{i,j=1}^{npix} Ii Ij = 1    (17)
466
+ We can rewrite the sum in the above equation as:
467
+ Σ_{i,j=1}^{npix} Ii Ij = (Σ_{i=1}^{npix} Ii)² = npix² ⟨I⟩²    (18)
479
+ and obtain:
480
+ N = 1 / (8 npix² ⟨I⟩²)    (19)
485
+ Finally F++ matrix can be written in the form:
486
+ Fkl = (1/(4 npix)) (Ik/⟨I⟩) (2 − √(4 − v⁴))   for k = l,
+ Fkl = −(1/(4 npix²)) (Ik Il/⟨I⟩²) · 2v⁴ sin²(φk − φl) / [2 + v² cos(φk − φl)]   for k ≠ l    (20)
512
+
513
+ We have calculated F++ matrix, which is obviously similar to F−− matrix, because
514
+ formulas for propabilities in both cases are the same. Analogous calculation show, that
515
+ also F+− = F−+ = F++. Using the FI additivity we obtain the terms of Ftot matrix:
516
+ Ftot = 4F++
517
+ (21)
518
+ This is the FI matrix for a single measurement. If the whole experiment consists of nmes
519
+ independent repetitions of the single measurement, we obtain the FI:
520
+ F (nmes)
521
+ tot
522
+ = 4nmesF++ = 2nphotF++,
523
+ (22)
524
+ where nphot stands for the total number of measured photons during the experiment. In
525
+ the next part F stands for the whole FI associated with detection of nphot number of
526
+ photons. Terms of this matrix are:
527
+ Fkl =
528
+
529
+
530
+
531
+
532
+
533
+
534
+
535
+ nphot
536
+ npix
537
+ Ik
538
+ ⟨I⟩(1 −
539
+
540
+ 1 − v4/4)
541
+ for k = l
542
+ − nphot
543
+ 2n2
544
+ pix
545
+ IkIl
546
+ ⟨I⟩2
547
+ 2v4 sin2(φk−φl)
548
+ 2+v2 cos(φk−φl)
549
+ for k ̸= l
550
+ (23)
551
+ From the Cramér-Rao bound, the minimal possible variance for estimating φk satisfies
+ the inequality:
+ ∆²φk ≥ (F⁻¹)kk    (24)
+ In general, an estimator saturating the above inequality may not exist; however, it is
+ possible to get arbitrarily close to the bound if the number of measurements is large
+ enough. That means that the inequality becomes an equality if nphot → ∞. To simplify
+ the calculations we also use the inequality:
559
+ (F⁻¹)kk ≥ (Fkk)⁻¹,    (25)
+ which is true for all Hermitian F. Clearly, in the general case the above inequality is
+ not saturable. However, in our case the non-diagonal terms are asymptotically npix times
+ smaller than the diagonal terms, where npix is also the size of the F matrix. It may be
+ proven that, for such scaling of the non-diagonal terms with the size of the matrix, the
+ above inequality becomes saturable for npix → ∞. Using both of the above inequalities,
+ we obtain the following bound:
566
+ ∆φk ≥ √(npix ⟨I⟩ / (nphot Ik)) · 1/√(1 − √(1 − v⁴/4))    (26)
576
+ The value nk = nphot Ik / (npix ⟨I⟩) may be interpreted as the expected number of photons detected in
+ pixel number k (in either output). The above bound may be rewritten in the intuitive
579
+ form:
580
+ ∆φk ≥ √(1/nk) · 1/√(1 − √(1 − v⁴/4))    (27)
590
+ From this form of the inequality it is clear that the accuracy of measuring the value of a
+ particular phase depends directly on the number of photons interacting with the measured
+ area.
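For a quick numerical feel for the bound in Eq. (27) (illustrative values chosen by the editor, not taken from the experiment), a minimal evaluation in Python:

    import numpy as np

    def phase_std_bound(n_k, v):
        """Cramer-Rao lower bound on the phase std in pixel k, Eq. (27)."""
        return np.sqrt(1.0 / n_k) / np.sqrt(1.0 - np.sqrt(1.0 - v**4 / 4.0))

    # Assumed example values: 5e4 photons detected in the pixel, fringe visibility v = 0.8.
    print(phase_std_bound(5e4, 0.8))   # ~0.02 rad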
593
+ 4
594
+
595
+ 1.3
596
+ Comparison with long-stability-time interferometer
597
+ Let’s compare our result with the phase estimation precision limit for an interferometer
598
+ with slowly fluctuating phase θ. First of all, let’s notice that we can’t beat the accuracy
599
+ achievable in the situation, in which extra phase θ is known for all the detected photons.
600
+ Indeed, the information we get in a situation with unknown θ is always smaller, even
601
+ if the stability time of the interferometer is longer. If θ values are known, each single
602
+ photon detection could be treated as an independent event (which was not the case in
603
+ the previous section). Let’s calculate the FI matrix for the single photon detection when
604
+ θ is fixed. Single measurement is fully described by the probability distribution from
605
+ equation 1. Further we obtain:
606
+ ∂k p(i+/−) = ∓(1/2) δki ˜N Ii v sin(φi + θ)    (28)
609
+ In this case FI matrix has the form:
610
+ Fkl = Σ_{i=1}^{npix} ∂k p(i+) ∂l p(i+) / p(i+) + Σ_{i=1}^{npix} ∂k p(i−) ∂l p(i−) / p(i−)    (29)
623
+ From equation 28 it’s clear, that all non-diagonal terms of the F matrix are equal to
624
+ zero. This is because we obtain information about the φi phase only in case of detection
625
+ a photon in the pixel i+/−. The diagonal terms are:
626
+ Fkk = ˜N Ii v² sin²(φi + θ) / [1 − v² cos²(φi + θ)]    (30)
630
+ To make this case similar to the case described in the previous section, let us assume that
+ θ fluctuates and each value of θ appears with the same frequency (the difference is that θ
+ fluctuates slowly and we know its value). Then the mean FI for the single measurement
633
+ is:
634
+ ⟨Fkk⟩θ = (1/2π) ∫₀^{2π} Fkk dθ = (Ii / (npix ⟨I⟩)) (1 − √(1 − v²)),    (31)
648
+ where the formula ˜N = 1/(npix ⟨I⟩), obtained from the normalization condition, was used. If nmes
+ measurements were made, nphot photons were consumed. If we define nk = nphot Ik / (npix ⟨I⟩) as in
+ the previous section, we obtain the best possible accuracy of measuring each phase φk:
+ ∆φk ≥ √(1/nk) · 1/√(1 − √(1 − v²))    (32)
664
+ Equation 32 is very similar to Equation 27 – the only difference is that the term v⁴/4 is
+ substituted by the term v². That means that having only two photons per phase-
+ fluctuation stability time leads to a decrease of the effective visibility of the interferometer
+ from v to v²/2. As was mentioned, it is not possible to beat the bound from equation
670
+ 32 if θ value is not known in each measurement, even if the number of detected photons
671
+ 5
672
+
673
+ in a phase stability time was increased. However, we can get close to that bound, if the
674
+ phase stability time is big enough. Indeed, if we can measure many photons, when θ is
675
+ stable, we don’t really need to care about its unknown value and obtain relative values
676
+ of φk using the same method as in case of known θ ( it might be assumed to equal 0).
677
+ This scheme is repeated independently for each θ . The bound from the equation 30 is
678
+ saturated, because the number of measurements is big enough. That means, that we can
679
+ also saturate the bound resulting from the mean FI (equation 32).
680
+ 2
681
+ S2: Experimental setup details
682
+ This is a polarization-based Michelson interferometer. As a light source, we use a diode
683
+ laser at a wavelength of 780 nm coupled to a single-mode fiber. At the output of the
684
+ fiber, for polarization control, the attenuated beam passes through a half-wave plate, a
685
+ quarter-wave plate, and polarizing beam splitter (PBS), and another half-wave plate, and
686
+ then enters a Michelson-type interferometer. Each of the two paths in the interferometer
687
+ is encoded with orthogonal polarization. In order to prepare the object beam, in one
688
+ of the arms of the interferometer, we build two kinds of slightly modified setups - one
689
+ with a cylindrical lens placed in front of one of the mirror in the horizontally polarized
690
+ light beam path in the Michelson interferometer while in the other setup we replace
691
+ the mirror in the same path with a spatial light modulator (SLM), thereby introducing
692
+ spatially varying phase φ(x) onto the beam in that path. In one arm the spatial phase
693
+ φ(x) is introduced next to the surface of the interferometer mirror. The interferometric
694
+ mirror in the other arm is given a phase fluctuation by attaching it to a piezoelectric
695
+ actuator.
696
+ We perform experiments with three kinds of different phase masks applied to our
697
+ object beam. Our first configuration is to imprint a one dimensional quadratic local phase
698
+ profile to the beam by placing a cylindrical lens of focal length, f = 1000 mm in proximity
699
+ to the mirror (Fig. 2 in the main text). Additionally, in our second configuration with
700
+ SLM (from the HOLOEYE PLUTO) as a phase mask, we can display any arbitrary phase
701
+ profile. As an example, we imprint one dimensional exponential and sinusoidal phases
702
+ to our object beam by the SLM display.
703
+ We introduce a time-dependent phase fluctuation in the other arm (the reference
704
+ beam - vertically polarized beam path in the interferometer) to make it incoherent with
705
+ the object beam. This is realized with a piezoelectric actuator driven by a RAMP of 1.234
706
+ Hz. This shouldn’t be confused with the maximal noise frequency for which our method
707
+ works. Both of the object and reference beams are combined on the polarizing beam
708
+ splitter (PBS). After that, they are imaged onto two regions of an Intensified sCMOS
709
+ (I-sCMOS - with the image intensifier from Hamamatsu V7090D-71-G272 and sCMOS
710
+ from Andor Zyla) camera with a 4f system using lenses L3 and L4 of focal length 200 mm.
711
+ To observe the interference, the orthogonally polarized object and the reference beam
712
+ are required to be indistinguishable, and to do so, we rotate the polarization of both
713
+ beams by 45 degrees with a half-wave plate and we perform projective measurement in
714
+ the original bases with a calcite crystal. Here, the calcite acts as a 50/50 Beamsplitter.
715
+ 6
716
+
717
740
+ Figure 1:
741
+ Experimental setup for noise-resistant phase imaging. The incoming beam
742
+ of Laser after passing through a λ/2 - half-wave plate, λ/4 - quarter wave plate, PBS
743
+ - polarization beam splitter, and another λ/2 plate, the beam enters a Michelson type
744
+ interferometer. Each of the two paths in the interferometer is encoded with orthogonal
745
+ polarization. In one arm the spatial phase φ(x) is introduced by the spatial light modu-
746
+ lator (SLM). The interferometric mirror in the other arm is given a phase fluctuation by
747
+ attaching it to a piezoelectric actuator. The two beams of the interferometric arms after
748
+ combining at the PBS pass through L3, and L4 lenses. The calcite polarizer acts as a
749
+ 50/50 Beamsplitter. The I-sCMOS - Intensified sCMOS camera records single photons
750
+ at both outputs of the interferometer. The use of short exposure time of the I-sCMOS,
751
+ in the single nanosecond timescale, gives it stability and resistance against fluctuations
752
+ up to tens of MHz..
753
+ This mixes the light from both outputs and allows us to observe interference in both
754
+ outputs of the splitter. The I-sCMOS camera records single photons at both outputs
755
+ of the interferometer. The use of short exposure time of the I-sCMOS, in the single
756
+ nanosecond timescale, gives it stability and resistance against fluctuations up to tens of
757
+ MHz. We collect the data with 200 Hz of frame rate.
758
+ 7
759
+
8dFLT4oBgHgl3EQfBS4r/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
A9AzT4oBgHgl3EQf__9t/content/tmp_files/2301.01956v1.pdf.txt ADDED
@@ -0,0 +1,1542 @@
1
+ High-level semantic feature matters few-shot unsupervised domain adaptation
2
+ Lei Yu1, Wanqi Yang1*, Shengqi Huang1, Lei Wang2, Ming Yang1
3
+ 1School of Computer and Electronic Information, Nanjing Normal University, China
4
+ 2School of Computing and Information Technology, University of Wollongong, Australia
5
6
+ Abstract
7
+ In few-shot unsupervised domain adaptation (FS-UDA), most
8
+ existing methods followed the few-shot learning (FSL) meth-
9
+ ods to leverage the low-level local features (learned from con-
10
+ ventional convolutional models, e.g., ResNet) for classifica-
11
+ tion. However, the goal of FS-UDA and FSL are relevant yet
12
+ distinct, since FS-UDA aims to classify the samples in target
13
+ domain rather than source domain. We found that the local
14
+ features are insufficient to FS-UDA, which could introduce
15
+ noise or bias against classification, and not be used to effec-
16
+ tively align the domains. To address the above issues, we aim
17
+ to refine the local features to be more discriminative and rele-
18
+ vant to classification. Thus, we propose a novel task-specific
19
+ semantic feature learning method (TSECS) for FS-UDA.
20
+ TSECS learns high-level semantic features for image-to-class
21
+ similarity measurement. Based on the high-level features, we
22
+ design a cross-domain self-training strategy to leverage the
23
+ few labeled samples in source domain to build the classi-
24
+ fier in target domain. In addition, we minimize the KL diver-
25
+ gence of the high-level feature distributions between source
26
+ and target domains to shorten the distance of the samples be-
27
+ tween the two domains. Extensive experiments on Domain-
28
+ Net show that the proposed method significantly outperforms
29
+ SOTA methods in FS-UDA by a large margin (i.e., ∼ 10%).
30
+ keywords
31
+ Few-shot unsupervised domain adaptation, image-to-class
32
+ similarity, high-level semantic features, cross-domain self-
33
+ training, cross-attention.
34
+ Introduction
35
+ Currently, a setting namely few-shot unsupervised domain
36
+ adaptation (FS-UDA) (Huang et al. 2021)(Yang et al. 2022),
37
+ which utilizes few labeled data in source domain to train
38
+ a model to classify unlabeled data in target domain, owns
39
+ its potential feasibility. Typically, a FS-UDA model could
40
+ learn general knowledge from base classes during training
41
+ to guide classification in novel classes during testing. It is
42
+ known that both insufficient labels in source domain and
43
+ large domain shift make FS-UDA as a challenging task.
44
+ Previous studies, e.g., IMSE (Huang et al. 2021), first fol-
45
+ lowed several few-shot learning (FSL) methods (Li et al.
46
+ *The corresponding author is Wanqi Yang.
47
+ Copyright © 2023, Association for the Advancement of Artificial
48
+ Intelligence (www.aaai.org). All rights reserved.
49
+ Figure 1: A 5-way 1-shot task for FS-UDA where the sup-
50
+ port set includes five classes and one sample for each class.
51
+ The figure shows the similarity of query images to every
52
+ support classes and the spatial similarity of query images
53
+ to the predicted support class. We found using local fea-
54
+ tures could cause some inaccurate regions of query images
55
+ to match the incorrect classes, while our semantic features
56
+ make the object region in query images similar with their
57
+ true class, thus achieving correct classification.
58
+ 2019)(Tzeng et al. 2017) to learn the local features by us-
59
+ ing convolutional models (e.g., ResNet) and then leveraged
60
+ them to learn image-to-class similarity pattern for classifica-
61
+ tion. However, we wish to clarify that the goal of FS-UDA
62
+ and FSL are relevant yet distinct, since both of them suf-
63
+ fer from insufficient labeled training data whereas FS-UDA
64
+ aims to classify the samples in target domain rather than
65
+ source domain. As shown in Fig. 1, by visualizing the spatial
66
+ similarity of query images to predicted support classes, we
67
+ found using local features causes the inaccurate regions of
68
+ query images to match incorrect classes. This reason might
69
+ be that few labeled samples and large domain shift between
70
+ the support and query sets simultaneously result in the con-
71
+ ventional local features in FSL to fail in classification. In this
72
+ sense, the local features are insufficient to FS-UDA, which
73
+ could introduce noise or bias against the classification in tar-
74
+ get domain and not be used to effectively align the domains.
75
+ To address this issue, we aim to refine the low-level local
76
+ arXiv:2301.01956v1 [cs.CV] 5 Jan 2023
77
+
78
+ [Figure 1 panels: support set in the source domain (sketch) with classes sailboat, bed, glasses, television, and snowman; query set in the target domain (clipart); bar plots of query-to-class similarity for local features vs. semantic features (ours).]
+ Figure 2: Illustration of the process for cross-domain self-
115
+ training in TSECS. Different shapes represent different do-
116
+ mains. We first select the ‘confidence’ target samples (e.g.,
117
+ a) that are very similar to support classes, and then regard
118
+ them as the new class prototypes to further classify the other
119
+ target samples (e.g., b, c). This process is executed itera-
120
+ tively with using class matching loss to narrow the distance
121
+ of query images and their most similar support classes.
122
+ features to be more discriminative and relevant to classifica-
123
+ tion, i.e., high-level semantic features, and meanwhile align
124
+ the semantic features for domain adaptation. Therefore,
125
+ we propose a novel task-specific semantic feature method
126
+ (TSECS) that learns the semantic features for each task by
127
+ clustering the local features of support set and query set. To
128
+ obtain the related semantics from previous tasks, the cluster
129
+ centroids of the current task are then fused by cross-attention
130
+ with that of the previous task to generate high-level semantic
131
+ features to boost classification performance.
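A schematic sketch of this step (our illustration in PyTorch-style Python; function names, the number of clusters and the attention scaling are assumptions, not the released TSECS code):

    import torch
    import torch.nn.functional as F

    def task_semantic_centroids(local_feats, k=16, iters=10):
        """Plain k-means over pooled local descriptors [N, C] -> task-specific centroids [k, C]."""
        centroids = local_feats[torch.randperm(local_feats.size(0))[:k]].clone()
        for _ in range(iters):
            assign = torch.cdist(local_feats, centroids).argmin(dim=1)
            for j in range(k):
                mask = assign == j
                if mask.any():
                    centroids[j] = local_feats[mask].mean(dim=0)
        return centroids

    def fuse_with_previous(curr, prev):
        """Cross-attention: current task centroids attend to the previous task's centroids."""
        attn = F.softmax(curr @ prev.t() / curr.size(1) ** 0.5, dim=1)
        return curr + attn @ prev   # fused high-level semantic features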
132
+ Moreover, for the domain shift between source and tar-
133
+ get domains, many domain adaptation methods (Saito et al.
134
+ 2018)(Tzeng et al. 2017)(Tzeng et al. 2014) reduced the dis-
135
+ tribution discrepancy between domains by using a discrim-
136
+ inator trained adversarially against the feature embedding. However, this
137
+ way could fail in aligning the samples of the same class be-
138
+ tween domains due to label missing in target domain, which
139
+ could make the classes of two domains mismatched and thus
140
+ affect the classification. Therefore, we aim to align the high-
141
+ level semantic features by minimizing the KL divergence
142
+ of the semantic feature distributions between domains, and
143
+ meanwhile design a cross-domain self-training strategy to
144
+ train the classifier in target domain.
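A minimal sketch of such a KL-divergence alignment term (our simplified reading, with an assumed temperature and shared centroid set, not the authors' exact loss):

    import torch
    import torch.nn.functional as F

    def kl_alignment_loss(src_feats, tgt_feats, centroids, tau=0.1):
        """KL divergence between the two domains' soft assignments to shared semantic centroids."""
        p_src = F.softmax(src_feats @ centroids.t() / tau, dim=1).mean(dim=0)
        p_tgt = F.softmax(tgt_feats @ centroids.t() / tau, dim=1).mean(dim=0)
        return F.kl_div(p_tgt.log(), p_src, reduction="sum")   # KL(source || target)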
145
+ We hypothesize that there are usually several ‘confidence’
146
+ samples in target domain that could be classified correctly by
147
+ support set in source domain, in other words, they are very
148
+ similar to their class prototypes in source domain. Mean-
149
+ while, the target domain samples in the same class are more
150
+ similar to each other than that of other classes. Based on this,
151
+ we regard these ‘confidence’ samples in the target domain as
152
+ new prototypes of the classes, which replace those from the
153
+ support set of source domain. As shown in Fig. 2, several
154
+ ‘confidence’ samples (e.g., a) can be selected as prototypes
155
+ of their similar classes for classification (e.g., b and c) in tar-
156
+ get domain. Moreover, the process is conducted iteratively
157
+ by using class matching loss for better domain alignment.
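The iterative selection described above can be sketched as follows (illustrative Python; the confidence threshold and number of rounds are assumed, not taken from the paper):

    import torch

    def self_train_prototypes(protos, tgt_feats, rounds=3, thresh=0.8):
        """Iteratively replace source-domain class prototypes with confident target samples."""
        pseudo = None
        for _ in range(rounds):
            sim = torch.cosine_similarity(tgt_feats[:, None, :], protos[None, :, :], dim=-1)
            conf, pseudo = sim.max(dim=1)                  # image-to-class similarity
            for c in range(protos.size(0)):
                picked = (pseudo == c) & (conf > thresh)   # the 'confidence' target samples
                if picked.any():
                    protos[c] = tgt_feats[picked].mean(dim=0)
        return protos, pseudo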
158
+ In sum, we propose the novel method, namely TSECS,
159
+ for FS-UDA. It refines the local features of convolutional
160
+ network to generate specific semantic features of each task,
161
+ and meanwhile perform cross-domain self-training to trans-
162
+ port labels from support set in the source domain to query
163
+ set in the target domain to effectively classify the samples in
164
+ target domain. Our contributions can be summarized as:
165
+ (1) A novel solution for FS-UDA. TSECS aims to learn
166
+ high-level semantic features for classification and do-
167
+ main alignment, which could be regarded as a more ef-
168
+ fective and efficient way than using local features.
169
+ (2) Task-specific semantic embedding for few-shot set-
170
+ ting. It can be seamlessly added to existing FSL/FS-UDA
171
+ models, which could alleviate the bias of classification.
172
+ (3) Cross-domain self-training for domain alignment. It
173
+ is designed to bring the samples of the same class close,
174
+ which could guide effective domain alignment.
175
+ We conduct extensive experiments on DomainNet. Our
176
+ method significantly outperforms SOTA methods in FS-
177
+ UDA by a large margin up to ∼ 10%.
178
+ Related Works
179
+ Unsupervised domain adaptation. The conventional UDA
180
+ methods aim to reduce discrepancy between source domain
181
+ and target domain in the feature space and utilize suffi-
182
+ ciently labeled source domain data to classify data from tar-
183
+ get domain. The difference between unsupervised domain
184
+ adaptation methods often lies in the evaluation of domain
185
+ discrepancy and the objective function of model training.
186
+ Several researchers (Long et al. 2015)(Tzeng et al. 2014)
187
+ minimize the feature discrepancy by using maximum mean
188
+ discrepancy to measure the discrepancy between the distri-
189
+ bution of domains. Moreover, adversarial training (Tzeng
190
+ et al. 2017)(Ganin et al. 2016) to learn domain-invariant fea-
191
+ tures is usually used to tackle domain shift. Several meth-
192
+ ods (Tang, Chen, and Jia 2020)(Zou et al. 2019)(Zou et al.
193
+ 2018)(Kim et al. 2021)train the classifier in both source do-
194
+ main and target domain and utilize pseudo-labels from target
195
+ domain to calculate classification loss. Overall, these UDA
196
+ methods all require sufficiently labeled source domain data
197
+ to realize domain alignment and classification, but they per-
198
+ form poorly when labeled source domain data are insufficient.
199
+ Few-shot learning. Few-shot learning has two main
200
+ streams, metric-based and optimization-based approaches.
201
+ Optimization-based methods (Bertinetto et al. 2019)(Finn,
202
+ Abbeel, and Levine 2017)(Ravi and Larochelle 2017) usu-
203
+ ally train a meta learner over auxiliary dataset to learn
204
+ a general initialization model, which can fine-tune and
205
+ adapt to new tasks very soon. The main purpose of metric-
206
+ based methods (Li et al. 2019)(Snell, Swersky, and Zemel
207
+ 2017)(Vinyals et al. 2016)(Ye et al. 2020) is to learn a gen-
208
+ eralizable feature embedding for metric learning, which can
209
+ immediately adapt to new tasks without any fine-tuning or
210
+ retraining. Typically, ProtoNet (Snell, Swersky, and Zemel
211
+ 2017) learns the class prototypes in the support set and clas-
212
+ sifies the query images based on the maximum similarity
213
+ to these prototypes. Other than these metric-based methods
214
+ on feature maps, many methods on local features have ap-
215
+ peared. DN4 (Li et al. 2019) utilizes a large amount of local
216
+ features to measure the similarity between support and query
217
+
218
+ sets instead of flattening the feature map into a long vector.
+ [Figure 2: several ‘confidence’ samples in the target domain are selected as new class prototypes and then used for classification in the target domain, guided by a class matching loss.]
+ Based on local features, DeepEMD (Zhang et al. 2020)
238
+ adopts the Earth Mover’s Distance to measure the re-
239
+ lationship between query and support sets. Furthermore, a
240
+ few recent works focus on the issue of cross-domain FSL in
241
+ which domain shift exists between data of meta tasks and
242
+ new tasks. The baseline models (Chen et al. 2019) are used
243
+ to do cross-domain FSL. LFT (Tseng et al. 2020) performs
244
+ adaptive feature transformation to tackle the domain shift.
245
+ Few-shot unsupervised domain adaptation. Compared
246
+ with UDA, FS-UDA is to deal with many UDA tasks by
247
+ leveraging few labeled source domain samples for each. And
248
+ compared with cross-domain FSL, FS-UDA is capable of
249
+ handling the circumstances of no available labels in the tar-
250
+ get domain, and large domain gap between the support and
251
+ query sets in every task. For the one-shot UDA (Luo et al.
252
+ 2020), it deals with the case that only one unlabeled target
253
+ sample is available, but does not require the source domain
254
+ to be few-shot, which is different from ours. Recently, there
255
+ are a few attempts in FS-UDA. PCS (Yue et al. 2021) per-
256
+ forms prototype self-supervised learning in cross-domain,
257
+ but they require enough unlabeled source samples to learn
258
+ prototypes and ignore task-level transfer, which is also dif-
259
+ ferent from ours. meta-FUDA (Yang et al. 2022) lever-
260
+ ages meta learning-based optimization to perform task-level
261
+ transfer and domain-level transfer jointly. IMSE (Huang
262
+ et al. 2021) utilizes local features to learn similarity patterns
263
+ for cross-domain similarity measurement. However, they did
264
+ not consider that local features could introduce noise or bias
+ that affects classification and domain alignment. Thus, we pro-
266
+ pose task-specific semantic features to solve this problem.
267
+ Methodology
268
+ Problem Definition
269
+ An N-way, K-shot FS-UDA task. Table 1 shows the main
270
+ symbols used in this paper. The FS-UDA setting includes
271
+ two domains: a source domain S and a target domain T.
272
+ An N-way, K-shot FS-UDA task includes a support set XS
273
+ from S and a query set QT from T. The support set XS
274
+ contains N classes and K samples per class in the source
275
+ domain. The query set QT contains the same N classes as
276
+ in XS and Nq target domain samples per class. To classify
277
+ query images in QT to the correct class in XS, it is popular
278
+ to train a general model from base classes to adapt to handle
279
+ new N-way, K-shot FS-UDA tasks for testing.
280
+ Auxiliary dataset and episodic training. As in (Huang
281
+ et al. 2021), the base classes are collected from an auxil-
282
+ iary dataset Daux to perform episodic training to learn the
283
+ general model. Note that the base classes in Daux are com-
284
+ pletely different from new classes in testing tasks, which are
285
+ unseen during episodic training. Moreover, Daux includes
286
+ labeled source domain data and unlabeled target domain
287
+ data for FS-UDA. We construct large amounts of episodes,
288
+ each containing {XS, QS, QT } as in (Huang et al. 2021), to
289
+ simulate the testing tasks for task-level generalization. Note
290
+ that QS is introduced into episodic training to calculate clas-
291
+ sification loss and perform domain alignment with QT .
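+ As a rough illustration of how such an episode might be sampled from the auxiliary dataset, the sketch below draws N base classes, K labeled source-domain images per class for the support set, and query images from both domains; the data-access structures (aux_source, aux_target) and the sampling sizes are assumptions for illustration, not the authors' exact procedure.
+ import random
+ def sample_episode(aux_source, aux_target, n_way=5, k_shot=1, n_query=15):
+     # aux_source: dict mapping each base class to its labeled source-domain images
+     # aux_target: list of unlabeled target-domain images of the base classes
+     classes = random.sample(list(aux_source.keys()), n_way)
+     support, query_s = [], []
+     for c in classes:
+         imgs = random.sample(aux_source[c], k_shot + n_query)
+         support.extend((x, c) for x in imgs[:k_shot])      # X_S: labeled support set
+         query_s.extend((x, c) for x in imgs[k_shot:])      # Q_S: source-domain queries
+     query_t = random.sample(aux_target, n_way * n_query)   # Q_T: unlabeled target queries
+     return support, query_s, query_t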
292
+ The flowchart of our method. Fig. 3 illustrates our
293
+ Table 1: Notations
+ N ∈ R: the number of classes in the task.
+ K ∈ R: the number of samples per class in the support set.
+ XS, QS, QT: the support set of the source domain, and the query sets of the source and target domains.
+ H, W, d ∈ R: the height, width, and channel of the feature map.
+ L ∈ R^{HW×d}: the local feature vectors in the feature map.
+ k ∈ R: the number of semantic clusters for an episode.
+ C ∈ R^{k×d}: the centroids of the clusters.
+ F; ˆF; ˆFXS, ˆFQS, ˆFQT: the semantic feature map, the semantic features, and their parts for the support and query sets in both domains.
+ M^c_q ∈ R^{H×W×N}: the 3-D similarity matrix for classification.
+ p^c_q ∈ R^{KHW}, p^i_q ∈ R^{HW}: similarity pattern vectors of a query image q with a support class c and with a support image i.
+ p^{pos}_q, p^{neg}_q ∈ R^{KHW}: similarity patterns of q with its most similar class and with the second most similar one.
+ µA, µB ∈ R^{HW×d}: the means of semantic features or similarity patterns.
+ ΣA, ΣB ∈ R^{HW×HW}: covariance matrices of semantic features or similarity patterns.
+ λsfa, λspa, λclm: weight parameters of the three loss terms in Eq. (6).
337
+ method for 5-way, 1-shot FS-UDA tasks. In each episode,
338
+ a support set (XS) and two query sets (QS and QT ) are first
339
+ through the convolution network (e.g., ResNet) to extract
340
+ their local features. Then, the task-specific semantic embed-
341
+ ding module refines the local features to generate semantic
342
+ features, which is computationally efficient due to dimension
343
+ reduction. Also, based on semantic features of QS and QT ,
344
+ we leverage their similarity patterns (Huang et al. 2021) to
345
+ calculate image-to-class similarity for classification with the
346
+ loss Lcls. To improve its performance, cross-domain self-
347
+ training module is performed to introduce the class proto-
348
+ types of target domain and train a target domain classifier
349
+ with a class matching loss Lclm. In addition, the seman-
350
+ tic features and similarity patterns from both domains are
351
+ further aligned by calculating their alignment losses Lsfa
352
+ and Lspa, respectively. Finally, the losses above are back-
353
+ propagated to update our model. After episodic training over
354
+ all episodes, we utilize the learned model to test new FS-
355
+ UDA tasks. Then, we calculate the averaged classification
356
+ accuracy on these tasks for performance evaluation.
357
+ Task-specific Semantic Feature Learning
358
+ Most FSL methods and FS-UDA methods learned local fea-
359
+ tures from convolutional networks for classification. How-
360
+ ever, we found that the local features could introduce noise
361
+ or bias that is harmful to classification and domain alignment.
362
+ Thus, we aim to refine the local features to generate high-
363
+ level semantic features for each task. In the following, we
364
+ will introduce our semantic feature embedding module.
365
+ First of all, in each episode, all local features L
366
+
367
+ R(|XS|+|QS|+|QT |)HW ×d are extracted from the convolu-
368
+ tional network, where | · | is the number of samples in a
369
+ set. Then, we cluster the local features to generate different
370
+ semantic clusters for support set and query set, respectively,
371
+ since clustering the two sets together could result in the clus-
372
+ ters that relate to the domains due to the presence of large do-
373
+ main gap. For simplification, we adopt K-means for cluster-
374
+ ing, and meanwhile utilize the singular value decomposition
375
+ (SVD) to adaptively take the number of eigenvalues greater
376
+ than a certain threshold as the cluster number k (k ≪ d) for
377
+ each task. Afterwards, we calculate the task-specific seman-
378
+
379
+ Figure 3: Illustration of our method training per episode for 1-shot FS-UDA tasks. First, support classes and query images
380
+ from both domains are through a convolution network to extract their local features, followed by the task-specific semantic
381
+ embedding module to learn high-level semantic features. Then, these semantic features are fed into the cross-domain self-
382
+ training module to update the class prototypes for target domain classification and calculate the class matching loss Lclm.
383
+ Meanwhile, these semantic features are also used to generate similarity patterns in IMSE (Huang et al. 2021) for classification
384
+ loss Lcls. In addition, both semantic features and similarity patterns from both domains are aligned by the domain alignment
385
+ module with the alignment losses Lsfa and Lspa, respectively. Finally, all the losses are backpropagated to update our model.
386
+ tic feature map F ∈ R(|XS|+|QS|+|QT |)HW ×k by measuring
387
+ the Cosine similarity between the local features L and the
388
+ centroids C ∈ R^{k×d} of all semantic clusters, i.e., $F = \frac{L}{\|L\|_2} \cdot \frac{C^{\top}}{\|C\|_2}$. Finally, we split F into 2×2 blocks along the height and
+ width dimensions of the feature map, and then concatenate
+ the four blocks together along the channel to generate se-
+ mantic features $\hat{F} \in \mathbb{R}^{\frac{1}{4}(|X_S|+|Q_S|+|Q_T|)HW \times 4k}$. This is a
398
+ simple yet effective way to maintain discriminative ability
399
+ and spatial information of semantic features.
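+ To make this step concrete, here is a minimal PyTorch-style sketch of the embedding, assuming a local feature map of shape (B, d, H, W) with even H and W, a given cluster number k, and scikit-learn's KMeans; it illustrates the idea under these assumptions rather than reproducing the authors' implementation (in particular, the SVD-based choice of k is not shown).
+ import torch
+ import torch.nn.functional as F
+ from sklearn.cluster import KMeans
+ def task_specific_semantic_embedding(feat_map, k):
+     # feat_map: (B, d, H, W) local features of the whole episode
+     B, d, H, W = feat_map.shape
+     local = feat_map.permute(0, 2, 3, 1).reshape(-1, d)               # (B*H*W, d)
+     km = KMeans(n_clusters=k, n_init=4).fit(local.detach().cpu().numpy())
+     centroids = torch.as_tensor(km.cluster_centers_, dtype=local.dtype,
+                                 device=local.device)                  # (k, d)
+     # Cosine similarity between each local feature and each cluster centroid.
+     sem = F.normalize(local, dim=1) @ F.normalize(centroids, dim=1).t()
+     sem = sem.reshape(B, H, W, k)
+     # Split into 2x2 spatial blocks and concatenate them along the channel axis.
+     blocks = [sem[:, i * H // 2:(i + 1) * H // 2, j * W // 2:(j + 1) * W // 2, :]
+               for i in range(2) for j in range(2)]
+     return torch.cat(blocks, dim=-1)                                  # (B, H/2, W/2, 4k)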
400
+ Moreover, to leverage the semantics from previous tasks
401
+ to guide the semantic feature learning of the current task, we
402
+ utilize the centroids of previous clusters to update the initial-
403
+ ization of clustering centroids by cross-attention (Li et al.
404
+ 2020). This makes K-means clustering converge rapidly.
405
+ After obtaining the semantic features ˆF, we use them for
406
+ domain alignment and classification. Firstly, ˆF is partitioned
407
+ into ˆFXS, ˆFQS, ˆFQT along with the first dimension. Then,
408
+ we align ˆFQS and ˆFQT by minimizing the KL divergence of
409
+ their distributions that will be introduced later. Meanwhile,
410
+ we utilize ˆFXS, ˆFQS and ˆFQT to build 3-D similarity matrix
411
+ M c
412
+ q (Huang et al. 2021) between support and query sets. Fi-
413
+ nally, we calculate the similarity pattern $p^c_q$ (measuring the
415
+ similarity between query sample q and support class c) for
416
+ classification (Huang et al. 2021). The classification loss us-
417
+ ing cross-entropy can be written by:
418
+ $\mathcal{L}_{cls} = -\frac{1}{|Q_S|}\sum_{q \in Q_S} \log\Big(\frac{\exp(\mathbf{1} \cdot p^c_q)}{\sum_{i=1}^{K}\exp(\mathbf{1} \cdot p^i_q)}\Big) \qquad (1)$
430
+ Cross-domain Self-training
431
+ Since there is large domain shift between source and target
432
+ domains, as well as label missing in target domain, adver-
433
+ sarial domain adaptation on low-level local features cannot
434
+ make samples of the same class between domains close, and
435
+ thus could make the classes of two domains mismatched.
436
+ To alleviate the mismatching issue, we aim to find the
437
+ most similar ‘confidence’ samples in QT with XS to guide
438
+ classification in target domain. We assume that there usually
+ exist ‘confidence’ samples in QT that can be clas-
+ sified correctly by XS when the distributions between do-
441
+ mains are aligned. We iteratively select the ‘confidence’
442
+ samples in QT as the new prototypes to replace those in XS
+ for classification, as shown in Fig. 2. We call this process
444
+ cross-domain self-training. The process can find more ‘con-
445
+ fidence’ samples from QT than that in XS for the same
446
+ class, which could correct some misclassified samples in
447
+ QT , thereby lightening the impact of domain gap.
448
+ Moreover, to improve the performance of the target do-
449
+ main classifier, we aim to make target domain samples q
450
+ in QT closer to their most similar class and meanwhile far
451
+ away from the other classes. Thus, we first calculate its sim-
452
+ ilarity patterns $p^{pos}_q$ (with the most similar class) and $p^{neg}_q$
+ (with the second similar class), and then design the class
457
+ matching loss with a margin m, which can be written by
458
+ $\mathcal{L}_{clm} = \sum_{q \in Q_T} \max\big(\mathrm{softmax}(p^{neg}_q) - \mathrm{softmax}(p^{pos}_q) + m,\ 0\big), \qquad (2)$
467
+ where the similarity to the most similar class should be
468
+ greater by m than the second similar class.
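+ A simplified sketch of these two steps is given below: it selects ‘confidence’ target-domain samples whose best class similarity exceeds a threshold as new prototypes, and computes the margin loss of Eq. (2) from per-class similarity scores. Treating the similarities as one scalar per class (rather than full similarity pattern vectors) and the default threshold and margin values are simplifying assumptions for illustration.
+ import torch
+ import torch.nn.functional as F
+ def select_confidence_prototypes(target_feats, class_sims, threshold=1.7):
+     # target_feats: (Nq, D) semantic features of target-domain queries
+     # class_sims:   (Nq, N) similarity of each target query to the N support classes
+     best_sim, best_cls = class_sims.max(dim=1)
+     conf = best_sim > threshold                       # 'confidence' samples
+     prototypes = {}
+     for c in best_cls[conf].unique():
+         sel = conf & (best_cls == c)
+         prototypes[int(c)] = target_feats[sel].mean(dim=0)   # new prototype of class c
+     return prototypes
+ def class_matching_loss(class_sims, margin=1.5):
+     # Push the most similar class above the second most similar one by a margin (Eq. (2)).
+     probs = F.softmax(class_sims, dim=1)
+     top2 = probs.topk(2, dim=1).values                # columns: [most similar, second]
+     return torch.clamp(top2[:, 1] - top2[:, 0] + margin, min=0).sum()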
469
+ Two-level Domain Alignment
470
+ Conventional adversarial domain adaptation methods (Ganin et al. 2016)(Tzeng et al. 2017) iteratively train a
476
+ discriminator to align the distribution of domains by adver-
477
+ sarial training among tasks. However, they cannot be used
478
+ to align the semantic features, because our semantic features
479
+ are relevant to tasks, the semantics of the same channel
480
+
481
+ could be varied for different tasks. Meanwhile, symmetrical
522
+ alignment could bring the inference information of the
523
+ target domain to the source domain (Li et al. 2020). Thus,
524
+ we use asymmetrical KL divergence to align the distribution
525
+ of domains on both semantic features and similarity patterns
526
+ within a task. Then, KL divergence can be calculated by:
527
+ $\mathrm{KL}(A, B) = \frac{1}{2}\Big[\mathrm{tr}(\Sigma_A^{-1}\Sigma_B) + \ln\frac{|\Sigma_A|}{|\Sigma_B|} + (\mu_A - \mu_B)\Sigma_A^{-1}(\mu_A - \mu_B)^{\top} - d\Big], \qquad (3)$
539
+ where µA, µB, ΣA and ΣB are the mean vectors and the co-
540
+ variance matrices of sample matrix A and B, respectively.
541
+ Thus, we minimize the KL divergence between semantic
542
+ features $\hat{F}_{Q_S}$ and $\hat{F}_{Q_T}$ by
+ $\mathcal{L}_{sfa} = \mathrm{KL}(\hat{F}_{Q_S}, \hat{F}_{Q_T}). \qquad (4)$
+ Meanwhile, we also minimize the KL divergence to align
+ the similarity patterns $\{p^c_{q_S}\}$ of $Q_S$ and $\{p^c_{q_T}\}$ of $Q_T$ with
+ class c, which can be written by
+ $\mathcal{L}_{spa} = \sum_{c=1}^{N} \mathrm{KL}(\{p^c_{q_S}\}, \{p^c_{q_T}\}). \qquad (5)$
558
+ In sum, we combine all the above losses, w.r.t. classifi-
559
+ cation (Eq. (1)), class matching (Eq. (2)) and KL-based do-
560
+ main alignment (Eqs. (4) and (5)) to train our model on many
561
+ episodes. The total objective function can be written by:
562
+ $\min\ \mathcal{L}_{cls} + \lambda_{sfa}\mathcal{L}_{sfa} + \lambda_{spa}\mathcal{L}_{spa} + \lambda_{clm}\mathcal{L}_{clm}, \qquad (6)$
564
+ where the hyper-parameters λsfa, λspa and λclm are intro-
565
+ duced to balance the effect of different loss terms.
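+ Per episode, the total objective could then be assembled roughly as follows; the weight values mirror those reported later in the experiments, and the individual loss terms are assumed to be computed as sketched above.
+ # One episodic training step for Eq. (6); `optimizer` and the four losses are assumed given.
+ total_loss = (loss_cls
+               + 0.1  * loss_sfa     # lambda_sfa
+               + 0.05 * loss_spa     # lambda_spa
+               + 0.01 * loss_clm)    # lambda_clm
+ optimizer.zero_grad()
+ total_loss.backward()
+ optimizer.step()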
566
+ Experiment
567
+ DomainNet dataset. We conduct extensive experiments on a
568
+ multi-domain benchmark dataset DomainNet to demonstrate
569
+ the efficacy of our method. It was released in 2019 for the re-
570
+ search of multi-source domain adaptation (Peng et al. 2019).
571
+ It contains 345 categories and six domains per category, i.e.,
572
+ quickdraw, clipart, real, sketch, painting and infograph do-
573
+ mains. In our experiments, we follow the setting of IMSE
574
+ in (Huang et al. 2021) to remove the data-insufficient domain
+ infograph. There are 20 combinations in total for evaluation,
576
+ and the dataset is split into 217, 43 and 48 categories for
577
+ episodic training, model validation and testing new tasks,
578
+ respectively. Note that in each split every category contains
579
+ images from all five domains.
580
+ Network architecture and setting. We employ ResNet-
581
+ 12 as the backbone of feature embedding network, which is
582
+ widely used in few-shot learning (Huang et al. 2021) (Gi-
583
+ daris et al. 2020). We obtain semantic features by first clus-
584
+ tering the local features from each class of support set and
585
+ two query sets and then concatenating them. During this pro-
586
+ cess, we adopt cross-attention that consists of three convo-
587
+ lution parameters to generate (Q, K, V ) for attention cal-
588
+ culation. In cross-domain self-training module, we set the
589
+ threshold 1.7 of similarity score to select the ‘confidence’
590
+ samples in target domain. The margin m in Eq. (2) is empir-
591
+ ically set to 1.5. In addition, we follow the setting of IMSE
592
+ (Huang et al. 2021) to obtain similarity patterns. The hyper-
593
+ parameters λsfa, λspa and λclm are set to 0.1, 0.05 and 0.01,
594
+ by grid search, respectively.
595
+ Model training, validation and testing. To improve the
596
+ performance, before episodic training, the feature embed-
597
+ ding network is pretrained by using source domain data in
598
+ the auxiliary dataset, as in (Huang et al. 2021). Afterwards,
599
+ we perform episodic training on 280 episodes, following the
600
+ setting of (Huang et al. 2021). During episode training, the
601
+ total loss in Eq. (6) is minimized to optimize the network
602
+ parameters for each episode. Also, we employ Adam opti-
603
+ mizer with an initial learning rate of $10^{-4}$, and meanwhile re-
604
+ duce the learning rate by half every 280 episodes. For model
605
+ validation, we compare the performance of different model
606
+ parameters on 100 tasks, which are randomly sampled from
607
+ the validate set containing 43 categories. Then, we select the
608
+ model parameters with the best validation accuracy for test-
609
+ ing. During the testing, we randomly select 3000 tasks to
610
+ calculate the averaged top-1 accuracy on these tasks as the
611
+ evaluation criterion.
612
+ Comparison Experiments for FS-UDA
613
+ We conduct extensive experiments on DomainNet to com-
614
+ pare our method with five FSL methods (ProtoNet (Snell,
615
+ Swersky, and Zemel 2017), DN4 (Li et al. 2019), ADM
616
+ (Li et al. 2020), FEAT (Ye et al. 2020), DeepEMD (Zhang
617
+ et al. 2020)), three UDA methods, (MCD (Saito et al. 2018),
618
+ ADDA (Tzeng et al. 2017), DWT (Roy et al. 2019)), their
619
+ combinations and the most related method IMSE (Huang
620
+ et al. 2021). For fair comparison, the results of these above
621
+ methods are all reported from (Huang et al. 2021) with the
622
+ same setting. Moreover, we also modify IMSE by using
623
+ our semantic features for classification and domain adver-
624
+ sary, namely IMSE+TSE. For fair comparison, these com-
625
+ pared methods also pretrain the embedding network before
626
+ episodic training, and they are trained on 1000 episodes.
627
+ Comparison analysis. Table 2 shows the results of all
628
+ the compared methods for 20 cross-domain combinations,
629
+ which records the averaged classification accuracy of tar-
630
+ get domain samples over 3000 5-way 1-shot/5-shot FS-
631
+ UDA tasks. As observed, our TSECS achieves the best per-
632
+ formance for all combinations and their average. Specifi-
633
+ cally, the UDA and FSL baselines in the first two parts per-
634
+ form the worst. In the third part, the combination methods
635
+ with ADDA (Tzeng et al. 2017) perform domain adversarial
636
+ training each episode, thus generally better than the above
637
+ two parts, but still inferior to IMSE (Huang et al. 2021)
638
+ and our TSECS. This is because the combination methods
639
+ only perform domain alignment based on original feature
640
+ maps, not considering the alignment of similarity patterns
641
+ (related to classification predictions). Also, IMSE is worse
642
+ than IMSE+TSE, which indicates high-level semantic fea-
643
+ tures are more effective for FS-UDA than local features.
644
+ However, they are still much worse than our method, show-
645
+ ing the efficacy of high-level semantic features and cross-
646
+ domain self-training for FS-UDA.
647
+ On the other hand, we can see that the 20 cross-domain
648
+ combinations have considerably different performances.
649
+ This is because several domains (e.g., quickdraw) are sig-
650
+ nificantly different from other domains, while several other
651
+ domains (e.g., real, clipart) share similar styles and
652
+ features. Thus, for most compared methods, the perfor-
653
+
654
+ Table 2: Comparison of our method with the related methods for 5-way 1-shot or 5-shot FS-UDA tasks. The first three blocks
655
+ and IMSE are reported from (Huang et al. 2021), while the last two are the variant of IMSE we designed and ours, respectively.
656
+ Each row represents the accuracy (%) of a compared method adapting between two domains, where the skt, rel, qdr, pnt, and
657
+ cli denote the sketch, real, quickdraw, painting, and clipart domains in DomainNet, respectively. The best results are in bold.
658
+ 5-way, 1-shot
659
+ Methods
660
+ skt ←→ rel
661
+ skt ←→ qdr
662
+ skt ←→ pnt
663
+ skt ←→ cli
664
+ rel ←→ qdr
665
+ rel ←→ pnt
666
+ rel ←→ cli
667
+ qdr ←→ pnt
668
+ qdr ←→ cli
669
+ pnt ←→ cli
670
+ avg
671
+ → / ←
672
+ → / ←
673
+ → / ←
674
+ → / ←
675
+ → / ←
676
+ → / ←
677
+ → / ←
678
+ → / ←
679
+ → / ←
680
+ → / ←
681
+ -
682
+ MCD
683
+ 48.07/37.74
684
+ 38.90/34.51
685
+ 39.31/35.59
686
+ 51.43/38.98
687
+ 24.17/29.85
688
+ 43.36/47.32
689
+ 44.71/45.68
690
+ 26.14/25.02
691
+ 42.00/34.69
692
+ 39.49/37.28
693
+ 38.21
694
+ ADDA
695
+ 48.82/46.06
696
+ 38.42/40.43
697
+ 42.52/39.88
698
+ 50.67/47.16
699
+ 31.78/35.47
700
+ 43.93/45.51
701
+ 46.30/47.66
702
+ 26.57/27.46
703
+ 46.51/32.19
704
+ 39.76/41.24
705
+ 40.91
706
+ DWT
707
+ 49.43/38.67
708
+ 40.94/38.00
709
+ 44.73/39.24
710
+ 52.02/50.69
711
+ 29.82/29.99
712
+ 45.81/50.10
713
+ 52.43/51.55
714
+ 24.33/25.90
715
+ 41.47/39.56
716
+ 42.55/40.52
717
+ 41.38
718
+ ProtoNet
719
+ 50.48/43.15
720
+ 41.20/32.63
721
+ 46.33/39.69
722
+ 53.45/48.17
723
+ 32.48/25.06
724
+ 49.06/50.30
725
+ 49.98/51.95
726
+ 22.55/28.76
727
+ 36.93/40.98
728
+ 40.13/41.10
729
+ 41.21
730
+ DN4
731
+ 52.42/47.29
732
+ 41.46/35.24
733
+ 46.64/46.55
734
+ 54.10/51.25
735
+ 33.41/27.48
736
+ 52.90/53.24
737
+ 53.84/52.84
738
+ 22.82/29.11
739
+ 36.88/43.61
740
+ 47.42/43.81
741
+ 43.61
742
+ ADM
743
+ 49.36/42.27
744
+ 40.45/30.14
745
+ 42.62/36.93
746
+ 51.34/46.64
747
+ 32.77/24.30
748
+ 45.13/51.37
749
+ 46.8/50.15
750
+ 21.43/30.12
751
+ 35.64/43.33
752
+ 41.49/40.02
753
+ 40.11
754
+ FEAT
755
+ 51.72/45.66
756
+ 40.29/35.45
757
+ 47.09/42.99
758
+ 53.69/50.59
759
+ 33.81/27.58
760
+ 52.74/53.82
761
+ 53.21/53.31
762
+ 23.10/29.39
763
+ 37.27/42.54
764
+ 44.15/44.49
765
+ 43.14
766
+ DeepEMD
767
+ 52.24/46.84
768
+ 42.12/34.77
769
+ 46.64/43.89
770
+ 55.10/49.56
771
+ 34.28/28.02
772
+ 52.73/53.26
773
+ 54.25/54.91
774
+ 22.86/28.79
775
+ 37.65/42.92
776
+ 44.11/44.38
777
+ 43.46
778
+ ADDA+ProtoNet
779
+ 51.30/43.43
780
+ 41.79/35.40
781
+ 46.02/41.40
782
+ 52.68/48.91
783
+ 37.28/27.68
784
+ 50.04/49.68
785
+ 49.83/52.58
786
+ 23.72/32.03
787
+ 38.54/44.14
788
+ 41.06/41.59
789
+ 42.45
790
+ ADDA+DN4
791
+ 53.04/46.08
792
+ 42.64/36.46
793
+ 46.38/47.08
794
+ 54.97/51.28
795
+ 34.80/29.84
796
+ 53.09/54.05
797
+ 54.81/55.08
798
+ 23.67/31.62
799
+ 42.24/45.24
800
+ 46.25/44.40
801
+ 44.65
802
+ ADDA+ADM
803
+ 51.87/45.08
804
+ 43.91/32.38
805
+ 47.48/43.37
806
+ 54.81/51.14
807
+ 35.86/28.15
808
+ 48.88/51.61
809
+ 49.95/54.29
810
+ 23.95/33.30
811
+ 43.59/48.21
812
+ 43.52/43.83
813
+ 43.76
814
+ ADDA+FEAT
815
+ 52.72/46.08
816
+ 47.00/36.94
817
+ 47.77/45.01
818
+ 56.77/52.10
819
+ 36.32/30.50
820
+ 49.14/52.36
821
+ 52.91/53.86
822
+ 24.76/35.38
823
+ 44.66/48.82
824
+ 45.03/45.92
825
+ 45.20
826
+ ADDA+DeepEMD
827
+ 53.98/47.55
828
+ 44.64/36.19
829
+ 46.29/45.14
830
+ 55.93/50.45
831
+ 37.47/30.14
832
+ 52.21/53.32
833
+ 54.86/54.80
834
+ 23.46/32.89
835
+ 39.06/46.76
836
+ 45.39/44.65
837
+ 44.75
838
+ IMSE
839
+ 57.21/51.30
840
+ 49.71/40.91
841
+ 50.36/46.35
842
+ 59.44/54.06
843
+ 44.43/36.55
844
+ 52.98/55.06
845
+ 57.09/57.98
846
+ 30.73/38.70
847
+ 48.94/51.47
848
+ 47.42/46.52
849
+ 48.86
850
+ IMSE+TSE
851
+ 60.71/56.15
852
+ 53.78/48.57
853
+ 56.50/48.59
854
+ 61.59/56.59
855
+ 45.48/49.45
856
+ 55.44/57.45
857
+ 59.60/59.52
858
+ 37.94/39.83
859
+ 58.83/56.22
860
+ 49.19/51.01
861
+ 52.79
862
+ TSECS (ours)
863
+ 65.00/58.22
864
+ 62.25/51.97
865
+ 56.51/53.70
866
+ 69.45/64.59
867
+ 56.66/49.82
868
+ 58.76/63.18
869
+ 67.98/67.89
870
+ 38.26/46.15
871
+ 60.51/69.03
872
+ 54.40/52.76
873
+ 58.20
874
+ 5-way, 5-shot
875
+ Methods
876
+ skt ←→ rel
877
+ skt ←→ qdr
878
+ skt ←→ pnt
879
+ skt ←→ cli
880
+ rel ←→ qdr
881
+ rel ←→ pnt
882
+ rel ←→ cli
883
+ qdr ←→ pnt
884
+ qdr ←→ cli
885
+ pnt ←→ cli
886
+ avg
887
+ → / ←
888
+ → / ←
889
+ → / ←
890
+ → / ←
891
+ → / ←
892
+ → / ←
893
+ → / ←
894
+ → / ←
895
+ → / ←
896
+ → / ←
897
+ -
898
+ MCD
899
+ 66.42/47.73
900
+ 51.84/39.73
901
+ 54.63/47.75
902
+ 72.17/53.23
903
+ 28.02/33.98
904
+ 55.74/66.43
905
+ 56.80/63.07
906
+ 28.71/29.17
907
+ 50.46/45.02
908
+ 53.99/48.24
909
+ 49.65
910
+ ADDA
911
+ 66.46/56.66
912
+ 51.37/42.33
913
+ 56.61/53.95
914
+ 69.57/65.81
915
+ 35.94/36.87
916
+ 58.11/63.56
917
+ 59.16/65.7
918
+ 723.16/33.50
919
+ 41.94/43.40
920
+ 55.21/55.86
921
+ 51.76
922
+ DWT
923
+ 67.75/54.85
924
+ 48.59/40.98
925
+ 55.40/50.64
926
+ 69.87/59.33
927
+ 36.19/36.45
928
+ 60.26/68.72
929
+ 62.92/67.28
930
+ 22.64/32.34
931
+ 47.88/50.47
932
+ 49.76/52.52
933
+ 51.74
934
+ ProtoNet
935
+ 65.07/56.21
936
+ 52.65/39.75
937
+ 55.13/52.77
938
+ 65.43/62.62
939
+ 37.77/31.01
940
+ 61.73/66.85
941
+ 63.52/66.45
942
+ 20.74/30.55
943
+ 45.49/55.86
944
+ 53.60/52.92
945
+ 51.80
946
+ DN4 (Li et al. 2019)
947
+ 63.89/51.96
948
+ 48.23/38.68
949
+ 52.57/51.62
950
+ 62.88/58.33
951
+ 37.25/29.56
952
+ 58.03/64.72
953
+ 61.10/62.25
954
+ 23.86/33.03
955
+ 41.77/49.46
956
+ 50.63/48.56
957
+ 49.41
958
+ ADM
959
+ 66.25/54.20
960
+ 53.15/35.69
961
+ 57.39/55.60
962
+ 71.73/63.42
963
+ 44.61/24.83
964
+ 59.48/69.17
965
+ 62.54/67.39
966
+ 21.13/38.83
967
+ 42.74/58.36
968
+ 56.34/52.83
969
+ 52.78
970
+ FEAT
971
+ 67.91/58.56
972
+ 52.27/40.97
973
+ 59.01/55.44
974
+ 69.37/65.95
975
+ 40.71/28.65
976
+ 63.85/71.25
977
+ 65.76/68.96
978
+ 23.73/34.02
979
+ 42.84/53.56
980
+ 57.95/54.84
981
+ 53.78
982
+ DeepEMD
983
+ 67.96/58.11
984
+ 53.34/39.70
985
+ 59.31/56.60
986
+ 70.56/64.60
987
+ 39.70/29.95
988
+ 62.99/70.93
989
+ 65.07/69.06
990
+ 23.86/34.34
991
+ 45.48/53.93
992
+ 57.60/55.61
993
+ 53.93
994
+ ADDA+ProtoNet
995
+ 66.11/58.72
996
+ 52.92/43.60
997
+ 57.23/53.90
998
+ 68.44/61.84
999
+ 45.59/38.77
1000
+ 60.94/69.47
1001
+ 66.30/66.10
1002
+ 25.45/41.30
1003
+ 46.67/56.22
1004
+ 58.20/52.65
1005
+ 54.52
1006
+ ADDA+DN4
1007
+ 63.40/52.40
1008
+ 48.37/40.12
1009
+ 53.51/49.69
1010
+ 64.93/58.39
1011
+ 36.92/31.03
1012
+ 57.08/65.92
1013
+ 60.74/63.13
1014
+ 25.36/34.23
1015
+ 48.52/51.19
1016
+ 52.16/49.62
1017
+ 50.33
1018
+ ADDA+ADM
1019
+ 64.64/54.65
1020
+ 52.56/33.42
1021
+ 56.33/54.85
1022
+ 70.70/63.57
1023
+ 39.93/27.17
1024
+ 58.63/68.70
1025
+ 61.96/67.29
1026
+ 21.91/39.12
1027
+ 41.96/59.03
1028
+ 55.57/53.39
1029
+ 52.27
1030
+ ADDA+FEAT
1031
+ 67.80/56.71
1032
+ 60.33/43.34
1033
+ 57.32/58.08
1034
+ 70.06/64.57
1035
+ 44.13/35.62
1036
+ 62.09/70.32
1037
+ 57.46/67.77
1038
+ 29.08/44.15
1039
+ 49.62/63.38
1040
+ 57.34/52.13
1041
+ 55.56
1042
+ ADDA+DeepEMD
1043
+ 68.52/59.28
1044
+ 56.78/40.03
1045
+ 58.18/57.86
1046
+ 70.83/65.39
1047
+ 42.63/32.18
1048
+ 63.82/71.54
1049
+ 66.51/69.21
1050
+ 26.89/42.33
1051
+ 47.00/57.92
1052
+ 57.81/55.23
1053
+ 55.49
1054
+ IMSE
1055
+ 70.46/61.09
1056
+ 61.57/46.86
1057
+ 62.30/59.15
1058
+ 76.13/67.27
1059
+ 53.07/40.17
1060
+ 64.41/70.63
1061
+ 67.60/71.76
1062
+ 33.44/48.89
1063
+ 53.38/65.90
1064
+ 61.28/56.74
1065
+ 59.60
1066
+ IMSE+TSE
1067
+ 72.75/62.24
1068
+ 64.49/55.04
1069
+ 62.86/61.10
1070
+ 77.39/69.87
1071
+ 53.88/54.48
1072
+ 63.97/72.46
1073
+ 69.86/72.49
1074
+ 37.43/51.66
1075
+ 64.43/67.46
1076
+ 63.40/57.89
1077
+ 62.76
1078
+ TSECS (ours)
1079
+ 78.23/70.44
1080
+ 77.90/55.77
1081
+ 66.70/68.03
1082
+ 83.82/74.28
1083
+ 64.33/55.16
1084
+ 68.40/79.74
1085
+ 78.23/77.69
1086
+ 39.74/63.02
1087
+ 67.99/80.31
1088
+ 73.67/61.63
1089
+ 69.25
1090
+ Table 3: Ablation study (%) of the modules designed in
1091
+ TSECS, where the FS-UDA tasks are evaluated from a do-
1092
+ main (sketch) to the other four domains in DomainNet.
1093
+ Components
1094
+ Target Domains
1095
+ TSE
1096
+ catt
1097
+ CS
1098
+ cli
1099
+ rel
1100
+ qdr
1101
+ pnt
1102
+
1103
+ 61.98
1104
+ 60.00
1105
+ 52.21
1106
+ 51.62
1107
+
1108
+ 57.07
1109
+ 53.31
1110
+ 41.93
1111
+ 46.66
1112
+
1113
+
1114
+ 62.74
1115
+ 60.54
1116
+ 53.64
1117
+ 54.23
1118
+
1119
+
1120
+ 68.25
1121
+ 61.15
1122
+ 58.31
1123
+ 53.34
1124
+
1125
+
1126
+
1127
+ 69.45
1128
+ 65.00
1129
+ 62.25
1130
+ 56.51
1131
+ mance becomes relatively low when the domain gap is large.
1132
+ For example, from quickdraw to painting, it performs the
1133
+ worst among all the combinations because of the larger domain
1134
+ gap, but our TSECS outperforms IMSE and the other com-
1135
+ pared methods by 8% and 12%, respectively. We found that
1136
+ our method has the larger performance improvement over
1137
+ IMSE, for these combinations containing quickdraw, which
1138
+ shows the efficacy of our method for large domain gap. Also,
1139
+ like TSECS, IMSE+TSE performs much better than IMSE
1140
+ for large domain gap, which indicates the high-level seman-
1141
+ tic features could conduct domain adaptation better than lo-
1142
+ cal features. In sum, these results reflect the advantages of
1143
+ our TSECS to deal with domain shift and task generaliza-
1144
+ tion in FS-UDA, no matter how large the domain gap is.
1145
+ Ablation study of our method. We conduct various ex-
1146
+ periments on DomainNet to evaluate the effect of our mod-
1147
+ ules: task-specific semantic embedding (TSE), cross-domain
1148
+ self-training (CS) and cross-attention in TSE (catt). The ac-
1149
+ curacies on the four target domains are reported in Table
1150
+ 3. As seen, our method achieves the best performance when
1151
+ all three modules are used. The performance of the single
+ CS module is the worst, which shows that local features cannot align
1153
+ the distributions of the two domains, thus affecting cross-
1154
+ domain self-training. The module TSE is introduced into
1155
+ four combinations, all improving the performance, which
1156
+ validates the efficacy of our task-specific semantic features
1157
+ for FS-UDA again. Also, the addition of cross-attention into
1158
+ TSE further improves the performance, which can help
1159
+ discover more semantics from previous tasks.
1160
+ Ablation study of different losses. We conduct various
1161
+ experiments on DomainNet to further evaluate the effect of
1162
+ different losses in Eq. (6). Besides the classification loss
1163
+ (Lcls), we combine the remaining three loss terms: 1) se-
1164
+ mantic features alignment loss (Lsfa), 2) similarity pattern
1165
+ alignment loss (Lspa), and 3) class matching loss (Lclm).
1166
+ We evaluate 5-way 1-shot FS-UDA tasks from sketch to the
1167
+ other four domains, respectively, and their accuracies are re-
1168
+ ported in Table 4. As observed, the more the number of loss
1169
+ terms involved, the higher the accuracy. The combination of
1170
+ all the three losses is the best. For the single loss, both Lsfa
1171
+
1172
+ Table 4: Ablation study (%) of the three losses designed in
1173
+ TSECS, where the FS-UDA tasks are evaluated from a do-
1174
+ main (sketch) to the other four domains in DomainNet.
1175
+ Components
1176
+ Target Domains
1177
+ Lsfa
1178
+ Lspa
1179
+ Lclm
1180
+ cli
1181
+ rel
1182
+ qdr
1183
+ pnt
1184
+
1185
+ 66.67
1186
+ 58.84
1187
+ 56.91
1188
+ 43.28
1189
+
1190
+ 64.28
1191
+ 57.32
1192
+ 52.11
1193
+ 42.46
1194
+
1195
+ 66.83
1196
+ 58.29
1197
+ 56.51
1198
+ 44.25
1199
+
1200
+
1201
+ 66.64
1202
+ 62.64
1203
+ 57.41
1204
+ 53.40
1205
+
1206
+
1207
+ 68.04
1208
+ 63.98
1209
+ 59.13
1210
+ 55.39
1211
+
1212
+
1213
+ 67.61
1214
+ 62.47
1215
+ 53.07
1216
+ 54.14
1217
+
1218
+
1219
+
1220
+ 69.45
1221
+ 65.00
1222
+ 62.25
1223
+ 56.51
1224
+ Figure 4: Comparison of introducing our TSE module or not
1225
+ into two FSL methods with ADDA (Tzeng et al. 2017) com-
1226
+ bined, i.e., ADDA+ProtoNet and ADDA+DN4.
1227
+ and Lclm perform better than Lspa, and their combination is
1228
+ also considerably better than the other paired combinations,
1229
+ showing the efficacy of semantic feature domain alignment
1230
+ and class matching in target domain. Based on the above,
1231
+ adding Lspa further improves the performance, indicating
1232
+ positive effect of aligning the similarity patterns.
1233
+ Evaluation on the effect of our task-specific se-
1234
+ mantic embedding module on two FSL methods with
1235
+ ADDA (Tzeng et al. 2017) combined. Compared with
1236
+ ADDA+DN4 and ADDA+ProtoNet, we add our semantic
1237
+ embedding module (TSE) with the loss Lsfa into their fea-
1238
+ ture embedding models, and test them on 3000 new 5-way
1239
+ 1/5-shot FS-UDA tasks. For simplicity and clarity,
1240
+ we calculate the averaged accuracies from every domain to
1241
+ the other four domains and show them in Fig. 4. As seen,
1242
+ the methods using TSE generally perform better than that
1243
+ without it, which validates that the semantic embedding in
1244
+ TSE could generate more discriminative semantic features
1245
+ for classification than original local features. In addition, the
1246
+ performances of these methods are still far from our method
1247
+ because using ADDA is insufficient to align the domains and
1248
+ could result in class mismatching, but our method can effec-
1249
+ tively solve it by cross-domain self-training.
1250
+ Evaluation of dataset generalization. We evaluate the
1251
+ generalization of our model trained on DomainNet to adapt
1252
+ to a substantially different dataset miniImageNet. We mod-
1253
+ ify miniImageNet by transferring a half of real images (rel)
1254
+ into sketch images (skt) by MUNIT (Huang et al. 2018) to
1255
+ Table 5: Evaluation (%) of dataset generalization for 5-way
1256
+ 1-shot FS-UDA tasks between domains real and sketch, per-
1257
+ forming episodic training on DomainNet and testing on ex-
1258
+ panded dataset miniImageNet.
1259
+ Methods
1260
+ skt → rel
1261
+ rel → skt
1262
+ ADDA+DN4
1263
+ 44.01 ± 0.87
1264
+ 40.61 ± 0.90
1265
+ ADDA+DeepEMD
1266
+ 46.14 ± 0.82
1267
+ 45.91 ± 0.77
1268
+ IMSE
1269
+ 48.78 ± 0.78
1270
+ 48.52 ± 0.81
1271
+ TSECS (ours)
1272
+ 53.33 ± 1.08
1273
+ 49.83 ± 0.96
1274
+ Figure 5: The tSNE visualization of our TSECS using cross-
1275
+ domain self-training or not for a 5-way 5-shot FS-UDA task
1276
+ from sketch to clipart. The samples with different colors be-
1277
+ long to different classes, and the stars in the left and right
1278
+ figures represent the class centroids of support set and se-
1279
+ lected target domain query samples, respectively.
1280
+ produce two domains for FS-UDA. We compare our method
1281
+ with ADDA+DN4, ADDA+DeepEMD and IMSE for 5-way
1282
+ 1-shot FS-UDA tasks for rel ↔ skt. The results are shown
1283
+ in Table 5. As observed, our method outperforms the other
+ methods, especially for skt → rel. For rel → skt, our method
1285
+ is slightly better than IMSE, because the style of sketch im-
1286
+ ages in miniImageNet is relatively different from that in Do-
1287
+ mainNet, which could affect the learned semantic features.
1288
+ Visualization of our method using cross-domain self-
1289
+ training or not. We illustrate the tSNE results of a 5-way 5-
1290
+ shot FS-UDA task from sketch to clipart in Fig. 5. Note that
1291
+ the class prototypes in the left subfigure belong to the sup-
1292
+ port set in source domain, while those in the right subfigure
1293
+ are generated by ‘confidence’ samples in target domain. It
1294
+ is obvious that two class prototypes in the left subfigure are
1295
+ fully overlapped so that many samples could not be correctly
1296
+ classified. In contrast, the right subfigure has the better class
1297
+ prototypes, and samples from different classes are more dis-
1298
+ tinguishable. This shows the efficacy of our cross-domain
1299
+ self-training that finds ‘confidence’ samples to train the tar-
1300
+ get domain classifier and uses class matching loss Lclm to
1301
+ shorten the distance of samples of the same class.
1302
+ Conclusion
1303
+ In this paper, we propose a novel method TSECS for FS-
1304
+ UDA. We extract high-level semantic features rather than local fea-
1305
+ tures to measure the similarity of query images in target do-
1306
+ main to support classes in source domain. Moreover, we de-
1307
+ sign cross-domain self-training to train a target domain clas-
1308
+ sifier. In addition, asymmetrical KL-divergence is used to
1309
+ align the semantic features between domains. Extensive ex-
1310
+ periments on DomainNet show the efficacy of our TSECS,
1311
+ significantly improving the performance for FS-UDA.
1312
+
1313
+ Acknowledgments
1393
+ Wanqi Yang and Ming Yang are supported by Na-
1394
+ tional Natural Science Foundation of China (Grant Nos.
1395
+ 62076135, 62276138, 61876087). Lei Wang is supported
1396
+ by an Australian Research Council Discovery Project (No.
1397
+ DP200101289) funded by the Australian Government.
1398
+ References
1399
+ Bertinetto, L.; Henriques, J. F.; Torr, P.; and Vedaldi,
1400
+ A. 2019.
1401
+ Meta-learning with differentiable closed-form
1402
+ solvers. In International Conference on Learning Represen-
1403
+ tations, 1–8.
1404
+ Chen, W.-Y.; Liu, Y.-C.; Kira, Z.; Wang, Y.-C. F.; and
1405
+ Huang, J.-B. 2019. A Closer Look at Few-shot Classifica-
1406
+ tion. In International Conference on Learning Representa-
1407
+ tions, 1–16.
1408
+ Finn, C.; Abbeel, P.; and Levine, S. 2017. Model-Agnostic
1409
+ Meta-Learning for Fast Adaptation of Deep Networks. In
1410
+ Precup, D.; and Teh, Y. W., eds., Proceedings of the 34th
1411
+ International Conference on Machine Learning, volume 70
1412
+ of Proceedings of Machine Learning Research, 1126–1135.
1413
+ Ganin, Y.; Ustinova, E.; Ajakan, H.; Germain, P.; Larochelle,
1414
+ H.; Laviolette, F.; March, M.; and Lempitsky, V. 2016.
1415
+ Domain-Adversarial Training of Neural Networks. Journal
1416
+ of Machine Learning Research, 17(59): 1–35.
1417
+ Gidaris, S.; Bursuc, A.; Komodakis, N.; Perez, P.; and Cord,
1418
+ M. 2020. Learning Representations by Predicting Bags of
1419
+ Visual Words.
1420
+ In Proceedings of the IEEE/CVF Confer-
1421
+ ence on Computer Vision and Pattern Recognition (CVPR),
1422
+ 6926–6936.
1423
+ Huang, S.; Yang, W.; Wang, L.; Zhou, L.; and Yang, M.
1424
+ 2021.
1425
+ Few-Shot Unsupervised Domain Adaptation with
1426
+ Image-to-Class Sparse Similarity Encoding. In Proceedings
1427
+ of the 29th ACM International Conference on Multimedia,
1428
+ MM ’21, 677–685. New York, NY, USA: Association for
1429
+ Computing Machinery. ISBN 9781450386517.
1430
+ Huang, X.; Liu, M.-Y.; Belongie, S.; and Kautz, J. 2018.
1431
+ Multimodal Unsupervised Image-to-Image Translation. In
1432
+ Ferrari, V.; Hebert, M.; Sminchisescu, C.; and Weiss, Y.,
1433
+ eds., Computer Vision – ECCV 2018, 179–196. Cham:
1434
+ Springer International Publishing. ISBN 978-3-030-01219-
1435
+ 9.
1436
+ Kim, D.; Saito, K.; Oh, T.-H.; Plummer, B. A.; Sclaroff, S.;
1437
+ and Saenko, K. 2021. CDS: Cross-Domain Self-supervised
1438
+ Pre-training. In 2021 IEEE/CVF International Conference
1439
+ on Computer Vision (ICCV), 9103–9112.
1440
+ Li, W.; Wang, L.; Huo, J.; Shi, Y.; Gao, Y.; and Luo, J. 2020.
1441
+ Asymmetric Distribution Measure for Few-shot Learning. In
1442
+ Bessiere, C., ed., Proceedings of the Twenty-Ninth Interna-
1443
+ tional Joint Conference on Artificial Intelligence, IJCAI-20,
1444
+ 2957–2963. International Joint Conferences on Artificial In-
1445
+ telligence Organization. Main track.
1446
+ Li, W.; Wang, L.; Xu, J.; Huo, J.; Gao, Y.; and Luo, J. 2019.
1447
+ Revisiting Local Descriptor Based Image-To-Class Measure
1448
+ for Few-Shot Learning. In Proceedings of the IEEE/CVF
1449
+ Conference on Computer Vision and Pattern Recognition
1450
+ (CVPR), 7260–7268.
1451
+ Long, M.; Cao, Y.; Wang, J.; and Jordan, M. 2015. Learn-
1452
+ ing Transferable Features with Deep Adaptation Networks.
1453
+ In Bach, F.; and Blei, D., eds., Proceedings of the 32nd In-
1454
+ ternational Conference on Machine Learning, volume 37 of
1455
+ Proceedings of Machine Learning Research, 97–105. Lille,
1456
+ France: PMLR.
1457
+ Luo, Y.; Liu, P.; Guan, T.; Yu, J.; and Yang, Y. 2020. Ad-
1458
+ versarial Style Mining for One-Shot Unsupervised Domain
1459
+ Adaptation.
1460
+ In Larochelle, H.; Ranzato, M.; Hadsell, R.;
1461
+ Balcan, M.; and Lin, H., eds., Advances in Neural Informa-
1462
+ tion Processing Systems, volume 33, 20612–20623. Curran
1463
+ Associates, Inc.
1464
+ Peng, X.; Bai, Q.; Xia, X.; Huang, Z.; Saenko, K.; and
1465
+ Wang, B. 2019. Moment Matching for Multi-Source Do-
1466
+ main Adaptation. In 2019 IEEE/CVF International Confer-
1467
+ ence on Computer Vision (ICCV), 1406–1415.
1468
+ Ravi, S.; and Larochelle, H. 2017. Optimization as a Model
1469
+ for Few-Shot Learning.
1470
+ In International Conference on
1471
+ Learning Representations.
1472
+ Roy, S.; Siarohin, A.; Sangineto, E.; Bulo, S. R.; Sebe, N.;
1473
+ and Ricci, E. 2019. Unsupervised Domain Adaptation Using
1474
+ Feature-Whitening and Consensus Loss. In Proceedings of
1475
+ the IEEE/CVF Conference on Computer Vision and Pattern
1476
+ Recognition (CVPR), 9471–9480.
1477
+ Saito, K.; Watanabe, K.; Ushiku, Y.; and Harada, T. 2018.
1478
+ Maximum Classifier Discrepancy for Unsupervised Domain
1479
+ Adaptation.
1480
+ In Proceedings of the IEEE Conference on
1481
+ Computer Vision and Pattern Recognition (CVPR), 3723–
1482
+ 3732.
1483
+ Snell, J.; Swersky, K.; and Zemel, R. 2017. Prototypical Net-
1484
+ works for Few-shot Learning. In Guyon, I.; Luxburg, U. V.;
1485
+ Bengio, S.; Wallach, H.; Fergus, R.; Vishwanathan, S.; and
1486
+ Garnett, R., eds., Advances in Neural Information Process-
1487
+ ing Systems, volume 30, 4077–4087. Curran Associates, Inc.
1488
+ Tang, H.; Chen, K.; and Jia, K. 2020. Unsupervised Domain
1489
+ Adaptation via Structurally Regularized Deep Clustering. In
1490
+ Proceedings of the IEEE/CVF Conference on Computer Vi-
1491
+ sion and Pattern Recognition (CVPR).
1492
+ Tseng, H.-Y.; Lee, H.-Y.; Huang, J.-B.; and Yang, M.-H.
1493
+ 2020. Cross-Domain Few-Shot Classification via Learned
1494
+ Feature-Wise Transformation. In International Conference
1495
+ on Learning Representations.
1496
+ Tzeng, E.; Hoffman, J.; Saenko, K.; and Darrell, T. 2017.
1497
+ Adversarial Discriminative Domain Adaptation.
1498
+ In 2017
1499
+ IEEE Conference on Computer Vision and Pattern Recog-
1500
+ nition (CVPR), 2962–2971.
1501
+ Tzeng, E.; Hoffman, J.; Zhang, N.; Saenko, K.; and Darrell,
1502
+ T. 2014. Deep Domain Confusion: Maximizing for Domain
1503
+ Invariance. CoRR, abs/1412.3474: 1–9.
1504
+ Vinyals, O.; Blundell, C.; Lillicrap, T.; kavukcuoglu, k.; and
1505
+ Wierstra, D. 2016. Matching Networks for One Shot Learn-
1506
+ ing. In Lee, D.; Sugiyama, M.; Luxburg, U.; Guyon, I.; and
1507
+ Garnett, R., eds., Advances in Neural Information Process-
1508
+ ing Systems, volume 29, 3630–3638. Curran Associates, Inc.
1509
+ Yang, W.; Yang, C.; Huang, S.; Wang, L.; and Yang, M.
1510
+ 2022. Few-shot Unsupervised Domain Adaptation via Meta
1511
+
1512
+ Learning. In IEEE International Conference on Multimedia
1513
+ and Expo (ICME).
1514
+ Ye, H.-J.; Hu, H.; Zhan, D.-C.; and Sha, F. 2020. Few-Shot
1515
+ Learning via Embedding Adaptation With Set-to-Set Func-
1516
+ tions. In Proceedings of the IEEE/CVF Conference on Com-
1517
+ puter Vision and Pattern Recognition (CVPR), 8805–8814.
1518
+ Yue, X.; Zheng, Z.; Zhang, S.; Gao, Y.; Darrell, T.; Keutzer,
1519
+ K.; and Vincentelli, A. S. 2021. Prototypical Cross-Domain
1520
+ Self-Supervised Learning for Few-Shot Unsupervised Do-
1521
+ main Adaptation. In Proceedings of the IEEE/CVF Confer-
1522
+ ence on Computer Vision and Pattern Recognition (CVPR),
1523
+ 13834–13844.
1524
+ Zhang, C.; Cai, Y.; Lin, G.; and Shen, C. 2020.
1525
+ Deep-
1526
+ EMD: Few-Shot Image Classification With Differentiable
1527
+ Earth Mover’s Distance and Structured Classifiers. In Pro-
1528
+ ceedings of the IEEE/CVF Conference on Computer Vision
1529
+ and Pattern Recognition (CVPR), 12200–12210.
1530
+ Zou, Y.; Yu, Z.; Liu, X.; Kumar, B. V. K. V.; and Wang,
1531
+ J. 2019.
1532
+ Confidence Regularized Self-Training.
1533
+ In 2019
1534
+ IEEE/CVF International Conference on Computer Vision
1535
+ (ICCV), 5981–5990.
1536
+ Zou, Y.; Yu, Z.; Vijaya Kumar, B. V. K.; and Wang, J. 2018.
1537
+ Unsupervised Domain Adaptation for Semantic Segmenta-
1538
+ tion via Class-Balanced Self-training. In Ferrari, V.; Hebert,
1539
+ M.; Sminchisescu, C.; and Weiss, Y., eds., Computer Vision
1540
+ – ECCV 2018, 297–313. Cham: Springer International Pub-
1541
+ lishing. ISBN 978-3-030-01219-9.
1542
+
A9AzT4oBgHgl3EQf__9t/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
BNFIT4oBgHgl3EQf_iwr/content/2301.11415v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f32181a5d2da7bab9db35759db218937e50e88ebfddf794e2127a8752a3a96ee
3
+ size 480537
BNFIT4oBgHgl3EQf_iwr/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d73f19658a4227e74cee15efe1ce9500fc1f01616cad4a6d86ae786c5396db47
3
+ size 4128813
BNFIT4oBgHgl3EQf_iwr/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:859a594382b3c112b0b5a44142938e1540d837e77a866e677d177016b4e135df
3
+ size 155022
BtAzT4oBgHgl3EQfGPuX/content/2301.01025v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d65347cdb7df1c60e877267f7ef4e9e36d645407a6fafa4a768247c4060c73c
3
+ size 2300240
BtAzT4oBgHgl3EQfGPuX/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0662189c5a3771773ca34723b8283037440cdd6de7450e8b105cdff51f6142d0
3
+ size 227529
CtE3T4oBgHgl3EQfUgpf/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b2f888ad32fdd2489153f540d204339c5f6b5166861682b99440d695b824b76e
3
+ size 122290
D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7916752f7e85b013bb91dfb01d0c317d5c601bb09a35499c455187183275697
3
+ size 11552857
DdE1T4oBgHgl3EQfEAP6/content/tmp_files/2301.02886v1.pdf.txt ADDED
@@ -0,0 +1,710 @@
1
+ PERCEPTUAL–NEURAL–PHYSICAL SOUND MATCHING
2
+ Han Han, Vincent Lostanlen, and Mathieu Lagrange
3
+ Nantes Université, École Centrale Nantes, CNRS, LS2N, UMR 6004, F-44000 Nantes, France
4
+ ABSTRACT
5
+ Sound matching algorithms seek to approximate a target waveform
6
+ by parametric audio synthesis. Deep neural networks have achieved
7
+ promising results in matching sustained harmonic tones. However,
8
+ the task is more challenging when targets are nonstationary and inhar-
9
+ monic, e.g., percussion. We attribute this problem to the inadequacy
10
+ of the loss function. On one hand, mean square error in the parametric
11
+ domain, known as “P-loss”, is simple and fast but fails to accommo-
12
+ date the differing perceptual significance of each parameter. On the
13
+ other hand, mean square error in the spectrotemporal domain, known
14
+ as “spectral loss”, is perceptually motivated and serves in differen-
15
+ tiable digital signal processing (DDSP). Yet, spectral loss has more
16
+ local minima than P-loss and its gradient may be computationally
17
+ expensive; hence slow convergence. Against this conundrum, we
18
+ present Perceptual-Neural-Physical loss (PNP). PNP is the optimal
19
+ quadratic approximation of spectral loss while being as fast as P-loss
20
+ during training. We instantiate PNP with physical modeling synthesis
21
+ as decoder and joint time–frequency scattering transform (JTFS) as
22
+ spectral representation. We demonstrate its potential on matching
23
+ synthetic drum sounds in comparison with other loss functions.
24
+ Index Terms— auditory similarity, scattering transform, deep
25
+ convolutional networks, physical modeling synthesis.
26
+ 1. INTRODUCTION
27
+ Given an audio synthesizer g, the task of sound matching [1] consists
28
+ in retrieving the parameter setting θ that “matches” a target sound
29
+ x; i.e., such that a human ear judges the generated sound g(θ) to
30
+ resemble x. Sound matching has applications in automatic music
31
+ transcription, virtual reality, and audio engineering [2, 3]. Of particu-
32
+ lar interest is the case where g(θ) solves a known partial differential
33
+ equation (PDE) whose coefficients are contained in the vector θ. In
34
+ this case, θ reveals some key design choices in acoustical manufac-
35
+ turing, such as the shape and material properties of the resonator.
36
+ Over the past decade, the renewed interest for deep neural net-
37
+ works (DNN’s) in audio content analysis has led researchers to formu-
38
+ late sound matching as a supervised learning problem [4]. Intuitively,
39
+ the goal is to optimize the synaptic weights W of a DNN f W so
40
+ that f W(xn) = ˜θn approximates θn over a training set of pairs
41
+ (xn, θn). Because g automates the mapping from parameter θn to
42
+ sound xn, this training procedure incurs no real-world audio acquisi-
43
+ tion nor human annotation. However, prior publications have pointed
44
+ out that the approximation formula ˜θn ≈ θn lacks a perceptual mean-
45
+ ing: depending on the choice of target xn, some deviations (˜θn−θn)
46
+ may be judged to have a greater effect than others [5, 6, 7].
47
+ The paradigm of differentiable digital signal processing (DDSP)
48
+ has brought a principled methodology to address this issue [8]. The
49
+ key idea behind DDSP is to chain the learnable encoder f W with
50
+ the known decoder g and a non-learnable but differentiable feature
51
+ map Φ. In DDSP, f W is trained to minimize the perceptual distance
52
86
+ Fig. 1. Graphical outline of the proposed method. Given a known
87
+ synthesizer g and feature map Φ, we train a neural network f W to
88
+ minimize the “perceptual–neural–physical” (PNP) quadratic form
89
+ ⟨˜θ − θ | M(θ) | ˜θ − θ⟩, where M is the Riemannian metric associated
91
+ to (Φ◦g). Hence, PNP approximates DDSP spectral loss yet does not
92
+ need to backpropagate ∇(Φ◦g)(˜θ) at each epoch. Transformations in
93
+ solid (resp. dashed) lines can (resp. cannot) be cached during training.
94
+ between vectors Φ(˜xn) = (Φ◦g◦f W)(xn) and Φ(xn) on average
95
+ over samples xn. Yet, a practical shortcoming of DDSP is that it
96
+ requires evaluating the Jacobian ∇(Φ◦g) over each DNN prediction
97
+ ˜θn; and so at every training step, as W is updated by stochastic
98
+ gradient descent (SGD).
99
+ In this article, we propose a new learning objective for sound
100
+ matching, named perceptual–neural–physical (PNP) autoencoding.
101
+ The main contribution of PNP is to compute the Riemannian met-
102
+ ric M associated to the Jacobian ∇(Φ◦g) over each sample θn (see
103
+ Section 2.1). The PNP encoder is penalized in terms of a first-order
104
+ Taylor expansion of the spectral loss ∥Φ(˜x) − Φ(x)∥2, making
105
+ it comparable to DDSP. Yet, unlike in DDSP, the computation of
106
+ ∇(Φ◦g) is independent from the encoder f W: thus, it may be par-
107
+ allelized and cached during DNN training. A second novelty of
108
+ our paper resides in its choice of application: namely, differentiable
109
+ sound matching for percussion instruments. This requires not only
110
+ a fine characterization of the spectral envelope, as in the DDSP of
111
+ sustained tones; but also of attack and release transients. For this
112
+ purpose, we need g and Φ to accommodate sharp spectrotemporal
113
+ modulations. Specifically, we rely on a differentiable implementation
114
+ of the functional transformation method (FTM) for g and the joint
115
+ time–frequency scattering transform (JTFS) for Φ.
116
+ 2. METHODS
117
+ 2.1. Approximating spectral loss with Riemannian geometry
118
+ We assume the synthesizer g and the feature map Φ to be contin-
119
+ uously differentiable. Let us denote by LDDSP the “spectral loss”
120
122
+ associated to the triplet (Φ, f W, g). Its value at a parameter set θ is:
123
+ L^DDSP_θ(W) = (1/2) ∥Φ(˜x) − Φ(x)∥²₂ = (1/2) ∥(Φ ◦ g ◦ f_W ◦ g)(θ) − (Φ ◦ g)(θ)∥²₂   (1)
134
+ by definition of ˜x and x. Using ˜θ as shorthand for (f W ◦ g)(θ), we
135
+ conduct a first-order Taylor expansion of (Φ ◦ g) near θ. We obtain:
136
+ Φ(˜x) = Φ(x) + ∇(Φ◦g)(θ) · (˜θ − θ) + O(∥˜θ − θ∥²₂),   (2)
139
+ where the Jacobian matrix ∇(Φ◦g)(θ) contains P = dim Φ(x) rows
140
+ and J = dim θ columns. The differentiable map (Φ ◦ g) induces a
141
+ weak Riemannian metric M onto the open set U ⊂ RJ of parameters
142
+ θ, whose matrix values derive from ∇(Φ◦g)(θ):
143
+ M(θ)_{j,j′} = Σ_{p=1}^{P} [∇(Φ◦g)(θ)]_{p,j} · [∇(Φ◦g)(θ)]_{p,j′}.   (3)
153
+ The square matrix M(θ) ∈ R^{J×J} defines a positive semidefinite
+ kernel which, once plugged into Equation 2, serves to approximate
+ L^DDSP_θ(W) in terms of a quadratic form over (˜θ − θ):
+ ∥Φ(˜x) − Φ(x)∥²₂ = ⟨˜θ − θ | M(θ) | ˜θ − θ⟩ + O(∥˜θ − θ∥³₂).   (4)
170
+ The advantage of the approximation above is that the metric
171
+ M may be computed over the training set once and for all. This is
172
+ because Equation 3 is independent of the encoder f W. Furthermore,
173
+ since θ is low-dimensional, we may store M(θ) on RAM. From
174
+ this perspective, we define the perceptual–neural–physical loss (PNP)
175
+ associated to (Φ, f W, g) as the linearization of spectral loss at θ:
176
+ L^PNP_θ(W) = (1/2) ⟨(f_W ◦ g)(θ) − θ | M(θ) | (f_W ◦ g)(θ) − θ⟩
+ = L^DDSP_θ(W) + O(∥(f_W ◦ g)(θ) − θ∥³₂).   (5)
194
+ According to the chain rule, the gradient of PNP loss at a given
195
+ training pair (xn, θn) with respect to some scalar weight Wi is:
196
+ ∂L^PNP_θ/∂W_i (θ_n) = ⟨ f_W(x_n) − θ_n | M(θ_n) | ∂f_W/∂W_i (x_n) ⟩.   (6)
207
+ Observe that replacing M(θn) by the identity matrix in the equation
208
+ above would give the gradient of parameter loss (P-loss); that is,
209
+ the mean squared error between the predicted parameter ˜θ and the
210
+ true parameter θ. Hence, we may regard PNP as a perceptually
211
+ motivated extension of P-loss, in which parameter deviations are
212
+ locally recombined and rescaled so as to simulate a DDSP objective.
213
+ The matrix M(θ) is constant in W. Hence, its value may be
214
+ cached across training epochs, and even across hyperparameter set-
215
+ tings of the encoder. In comparison with P-loss, the only computa-
216
+ tional overhead of PNP is the bilinear form in Equation 6. However,
217
+ this computation is performed in the parametric domain, i.e., in low
218
+ dimension (J = dim θ). Hence, its cost is negligible compared with the
+ forward (f_W) and backward (∂f_W/∂W_i) passes of DNN training.
220
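+ As a minimal illustration (assuming phi_g is a differentiable PyTorch implementation of Φ◦g acting on a single parameter vector), the cached metric of Equation 3 and the quadratic form of Equation 5 could be sketched as:
+ import torch
+ from torch.autograd.functional import jacobian
+
+ def riemannian_metric(phi_g, theta):
+     # Jacobian of Phi∘g at theta, of shape (P, J) with P = dim Phi(x), J = dim theta.
+     jac = jacobian(phi_g, theta)
+     # Equation 3: M(theta) = Jac^T Jac, a J-by-J positive semidefinite matrix.
+     return jac.T @ jac
+
+ def pnp_loss(theta_hat, theta, M):
+     # Equation 5: 0.5 * <theta_hat - theta | M(theta) | theta_hat - theta>.
+     delta = theta_hat - theta
+     return 0.5 * delta @ (M @ delta)
+
+ # Since M(theta_n) does not depend on the encoder weights W, it can be
+ # precomputed over the training set and cached before DNN training.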
+ 2.2. Damped least squares
221
+ The principal components of the Jacobian ∇(Φ◦g)(θ) are the eigen-
222
+ vectors of M(θ). We denote them by vj and the corresponding
223
+ eigenvalues by σ²_j: for each of them, we have M(θ) v_j = σ²_j v_j. The
226
+ vj’s form an orthonormal basis of RJ, in which we can decompose
227
+ the parameter deviation (˜θ − θ). Recalling Equation 5, we obtain an
228
+ alternative formula for PNP loss:
229
+ L^PNP_θ(W) = (1/2) Σ_{j=1}^{J} σ²_j · |⟨(f_W ◦ g)(θ) − θ | v_j⟩|²   (7)
243
+ The eigenvalues σ²_j stretch and compress the error vector along their
+ associated directions v_j, analogous to the magnification and suppres-
+ sion of perceptually relevant and irrelevant parameter deviations. In
+ practice, however, when the σ²_j span drastic ranges or contain zeros, as
+ presented below in Section 4.3, the error vector is subject to extreme
+ distortion and potential instability due to numerical precision errors.
+ These scenarios, commonly referred to as M being ill-conditioned,
+ can lead to an intractable learning objective L^PNP_θ.
255
+ Reminiscent of the damping mechanism introduced in the Levenberg–
+ Marquardt algorithm for solving nonlinear least-squares problems,
+ we update Equation 5 as
+ L^PNP_θ(W) = (1/2) ⟨˜θ − θ | M(θ) + λI | ˜θ − θ⟩.   (8)
267
+ The damping term λI up-shifts all eigenvalues of M by a constant
+ positive amount λ, thereby changing its condition number. When
+ λ is huge, M(θ) + λI is close to a scaled identity matrix with uniform
+ eigenvalues, and L^PNP_θ effectively operates in the parameter-loss
+ regime. Conversely, when λ is small, the correction is mild and L^PNP_θ
+ stays in the spectral-loss regime. Alternatively, Equation 8 may also be
+ viewed as an L2 regularization with coefficient λ, which allows a smooth
+ transition between the spectral- and parameter-loss regimes.
278
+ To further address potential convergence issues, λ may be sched-
+ uled or adapted according to the epoch validation loss. We
+ adopt a delayed-gratification mechanism: λ is decreased by a factor of 5
+ whenever the loss improves, and kept fixed otherwise.
282
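+ A minimal sketch of Equation 8 and of the delayed-gratification update described above; the initial value and the update factor are illustrative assumptions, not prescriptions:
+ import torch
+
+ def damped_pnp_loss(theta_hat, theta, M, lam):
+     # Equation 8: quadratic form with the damped metric M(theta) + lambda * I.
+     delta = theta_hat - theta
+     M_damped = M + lam * torch.eye(M.shape[0], dtype=M.dtype, device=M.device)
+     return 0.5 * delta @ (M_damped @ delta)
+
+ def update_damping(lam, val_loss, best_val_loss, factor=5.0):
+     # Delayed gratification: divide lambda by `factor` only when the
+     # validation loss improves; otherwise keep lambda fixed.
+     if val_loss < best_val_loss:
+         return lam / factor, val_loss
+     return lam, best_val_loss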
+ 3. APPLICATION TO DRUM SOUND MATCHING
283
+ 3.1. Perceptual: Joint time–frequency scattering (JTFS)
284
+ The joint time–frequency scattering transform (JTFS) is a nonlinear
285
+ convolutional operator which extracts spectrotemporal modulations
286
+ in the constant-Q scalogram [9]. Its kernels proceed from a separable
287
+ product between two complex-valued wavelet filterbanks, defined
288
+ over the time axis and over the log-frequency axis respectively. After
289
+ convolution, we apply pointwise complex modulus and temporal
290
+ averaging to each JTFS coefficient. These coefficients are known as
291
+ scattering “paths” p. We apply a logarithmic transformation to the
292
+ feature vector JTFS(xn) corresponding to each sound xn, yielding
293
+ S_{n,p} = (Φ ◦ g)(θ_n)_p = log(1 + JTFS(x_n)_p / ε),   (9)
+ where we set the hyperparameter ε = 10⁻³, on the order of the
+ median value of JTFS across all examples x_n and paths p.
302
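+ As a sketch of Equation 9, assuming the raw non-negative JTFS coefficients have already been computed (e.g., with a Kymatio scattering frontend), the log compression reads:
+ import torch
+
+ def log_compress(jtfs_coeffs, eps=1e-3):
+     # Equation 9: S_{n,p} = log(1 + JTFS(x_n)_p / eps), with eps of the order
+     # of the median JTFS value over all examples and paths.
+     return torch.log1p(jtfs_coeffs / eps)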
+ The multiresolution structure of JTFS is reminiscent of spec-
303
+ trotemporal receptive fields (STRF), and thus may serve as a bio-
304
+ logically plausible predictor of neurophysiological responses in the
305
+ primary auditory cortex [10]. At a higher level of music cognition, a
306
+ recent study has shown that Euclidean distances in Φ space predict
307
+ auditory judgments of timbre similarity within a large vocabulary
308
+ of instrumental playing techniques, as collected from a group of
309
+ professional composers and non-expert music listeners [11].
310
+ We compute JTFS with the same parameters as [11]: Q1 = 12,
311
+ Q2 = 1, and Qfr = 1 filters per octave respectively. We set the
312
+
313
+ temporal averaging to T = 3 seconds and the frequential averaging
314
+ to F = 2 octaves; hence a total of P = 20762 paths. We run Φ and
315
+ ∇(Φ◦g) in PyTorch on GPU via the implementation of [12, 13].
316
+ 3.2. Neural: Deep convolutional network (convnet)
317
+ EfficientNet is a convolutional neural network architecture that bal-
318
+ ances the scaling of the depth, width and input resolution of con-
319
+ secutive convolutional blocks [14]. Achieving state-of-the-art per-
+ formance on image classification with significantly fewer trainable
+ parameters, its most lightweight version, EfficientNet-B0, has also
+ performed well on audio classification benchmarks [15]. We adopt
+ EfficientNet-B0 as our encoder f_W, resulting in 4M learnable pa-
+ rameters. We append a linear dense layer of J = dim θ neurons
+ and a 1D batch normalization before a tanh activation. The goal of
+ batch normalization is to gaussianize the input, such that the activated
+ output uniformly covers the normalized prediction range.
+ The input to f_W consists of the log-scaled CQT coefficients of each example,
329
+ computed with a filterbank spanning 10 octaves with 12 filters per
330
+ octave.
331
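+ A minimal sketch of this encoder head, assuming torchvision's EfficientNet-B0; the channel duplication and the 1280-dimensional backbone feature size are assumptions of the sketch, not details from the paper:
+ import torch
+ import torch.nn as nn
+ from torchvision.models import efficientnet_b0
+
+ class Encoder(nn.Module):
+     def __init__(self, dim_theta=5):
+         super().__init__()
+         self.backbone = efficientnet_b0(weights=None)
+         # Replace the classifier with a dense layer of J = dim(theta) neurons.
+         self.backbone.classifier = nn.Linear(1280, dim_theta)
+         self.bn = nn.BatchNorm1d(dim_theta)
+
+     def forward(self, cqt):
+         # cqt: (batch, 1, n_bins, n_frames) log-scaled CQT coefficients.
+         x = cqt.repeat(1, 3, 1, 1)  # EfficientNet-B0 expects 3 input channels.
+         return torch.tanh(self.bn(self.backbone(x)))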
+ 3.3. Physical: Functional transformation method (FTM)
332
+ We are interested in the perpendicular displacement X(t, u) on a
333
+ rectangular drum face, which can be solved from the following partial
334
+ differential equation defined in the Cartesian coordinate system u =
335
+ (u1, u2).
336
+ (∂²X/∂t²(t, u) − c²∇²X(t, u)) + S⁴∇⁴X(t, u)
+ + ∂/∂t (d₁X(t, u) + d₃∇²X(t, u)) = Y(t, u)   (10)
349
+ In addition to the standard traveling-wave terms in the first
+ parenthesis above, the fourth-order spatial and first-order time derivatives
351
+ incorporate damping factors induced by stiffness, internal friction
352
+ in the drum material and air friction in the external environment,
353
+ rendering the solution a closer simulation to reality. Specifically,
354
+ α, S, c, d1, d3 designate respectively the side length ratio, stiffness,
355
+ traveling wave speed, frequency-independent damping and frequency-
356
+ dependent damping of the drum. Even though real world drums
357
+ are mostly circular, a rectangular drum model is equally capable of
358
+ eliciting representative percussive sounds in real world scenarios. The
359
+ circular drum model simply requires a conversion of Equation 10
360
+ into the Polar coordinate system. We bound the four sides of this l
361
+ by lα rectangular drum at zero at all time. Moreover, we assume its
362
+ excitation function to be separable and localized in space and time
363
+ Y(t, u) = yu(u)δ(t).
364
+ We implement generator g as a PDE solver to this high-order
365
+ damped wave equation, namely the functional transformation method
366
+ (FTM) [16, 17]. FTM solves the PDE by transforming the equation
367
+ into its Laplace and functional space domain, where an algebraic
368
+ solution can be obtained. It then finds the time-space domain solution
369
+ via inverse functional transforms, expressed in an infinite modal
370
+ summation form
371
+ x(t) = X(t, u) = Σ_{m∈N²} K_m(u, t) exp(σ_m t) sin(ω_m t)   (11)
+ The coefficients K_m(u, t), σ_m, ω_m are derived from the original
+ PDE parameters in the following ways.
+ ω²_m = (S⁴ − d₃²/4) Γ²_{m1,m2} + (c² + d₁d₃/2) Γ_{m1,m2} − d₁²/4   (12)
388
+ Fig. 2. Distributions of the sorted eigenvalues of M(θn). For the
389
+ sake of comparison between PNP and P-loss, the dashed line indicates
390
+ the eigenvalues of the identity matrix (see Equation 6).
391
+ σ_m = (d₃/2) Γ_{m1,m2} − d₁/2   (13)
+ K_m(u, t) = y_u^m δ(t) sin(πm₁u₁/l) sin(πm₂u₂/(lα))   (14)
+ where Γ_{m1,m2} = π²m₁²/l² + π²m₂²/(lα)², and y_u^m is the m-th coef-
+ ficient associated to the eigenfunction sin(πmu/l) that decomposes
+ y_u(u).
409
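+ A simplified NumPy sketch of the modal synthesis of Equations 11–14, assuming a unit-impulse excitation (y_u^m = 1), a fixed observation point u, and a truncated modal sum; these simplifications are assumptions of the sketch:
+ import numpy as np
+
+ def ftm_synth(c, S, d1, d3, l, alpha, u=(0.4, 0.3), n_modes=20, sr=22050, dur=1.0):
+     t = np.arange(int(sr * dur)) / sr
+     x = np.zeros_like(t)
+     for m1 in range(1, n_modes + 1):
+         for m2 in range(1, n_modes + 1):
+             gamma = (np.pi * m1 / l) ** 2 + (np.pi * m2 / (l * alpha)) ** 2
+             omega2 = (S**4 - d3**2 / 4) * gamma**2 + (c**2 + d1 * d3 / 2) * gamma - d1**2 / 4  # Eq. 12
+             if omega2 <= 0:
+                 continue  # skip overdamped modes in this simplified sketch
+             sigma = (d3 / 2) * gamma - d1 / 2  # Eq. 13
+             k = np.sin(np.pi * m1 * u[0] / l) * np.sin(np.pi * m2 * u[1] / (l * alpha))  # Eq. 14
+             x += k * np.exp(sigma * t) * np.sin(np.sqrt(omega2) * t)  # Eq. 11
+     return x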
+ Without losing the connection to the acoustical manufacturing of
+ the drum, yet better relating the input of g to perceptual dimensions,
411
+ we reparametrize the PDE parameters {S, c, d1, d3, α} into θ =
412
+ {log ω1, τ1, log p, log D, α}, detailed in Section 3.4 of [18]. We pre-
413
+ scribe sonically-plausible ranges for each parameter in θ, normalize
414
+ them between −1 and 1, uniformly sample in the hyper-dimensional
415
+ cube, and obtain a dataset of 100k percussive sounds sampled at
+ 22050 Hz. The train/test/validation split is 8 : 1 : 1.
+ In particular, the fundamental frequency ω₁ and duration τ₁ fall into the
+ ranges [40, 1000] Hz and [0.4, 3] seconds respectively. The inhomoge-
+ neous damping rate p, frequential dispersion D, and aspect ratio α
+ lie in [10⁻⁵, 0.2], [10⁻⁵, 0.3], and [10⁻⁵, 1], respectively.
421
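+ For instance, the normalized parameter targets can be drawn uniformly in the hypercube before being rescaled to the physical ranges above (a sketch with hypothetical variable names):
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ # Normalized targets in [-1, 1]^5 for {log omega_1, tau_1, log p, log D, alpha}.
+ theta_normalized = rng.uniform(-1.0, 1.0, size=(100_000, 5))
+ # Each column is then rescaled to its prescribed sonically-plausible range
+ # before being passed to the FTM synthesizer g.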
+ 4. RESULTS
422
+ 4.1. Baselines
423
+ We train f_W with three different losses: multi-scale spectral (MSS) loss [19],
+ parameter loss, and PNP loss. We use a batch size of 64 samples
+ for spectral loss, and 256 samples for parameter and PNP loss. The
+ training proceeds for 70 epochs, where around 20% of the training
+ set is seen at each epoch. We use the Adam optimizer with a learning rate
+ of 10⁻³. Table 1 reports the training time per epoch on a single Tesla
429
+ V100 16GB GPU.
430
+ 4.2. Evaluation with JTFS-based spectral loss
431
+ We propose to use the L2 norm of the JTFS coefficient error, averaged
+ over the test set, for evaluation. As a point of reference, we also include
+ the average multi-scale spectral error, implemented as in Section
+ 4.1. One of the key distinctions between Euclidean JTFS distance
+ and multi-scale spectral error is the former's inclusion of spectro-
+ temporal modulation information. Meanwhile, unlike mean squared
+ parameter error, both metrics reflect perceptual closeness rather
+ than parametric retrieval accuracy for each proposed model.
439
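+ A minimal sketch of this evaluation metric, assuming the log-compressed JTFS coefficients of targets and reconstructions have been precomputed as (N, P) tensors:
+ import torch
+
+ def avg_jtfs_distance(S_pred, S_true):
+     # Mean L2 norm of the JTFS coefficient error over the test set.
+     return torch.linalg.norm(S_pred - S_true, dim=1).mean()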
+ 4.3. Discussion
440
+ Despite being the optimal quadratic approximation of spectral loss,
+ it is nontrivial to apply the bare PNP loss of Equation 5 in
442
+
443
+ [Figure 2 panel: distribution of log10(σn,j).]
+ Model                | Pitch   | JTFS distance (avg. on test set) | MSS (avg. on test set) | Training time per epoch
+ P-loss               | Known   | 22.23 ± 2.17                     | 0.31 ± 0.013           | 49 minutes
+ DDSP with MSS loss   | Known   | 31.86 ± 0.332                    | 0.335 ± 0.005          | 54 minutes
+ PNP with JTFS loss   | Known   | 23.58 ± 0.877                    | 0.335 ± 0.005          | 49 minutes
+ DDSP with JTFS loss  | —       | —                                | —                      | est., > 1 day
+ P-loss               | Unknown | 61.91 ± 6.26                     | 1.02 ± 0.094           | 53 minutes
+ DDSP with MSS loss   | Unknown | 138.95 ± 37.12                   | 1.59 ± 0.307           | 59 minutes
+ PNP with JTFS loss   | Unknown | 61.21 ± 1.207                    | 0.97 ± 0.019           | 49 minutes
491
+ Table 1. Average JTFS distance and MSS metrics evaluated on the test set. Six models are trained with two modalities: 1. the inclusion
+ of pitch retrieval, i.e., regressing θ = {τ, log p, log D, α} vs. θ = {log ω1, τ, log p, log D, α}, and 2. the choice of loss function: P-loss, MSS
+ loss, or PNP loss with the adaptive damping mechanism. The best-performing models with known and unknown pitch are P-loss and PNP loss,
+ respectively. Training with MSS loss is more time-consuming than training with P-loss or PNP loss. Training with differentiable JTFS loss is
+ prohibitively slow.
496
+ experimental settings. On one hand, Φ◦g may have undesirable
+ properties that expose the Riemannian metric calculations to numeri-
+ cal precision errors. On the other hand, extreme deformation of the
+ optimization landscape may lead to the same numerical instability
+ that faces stochastic gradient descent with spectral loss. We report on a
501
+ few remedies that helped stabilize learning with PNP loss, and offer
502
+ insights on future directions to take.
503
+ First and foremost, our preliminary experiments show that train-
+ ing with PNP loss without damping (λ = 0) suffers from serious convergence
+ issues. Recalling Section 2.2, our empirical M's indeed suffer from
+ high condition numbers. Fig. 2 shows the sorted eigenvalue distribu-
+ tion of all M's in the test set, where the M's are rank-2, 3, or 4 matrices with
+ eigenvalues ranging from 0 to 10²⁰. This could be an indication
+ that entries of θ contain implicit linear dependencies in the generator g,
+ or that local variations of certain θ fail to linearize differences in the
+ output of g or Φ ◦ g. As an example, the aspect ratio α influences the
+ modal frequencies and decay rates via [18, Equations 12–13], where
+ in fact its variant 1/α + 1/α² could be a better choice of variable
+ for linearizing g.
515
+ To address the ill-conditioning of M, we attempted numerous damping
+ mechanisms to update λ, namely constant λ, scheduled λ decay, and
+ adaptive λ decay. The intuition is to have L^PNP_θ start in the parameter-
+ loss regime and move towards the spectral-loss regime during training.
+ The best-performing model is achieved with adaptive λ decay, as
+ described in Section 2.2. We initialize λ at 10²⁰ to match the
+ largest empirical σ²_j; λ then decays adaptively to 3 × 10¹⁴ over
+ 20 epochs, as the validation loss reaches a new record 7 times. This indicates that f_W is
+ able to learn with damped PNP loss, on the condition that λ is
+ large enough to simulate the parameter-loss regime and compensate for
+ the deficiency of M.
529
+ The diagonal elements of M(θ) can be regarded both as the mag-
+ nitudes of the applied weights and as proxies for the perceptual significance of θ.
+ To gain further insight into how each model regresses different pa-
+ rameters, we visualize in Fig. 3 pairs of (|˜θ − θ|²_j, M(θ)_{j,j}). Three
+ trends can be observed. First, τ and ω are regressed with the best
+ accuracy across all learning objectives. Second, spectral loss particu-
+ larly struggles in the retrieval of pitch ω and inharmonicity p. Third, we
+ may interpret the x-axis as describing, from left to right, samples with
+ increasing perceptual significance. We observe in Fig. 3(b) that PNP
+ loss is able to suppress more errors in samples with high M(θ)_{j,j}
+ than parameter loss, by a nonnegligible margin.
541
+ We believe that more of PNP loss’ mathematical potential can
542
+ be exploited in the future, notably its ability to interpolate between
543
+ various loss regimes and its use in hybrid optimization schemes. To
544
+ start with, we plan to resort to a simpler differentiable synthesizer g
545
+ Fig. 3. X-axis: weight assigned by PNP to one of the physical
546
+ parameters in θn. Y-axis: log squared estimation error for that same
547
+ parameter. α is omitted due to its poor retrieval results from all
548
+ models.
549
+ that guarantees a well-conditioned Riemannian metric M(θ). More-
550
+ over, we plan to explore other damping schemes and optimizers. The
551
+ current update mechanism, which originates from the Levenberg–Marquardt
+ algorithm, aims to improve the conditioning of a matrix inversion
+ problem in the Gauss–Newton algorithm. However, when used jointly
+ with stochastic gradient descent, each λ update may change the opti-
+ mization landscape drastically. The resulting optimization behavior
+ is thus not fully understood. We consider interfacing a nonlinear least-
+ squares solver with SGD to form a hybrid learning scheme in
+ future work.
559
+ 5. CONCLUSION
560
+ In this article we have presented Perceptual-Neural-Physical (PNP)
+ autoencoding, a bilinear-form learning objective for the sound matching
+ task. In our application, PNP optimizes the retrieval of physical
+ parameters from sounds in a perceptually motivated metric space,
+ enabled by differentiable implementations of domain knowledge in
+ physical modeling and of a computational proxy for neurophysiological
+ constructs of the human auditory system.
567
+ We demonstrated PNP’s mathematical proximity to spectral loss
568
+ and its generalizability to parameter loss. Using this formulation,
569
+ we motivated and established one way of enabling smooth transition
570
+ between optimizing in parameter and spectral loss regimes. We
571
+ have presented damping mechanisms to facilitate its learning under
572
+ ill-conditioned empirical settings and discussed its mathematical
573
+ potential.
574
+
575
+ [Figure 3 scatter plots, legend: Ploss, Spec, PNP. Panels: (a) |˜ω − ω|² vs. M[0,0]; (b) |˜τ − τ|² vs. M[1,1]; (c) |˜p − p|² vs. M[2,2]; (d) | ˜D − D|² vs. M[3,3].]
+ 6. REFERENCES
629
+ [1] Andrew Horner, “Wavetable matching synthesis of dynamic
630
+ instruments with genetic algorithms,” Journal of the Audio
631
+ Engineering Society, vol. 43, no. 11, pp. 916–931, 1995.
632
+ [2] Jordie Shier, Kirk McNally, George Tzanetakis, and Ky Grace
633
+ Brooks,
634
+ “Manifold learning methods for visualization and
635
+ browsing of drum machine samples,” Journal of the Audio
636
+ Engineering Society, vol. 69, no. 1/2, pp. 40–53, 2021.
637
+ [3] Philippe Esling, Naotake Masuda, Adrien Bardet, Romeo De-
638
+ spres, Axel Chemla, et al., “Universal audio synthesizer control
639
+ with normalizing flows,” in Proceedings of the International
640
+ Conference on Digital Audio Effects (DAFX), 2019.
641
+ [4] Leonardo Gabrielli, Stefano Tomassetti, Carlo Zinato, and
642
+ Francesco Piazza,
643
+ “End-to-end learning for physics-based
644
+ acoustic modeling,” IEEE Transactions on Emerging Topics in
645
+ Computational Intelligence, vol. 2, no. 2, pp. 160–170, 2018.
646
+ [5] Naotake Masuda and Daisuke Saito, “Synthesizer sound match-
647
+ ing with differentiable DSP.,” in Proceedings of the Interna-
648
+ tional Society on Music Information Retrieval (ISMIR) Confer-
649
+ ence, 2021, pp. 428–434.
650
+ [6] Martin Roth and Matthew Yee-king, “A comparison of para-
651
+ metric optimization techniques for musical instrument tone
652
+ matching,” Journal of the Audio Engineering Society, May
653
+ 2011.
654
+ [7] Matthew Yee-King, Leon Fedden, and Mark d’Inverno, “Au-
655
+ tomatic programming of vst sound synthesizers using deep
656
+ networks and other techniques,” IEEE Transactions on Emerg-
657
+ ing Topics in Computational Intelligence, vol. 2, pp. 150–159,
658
+ 04 2018.
659
+ [8] Jesse Engel, Lamtharn (Hanoi) Hantrakul, Chenjie Gu, and
660
+ Adam Roberts, “DDSP: Differentiable Digital Signal Process-
661
+ ing,” in Proceedings of the International Conference on Learn-
662
+ ing Representations (ICLR), 2020.
663
+ [9] Joakim Andén, Vincent Lostanlen, and Stéphane Mallat, “Joint
664
+ time–frequency scattering,” IEEE Transactions on Signal Pro-
665
+ cessing, vol. 67, no. 14, pp. 3704–3718, 2019.
666
+ [10] Taishih Chi, Powen Ru, and Shihab A Shamma, “Multiresolu-
667
+ tion spectrotemporal analysis of complex sounds,” The Journal
668
+ of the Acoustical Society of America, vol. 118, no. 2, pp. 887–
669
+ 906, 2005.
670
+ [11] Vincent Lostanlen, Christian El-Hajj, Mathias Rossignol,
671
+ Grégoire Lafay, Joakim Andén, and Mathieu Lagrange, “Time–
672
+ frequency scattering accurately models auditory similarities
673
+ between instrumental playing techniques,” EURASIP Journal
674
+ on Audio, Speech, and Music Processing, vol. 2021, no. 1, pp.
675
+ 1–21, 2021.
676
+ [12] Mathieu Andreux, Tomás Angles, Georgios Exarchakis,
+ Roberto Leonarduzzi, Gaspar Rochette, Louis Thiry, John
+ Zarka, Stéphane Mallat, Joakim Andén, Eugene Belilovsky,
679
+ Joan Bruna, Vincent Lostanlen, Muawiz Chaudhary, Matthew J.
680
+ Hirn, Edouard Oyallon, Sixin Zhang, Carmine Cella, and
681
+ Michael Eickenberg,
682
+ “Kymatio: Scattering transforms in
683
+ Python.,”
684
+ Journal of Machine Learning Research, vol. 21,
685
+ no. 60, pp. 1–6, 2020.
686
+ [13] John Muradeli, Cyrus Vahidi, Changhong Wang, Han Han, Vin-
687
+ cent Lostanlen, Mathieu Lagrange, and George Fazekas, “Dif-
688
+ ferentiable time-frequency scattering in kymatio,” in Proceed-
689
+ ings of the International Conference on Digital Audio Effects
690
+ (DAFX), 2022.
691
+ [14] Mingxing Tan and Quoc Le, “EfficientNet: Rethinking model
692
+ scaling for convolutional neural networks,” in Proceedings
693
+ of the International conference on Machine Learning (ICML).
694
+ PMLR, 2019, pp. 6105–6114.
695
+ [15] Neil Zeghidour, Olivier Teboul, Félix de Chaumont Quitry, and
696
+ Marco Tagliasacchi, “Leaf: A learnable frontend for audio
697
+ classification,” ICLR, 2021.
698
+ [16] L. Trautmann and Rudolf Rabenstein, Digital Sound Synthesis
699
+ by Physical Modeling Using the Functional Transformation
700
+ Method, 01 2003.
701
+ [17] Maximilian Schäfer, Manuel Werner, and Rudolf Rabenstein,
702
+ “Physical modeling in sound synthesis: Vibrating plates,” 05
703
+ 2019.
704
+ [18] Han Han and Vincent Lostanlen, “wav2shape: Hearing the
705
+ Shape of a Drum Machine,” in Proceedings of Forum Acusticum,
706
+ 2020, pp. 647–654.
707
+ [19] Christian J. Steinmetz and Joshua D. Reiss, “auraloss: Audio
708
+ focused loss functions in PyTorch,” in Digital Music Research
709
+ Network One-day Workshop (DMRN+15), 2020.
710
+
DdE1T4oBgHgl3EQfEAP6/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf,len=312
2
+ page_content='PERCEPTUAL–NEURAL–PHYSICAL SOUND MATCHING Han Han, Vincent Lostanlen, and Mathieu Lagrange Nantes Universit´e, ´Ecole Centrale Nantes, CNRS, LS2N, UMR 6004, F-44000 Nantes, France ABSTRACT Sound matching algorithms seek to approximate a target waveform by parametric audio synthesis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
3
+ page_content=' Deep neural networks have achieved promising results in matching sustained harmonic tones.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
4
+ page_content=' However, the task is more challenging when targets are nonstationary and inhar- monic, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
5
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
6
+ page_content=', percussion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
7
+ page_content=' We attribute this problem to the inadequacy of loss function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
8
+ page_content=' On one hand, mean square error in the parametric domain, known as “P-loss”, is simple and fast but fails to accommo- date the differing perceptual significance of each parameter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
9
+ page_content=' On the other hand, mean square error in the spectrotemporal domain, known as “spectral loss”, is perceptually motivated and serves in differen- tiable digital signal processing (DDSP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
10
+ page_content=' Yet, spectral loss has more local minima than P-loss and its gradient may be computationally expensive;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
11
+ page_content=' hence a slow convergence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
12
+ page_content=' Against this conundrum, we present Perceptual-Neural-Physical loss (PNP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
13
+ page_content=' PNP is the optimal quadratic approximation of spectral loss while being as fast as P-loss during training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
14
+ page_content=' We instantiate PNP with physical modeling synthesis as decoder and joint time–frequency scattering transform (JTFS) as spectral representation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
15
+ page_content=' We demonstrate its potential on matching synthetic drum sounds in comparison with other loss functions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
16
+ page_content=' Index Terms— auditory similarity, scattering transform, deep convolutional networks, physical modeling synthesis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
17
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
18
+ page_content=' INTRODUCTION Given an audio synthesizer g, the task of sound matching [1] consists in retrieving the parameter setting θ that “matches” a target sound x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
19
+ page_content=' i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
20
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
21
+ page_content=', such that a human ear judges the generated sound g(θ) to resemble x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
22
+ page_content=' Sound matching has applications in automatic music transcription, virtual reality, and audio engineering [2, 3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
23
+ page_content=' Of particu- lar interest is the case where g(θ) solves a known partial differential equation (PDE) whose coefficients are contained in the vector θ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
24
+ page_content=' In this case, θ reveals some key design choices in acoustical manufac- turing, such as the shape and material properties of the resonator.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
25
+ page_content=' Over the past decade, the renewed interest for deep neural net- works (DNN’s) in audio content analysis has led researchers to formu- late sound matching as a supervised learning problem [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
26
+ page_content=' Intuitively, the goal is to optimize the synaptic weights W of a DNN f W so that f W(xn) = ˜θn approximates θn over a training set of pairs (xn, θn).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
27
+ page_content=' Because g automates the mapping from parameter θn to sound xn, this training procedure incurs no real-world audio acquisi- tion nor human annotation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
28
+ page_content=' However, prior publications have pointed out that the approximation formula ˜θn ≈ θn lacks a perceptual mean- ing: depending on the choice of target xn, some deviations (˜θn−θn) may be judged to have a greater effect than others [5, 6, 7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
29
+ page_content=' The paradigm of differentiable digital signal processing (DDSP) has brought a principled methodology to address this issue [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
30
+ page_content=' The key idea behind DDSP is to chain the learnable encoder f W with the known decoder g and a non-learnable but differentiable feature map Φ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
31
+ page_content=' In DDSP, f W is trained to minimize the perceptual distance θ Parametric domain x original Audio domain S Perceptual domain ˜θ ˜x reconstruction ˜S DDSP spectral ≈ loss PNP quadratic form θ x ˜θ M(θ) Riemannian metric g Φ f W g Φ g f W ∇(Φ◦g) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
32
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
33
+ page_content=' Graphical outline of the proposed method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
34
+ page_content=' Given a known synthesizer g and feature map Φ, we train a neural network f W to minimize the “perceptual–neural–physical” (PNP) quadratic form ⟨˜θ − θ ��M(θ)|˜θ − θ⟩ where M is the Riemannian metric associated to (Φ◦g).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
35
+ page_content=' Hence, PNP approximates DDSP spectral loss yet does not need to backpropagate ∇(Φ◦g)(˜θ) at each epoch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
36
+ page_content=' Transformations in solid (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
37
+ page_content=' dashed) lines can (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
38
+ page_content=' cannot) be cached during training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
39
+ page_content=' between vectors Φ(˜xn) = (Φ◦g◦f W)(xn) and Φ(xn) on average over samples xn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
40
+ page_content=' Yet, a practical shortcoming of DDSP is that it requires to evaluate the Jacobian ∇(Φ◦g) over each DNN prediction ˜θn;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
41
+ page_content=' and so at every training step, as W is updated by stochastic gradient descent (SGD).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
42
+ page_content=' In this article, we propose a new learning objective for sound matching, named perceptual–neural–physical (PNP) autoencoding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
43
+ page_content=' The main contribution of PNP is to compute the Riemannian met- ric M associated to the Jacobian ∇(Φ◦g) over each sample θn (see Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
44
+ page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
45
+ page_content=' The PNP encoder is penalized in terms of a first-order Taylor expansion of the spectral loss ∥Φ(˜x) − Φ(x)∥2, making it comparable to DDSP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
46
+ page_content=' Yet, unlike in DDSP, the computation of ∇(Φ◦g) is independent from the encoder f W: thus, it may be par- allelized and cached during DNN training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
47
+ page_content=' A second novelty of our paper resides in its choice of application: namely, differentiable sound matching for percussion instruments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
48
+ page_content=' This requires not only a fine characterization of the spectral envelope, as in the DDSP of sustained tones;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
49
+ page_content=' but also of attack and release transients.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
50
+ page_content=' For this purpose, we need g and Φ to accommodate sharp spectrotemporal modulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
51
+ page_content=' Specifically, we rely on a differentiable implementation of the functional transformation method (FTM) for g and the joint time–frequency scattering transform (JTFS) for Φ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
52
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
53
+ page_content=' METHODS 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
54
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
55
+ page_content=' Approximating spectral loss with Riemannian geometry We assume the synthesizer g and the feature map Φ to be contin- uously differentiable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
56
+ page_content=' Let us denote by LDDSP the “spectral loss” arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
57
+ page_content='02886v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
58
+ page_content='SD] 7 Jan 2023 associated to the triplet (Φ, f W, g).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
59
+ page_content=' Its value at a parameter set θ is: LDDSP θ (W) = 1 2∥Φ(˜x) − Φ(x)∥2 2 = 1 2 ��(Φ ◦ g ◦ f W ◦ g)(θ) − (Φ ◦ g)(θ) ��2 2 (1) by definition of ˜x and x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
60
+ page_content=' Using ˜θ as shorthand for (f W ◦ g)(θ), we conduct a first-order Taylor expansion of (Φ ◦ g) near θ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
61
+ page_content=' We obtain: Φ(˜x) = Φ(x) + ∇(Φ◦g)(θ) · (˜θ − θ) + O(∥˜θ − θ∥2 2), (2) where the Jacobian matrix ∇(Φ◦g)(θ) contains P = dim Φ(x) rows and J = dim θ columns.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
62
+ page_content=' The differentiable map (Φ ◦ g) induces a weak Riemannian metric M onto the open set U ⊂ RJ of parameters θ, whose matrix values derive from ∇(Φ◦g)(θ): M(θ)j,j′ = P � p=1 � ∇(Φ◦g)(θ)p,j � � ∇(Φ◦g)(θ)p,j′� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
63
+ page_content=' (3) The square matrix M(θ) ∈ GLJ(R) defines a positive semidefinite kernel which, once plugged into Equation 2, serves to approximate LDDSP θ (W) in terms of a quadratic form over (˜θ − θ): ∥Φ(˜x) − Φ(x)∥2 2 = �˜θ − θ ��M(θ) ��˜θ − θ � + O � ∥˜θ − θ∥3 2 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
64
+ page_content=' (4) The advantage of the approximation above is that the metric M may be computed over the training set once and for all.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
65
+ page_content=' This is because Equation 3 is independent of the encoder f W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
66
+ page_content=' Furthermore, since θ is low-dimensional, we may store M(θ) on RAM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
67
+ page_content=' From this perspective, we define the perceptual–neural–physical loss (PNP) associated to (Φ, f W, g) as the linearization of spectral loss at θ: LPNP θ (W) = 1 2 � (f W ◦ g)(θ) − θ ��M(θ) ��(f W ◦ g)(θ) − θ � = LDDSP θ (W) + O � ∥(f W ◦ g)(θ) − θ∥3 2 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
68
+ page_content=' (5) According to the chain rule, the gradient of PNP loss at a given training pair (xn, θn) with respect to some scalar weight Wi is: ∂LPNP θ ∂Wi (θn) = � f W(xn) − θn ���M(θn) ���∂f W ∂Wi (xn) � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
69
+ page_content=' (6) Observe that replacing M(θn) by the identity matrix in the equation above would give the gradient of parameter loss (P-loss);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
70
+ page_content=' that is, the mean squared error between the predicted parameter ˜θ and the true parameter θ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
71
+ page_content=' Hence, we may regard PNP as a perceptually motivated extension of P-loss, in which parameter deviations are locally recombined and rescaled so as to simulate a DDSP objective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
72
+ page_content=' The matrix M(θ) is constant in W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
73
+ page_content=' Hence, its value may be cached across training epochs, and even across hyperparameter set- tings of the encoder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
74
+ page_content=' In comparison with P-loss, the only computa- tional overhead of PNP is the bilinear form in Equation 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
75
+ page_content=' However, this computation is performed in the parametric domain, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
76
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
77
+ page_content=', in low dimension (J = dim θ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
78
+ page_content=' Hence, its cost is negligible in front of the forward (f W) and backward pass (∂f W/∂Wi) of DNN training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
79
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
80
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
81
+ page_content=' Damped least squares The principal components of the Jacobian ∇(Φ◦g)(θ) are the eigen- vectors of M(θ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
82
+ page_content=' We denote them by vj and the corresponding eigenvalues by σ2 j : for each of them, we have M(θ)vj = σ2 j vj.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
83
+ page_content=' The vj’s form an orthonormal basis of RJ, in which we can decompose the parameter deviation (˜θ − θ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
84
+ page_content=' Recalling Equation 5, we obtain an alternative formula for PNP loss: LPNP θ (W) = 1 2 J � j=1 σ2 j ��⟨(f W ◦ g)(θ) − θ ��vj⟩ ��2 v2 j (7) The eigenvalues σ2 j stretch and compress the error vector along their associated direction vj, analogous to the magnification and suppres- sion of perceptually relevant and irrelevant parameter deviations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
85
+ page_content=' In practice however, when σ2 j cover drastic ranges or contain zeros, as presented below in Section 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
86
+ page_content='3, the error vector is subject to extreme distortion and potential instability due to numerical precision errors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
87
+ page_content=' These scenarios, commonly referred to as M being ill-conditioned, can lead to intractable learning objective LPNP θ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
88
+ page_content=' Reminiscent of the damping mechanism introduced in Levenberg- Marquardt algorithm when solving nonlinear optimization problems, we update Equation 5 as LPNP θ (W) = 1 2 �˜θ − θ ��M(θ) + λI ��˜θ − θ � (8) The damping term λI up-shifts all eigenvalues of M by a constant positive amount λ, thereby changing its condition number.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
89
+ page_content=' When λ is huge, M(θ) + λI is close to an identity matrix with uniform eigenvalues, LPNP θ is optimizing in parameter loss regime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
90
+ page_content=' On the other hand when λ is small, small correctional effects keeps LPNP θ in the spectral loss regime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
91
+ page_content=' Alternatively, Equation 8 may also be viewed as a L2 regularization with coefficient λ, which allows smooth transition between spectral and parameter loss regimes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
92
+ page_content=' To further address potential convergence issues, λ may be sched- uled or adaptively changed according to epoch validation loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
93
+ page_content=' We adopt delayed gratification mechanism to decrease λ by a factor of 5 when loss is going down, and fix λ otherwise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
94
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
95
+ page_content=' APPLICATION TO DRUM SOUND MATCHING 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
96
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
97
+ page_content=' Perceptual: Joint time–frequency scattering (JTFS) The joint time–frequency scattering transform (JTFS) is a nonlinear convolutional operator which extracts spectrotemporal modulations in the constant-Q scalogram [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
98
+ page_content=' Its kernels proceed from a separable product between two complex-valued wavelet filterbanks, defined over the time axis and over the log-frequency axis respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
99
+ page_content=' After convolution, we apply pointwise complex modulus and temporal averaging to each JTFS coefficient.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
100
+ page_content=' These coefficients are known as scattering “paths” p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
101
+ page_content=' We apply a logarithmic transformation to the feature vector JTFS(xn) corresponding to each sound xn, yielding Sn,p = (Φ ◦ g)(θn)p = log � 1 + JTFS(xn)p ε � , (9) where we have set the hyperparameter ε = 10−3 of the order of the median value of JTFS across all examples xn and paths p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
102
+ page_content=' The multiresolution structure of JTFS is reminiscent of spec- trotemporal receptive fields (STRF), and thus may serve as a bio- logically plausible predictor of neurophysiological responses in the primary auditory cortex [10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
103
+ page_content=' At a higher level of music cognition, a recent study has shown that Euclidean distances in Φ space predict auditory judgments of timbre similarity within a large vocabulary of instrumental playing techniques, as collected from a group of professional composers and non-expert music listeners [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
104
+ page_content=' We compute JTFS with same parameters as [11]: Q1 = 12, Q2 = 1, and Qfr = 1 filters per octave respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
105
+ page_content=' We set the temporal averaging to T = 3 seconds and the frequential averaging to F = 2 octaves;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
106
+ page_content=' hence a total of P = 20762 paths.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
107
+ page_content=' We run Φ and ∇(Φ◦g) in PyTorch on GPU via the implementation of [12, 13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
108
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
109
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
110
+ page_content=' Neural: Deep convolutional network (convnet) EfficientNet is a convolutional neural network architecture that bal- ances the scaling of the depth, width and input resolution of con- secutive convolutional blocks [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
111
+ page_content=' Achieving state-of-the-art per- formance on image classification with significantly less trainable parameters, its most light-weight version EfficientNet-B0 also suc- ceeded in benchmarking audio classification tasks [15].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
112
+ page_content=' We adopt EfficientNet-B0 as our encoder f W, resulting in 4M learnable pa- rameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
113
+ page_content=' We append a linear dense layer of J = dim θ neurons and a 1D batch normalization before tanh activation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
114
+ page_content=' The goal of batch normalization is to gaussianize the input, such that the activated output is capable of uniformly cover the normalized prediction range.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
115
+ page_content=' The input to f W is the log-scaled CQT coefficients of each example, computed with a filterbank spanning 10 octaves with 12 filters per octave.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
116
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
117
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
118
+ page_content=' Physical: Functional transformation method (FTM) We are interested in the perpendicular displacement X(t, u) on a rectangular drum face, which can be solved from the following partial differential equation defined in the Cartesian coordinate system u = (u1, u2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
119
+ page_content=' �∂2X ∂t2 (t, u) − c2∇2X(t, u) � + S4� ∇4X(t, u) � + ∂ ∂t � d1X(t, u) + d3∇2X(t, u) � = Y(t, u) (10) In addition to the standard traveling wave equation in the first above parenthesis, the fourth-order spatial and first-order time derivatives incorporate damping factors induced by stiffness, internal friction in the drum material and air friction in the external environment, rendering the solution a closer simulation to reality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
120
+ page_content=' Specifically, α, S, c, d1, d3 designate respectively the side length ratio, stiffness, traveling wave speed, frequency-independent damping and frequency- dependent damping of the drum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
121
+ page_content=' Even though real world drums are mostly circular, a rectangular drum model is equally capable of eliciting representative percussive sounds in real world scenarios.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
122
+ page_content=' The circular drum model simply requires a conversion of Equation 10 into the Polar coordinate system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
123
+ page_content=' We bound the four sides of this l by lα rectangular drum to zero at all times.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
124
+ page_content=' Moreover, we assume its excitation function to be separable and localized in space and time Y(t, u) = yu(u)δ(t).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
125
+ page_content=' We implement generator g as a PDE solver to this high-order damped wave equation, namely the functional transformation method (FTM) [16, 17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
126
+ page_content=' FTM solves the PDE by transforming the equation into its Laplace and functional space domain, where an algebraic solution can be obtained.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
127
+ page_content=' It then finds the time-space domain solution via inverse functional transforms, expressed in an infinite modal summation form x(t) = X(t, u) = \sum_{m \in \mathbb{N}^2} K_m(u, t) \exp(\sigma_m t) \sin(\omega_m t) \quad (11) The coefficients K_m(u, t), \sigma_m, \omega_m are derived from the original PDE parameters in the following ways.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
128
+ page_content=' \omega_m^2 = \left( S^4 - \frac{d_3^2}{4} \right) \Gamma_{m_1,m_2}^2 + \left( c^2 + \frac{d_1 d_3}{2} \right) \Gamma_{m_1,m_2} - \frac{d_1^2}{4} \quad (12) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
129
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
130
+ page_content=' Distributions of the sorted eigenvalues of M(θn).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
131
+ page_content=' For the sake of comparison between PNP and P-loss, the dashed line indicates the eigenvalues of the identity matrix (see Equation 6).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
132
+ page_content=' \sigma_m = \frac{d_3}{2} \Gamma_{m_1,m_2} - \frac{d_1}{2} \quad (13) \qquad K_m(u, t) = y_u^m \, \delta(t) \, \sin\left(\frac{\pi m_1 u_1}{l}\right) \sin\left(\frac{\pi m_2 u_2}{l\alpha}\right) \quad (14) where \Gamma_{m_1,m_2} = \pi^2 m_1^2 / l^2 + \pi^2 m_2^2 / (l\alpha)^2, and y_u^m is the m-th coefficient associated to the eigenfunction \sin(\pi m u / l) that decomposes y_u(u).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
133
+ page_content=' Without losing connections to the acoustical manufacturing of the drum yet better relating g’s input with perceptual dimensions, we reparametrize the PDE parameters {S, c, d1, d3, α} into θ = {log ω1, τ1, log p, log D, α}, detailed in Section 3.4 of [18]. We prescribe sonically-plausible ranges for each parameter in θ, normalize them between −1 and 1, uniformly sample in the hyper-dimensional cube, and obtain a dataset of 100k percussive sounds sampled at 22,050 Hz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
136
+ page_content=' The train/test/validation split is 8 : 1 : 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
137
+ page_content=' In particular, fundamental frequency ω1 and duration τ1 fall into the ranges [40, 1000] Hz and [0.4, 3] seconds respectively. Inhomogeneous damping rate p, frequential dispersion D and aspect ratio α ranges are [10−5, 0.2], [10−5, 0.3], and [10−5, 1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
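As a concrete illustration of Equations (11)-(14), the following NumPy sketch synthesizes a percussive sound by modal summation. The unit excitation weights, the striking position u and the mode truncation are assumptions made for illustration; this is not the exact generator g used in the paper.

import numpy as np

def ftm_drum_sketch(S, c, d1, d3, alpha, l=1.0, n_modes=10, sr=22050, dur=1.0, u=(0.3, 0.4)):
    # Modal summation of Equation (11); the excitation weight y_u^m is set to 1 for every mode (assumption).
    t = np.arange(int(sr * dur)) / sr
    x = np.zeros_like(t)
    for m1 in range(1, n_modes + 1):
        for m2 in range(1, n_modes + 1):
            gamma = (np.pi * m1 / l) ** 2 + (np.pi * m2 / (l * alpha)) ** 2   # Gamma_{m1,m2}
            omega_sq = (S**4 - d3**2 / 4) * gamma**2 + (c**2 + d1 * d3 / 2) * gamma - d1**2 / 4
            if omega_sq <= 0:
                continue                                  # skip over-damped modes in this sketch
            sigma = d3 / 2 * gamma - d1 / 2               # Equation (13)
            K = np.sin(np.pi * m1 * u[0] / l) * np.sin(np.pi * m2 * u[1] / (l * alpha))
            x += K * np.exp(sigma * t) * np.sin(np.sqrt(omega_sq) * t)
    return x / (np.max(np.abs(x)) + 1e-12)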
142
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
143
+ page_content=' RESULTS 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
144
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
145
+ page_content=' Baselines We train fW with three different losses: multi-scale spectral loss [19], parameter loss, and PNP loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
146
+ page_content=' We use a batch size of 64 samples for spectral loss, and 256 samples for parameter and PNP loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
147
+ page_content=' The training proceeds for 70 epochs, where around 20% of the training set is seen at each epoch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
148
+ page_content=' We use Adam optimizer with learning rate 10−3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
149
+ page_content=' Table 1 reports the training time per epoch on a single Tesla V100 16GB GPU.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
150
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
151
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
152
+ page_content=' Evaluation with JTFS-based spectral loss We propose to use the L2 norm of JTFS coefficients error averaged over test set for evaluation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
153
+ page_content=' As a point of reference, we also include the average multi-scale spectral error, implemented as in Section 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
154
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
155
+ page_content=' One of the key distinctions between Euclidean JTFS distance and multi-scale spectral error is the former’s inclusion of spectro-temporal modulation information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
156
+ page_content=' Meanwhile, unlike mean squared parameter error, both metrics reflect perceptual closeness rather than parametric retrieval accuracy for each proposed model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
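A sketch of the two evaluation metrics, assuming a precomputed JTFS transform phi (e.g. from Kymatio [12, 13]) and using auraloss for the multi-scale spectral error [19]; the exact transform settings are not specified here.

import torch
import auraloss

mss = auraloss.freq.MultiResolutionSTFTLoss()          # multi-scale spectral error baseline

def jtfs_l2(phi, x_ref, x_est):
    # L2 norm of the JTFS coefficient error; averaged over the test set for reporting.
    return torch.linalg.norm(phi(x_ref) - phi(x_est))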
157
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
158
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
159
+ Discussion
+ Table 1. Report of average JTFS distance and MSS metrics evaluated on the test set. Six models are trained along two modalities: 1. the inclusion of pitch retrieval, i.e. regressing θ = {τ, log p, log D, α} vs. θ = {log ω1, τ, log p, log D, α}; and 2. the choice of loss function: P-loss, MSS loss, or PNP loss with adaptive damping mechanism. The best performing models with known and unknown pitch are P-loss and PNP loss respectively. Training with MSS loss is more time consuming than training with P-loss or PNP loss. Training with differentiable JTFS loss is unrealistic in the interest of time.
+ Loss                | Pitch   | JTFS distance (avg. on test set) | MSS (avg. on test set) | Training time per epoch
+ P-loss              | Known   | 22.23 ± 2.17                     | 0.31 ± 0.013           | 49 minutes
+ DDSP with MSS loss  | Known   | 31.86 ± 0.332                    | 0.335 ± 0.005          | 54 minutes
+ PNP with JTFS loss  | Known   | 23.58 ± 0.877                    | 0.335 ± 0.005          | 49 minutes
+ DDSP with JTFS loss | —       | —                                | —                      | est. > 1 day
+ P-loss              | Unknown | 61.91 ± 6.26                     | 1.02 ± 0.094           | 53 minutes
+ DDSP with MSS loss  | Unknown | 138.95 ± 37.12                   | 1.59 ± 0.307           | 59 minutes
+ PNP with JTFS loss  | Unknown | 61.21 ± 1.207                    | 0.97 ± 0.019           | 49 minutes
+ Despite being the optimal quadratic approximation of spectral loss, it is nontrivial to apply the bare PNP loss form of Equation 5 in experimental settings.
198
+ page_content=' On one hand, Φ◦g potentially has an undesirable property that exposes the Riemannian metric calculation to numerical precision errors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
199
+ page_content=' On the other hand, extreme deformation of the optimization landscape may lead to the same numerical instability faced by stochastic gradient descent with spectral loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
200
+ page_content=' We report on a few remedies that helped stabilize learning with PNP loss, and offer insights on future directions to take.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
201
+ page_content=' First and foremost, our preliminary experiments show that training with PNP loss without damping (λ = 0) is subject to serious convergence issues.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
202
+ page_content=' Recalling Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
203
+ page_content='2, indeed our empirical Ms suffer from high condition numbers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
204
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
205
+ page_content=' 2 shows the sorted eigenvalue distribution of all Ms in the test set, where Ms are rank-2, 3 or 4 matrices with eigenvalues ranging from 0 to 10^20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
206
+ page_content=' This could be an implication that entries of θ contain implicit linear dependencies in generator g, or that local variations of certain θ fail to linearize differences in the output of g or Φ ◦ g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
207
+ page_content=' As an example, the aspect ratio α influences the modal frequencies and decay rates via [18, Equations 12–13], where in fact its variant 1/α + 1/α2 could be a better choice of variable that linearizes g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
208
+ page_content=' To address the ill-conditioning of M, we attempted numerous damping mechanisms to update λ, namely constant λ, scheduled λ decay, and adaptive λ decay.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
209
+ page_content=' The intuition is to have L^{PNP}_θ start in the parameter loss regime and move towards the spectral loss regime while training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
210
+ page_content=' The best performing model is achieved with adaptive λ decay, as described in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
211
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
212
+ page_content=' We initialize λ to 10^20 to match the largest empirical σ_j^2, which is later adaptively decayed to 3 × 10^14 in 20 epochs, breaking records 7 times.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
213
+ page_content=' This indicates that fW is able to learn with the damped PNP loss, under the condition that λ is large enough to simulate the parameter loss regime and compensate for deficiencies in M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
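A sketch of the damped objective and of the adaptive decay described here. The placement of the damping term as M(θ) + λI and the decay factor are assumptions based on our reading of Section 2.2; only the initial value λ = 1e20 and the record-based trigger are stated above.

import torch

def damped_pnp_loss(theta_hat, theta, M, lam):
    # Bilinear form (theta_hat - theta)^T (M + lam * I) (theta_hat - theta), averaged over the batch.
    d = (theta_hat - theta).unsqueeze(-1)                          # (batch, J, 1)
    M_damped = M + lam * torch.eye(M.shape[-1], device=M.device)   # damping improves conditioning
    return (d.transpose(-1, -2) @ M_damped @ d).mean()

def adaptive_lambda(lam, val_loss, best_val, decay=0.5):
    # Decay lambda each time the validation loss breaks its record (the decay factor is assumed).
    if val_loss < best_val:
        return lam * decay, val_loss
    return lam, best_val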
214
+ page_content=' The diagonal elements of M(θ) can be regarded as both the applied weights’ magnitudes and proxies for θ’s perceptual significance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
215
+ page_content=' To gain further insights on how each model regresses different parameters, we visualize in Fig. 3 pairs of (|˜θ − θ|_j^2, M(θ)_{j,j}).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
217
+ page_content=' Three trends can be observed: First, τ and ω are regressed with the best accuracy across all learning objectives.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
218
+ page_content=' Second, spectral loss particularly struggles in pitch ω and inharmonicity p retrieval.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
219
+ page_content=' Third, we may interpret x-axis as describing from left to right samples with increasing perceptual significance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
220
+ page_content=' We observe that in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
221
+ page_content='3(b), PNP loss is able to suppress more errors in samples with high M(θ)j,j than parameter loss, by a nonnegligible margin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
222
+ page_content=' We believe that more of PNP loss’ mathematical potential can be exploited in the future, notably its ability to interpolate between various loss regimes and its use in hybrid optimization schemes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
223
+ page_content=' To start with, we plan to resort to a simpler differentiable synthesizer g Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
224
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
225
+ page_content=' X-axis: weight assigned by PNP to one of the physical parameters in θn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
226
+ page_content=' Y-axis: log squared estimation error for that same parameter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
227
+ page_content=' α is omitted due to its poor retrieval results from all models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
228
+ page_content=' that guarantees a well-conditioned Riemannian metric M(θ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
229
+ page_content=' More- over, we plan to explore other damping schemes and optimizers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
230
+ page_content=' The current update mechanism, originating from the Levenberg-Marquardt algorithm, aims to improve the conditioning of a matrix inversion problem in the Gauss-Newton algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
231
+ page_content=' However when used jointly with stochastic gradient descent, each λ update may change the opti- mization landscape drastically.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
232
+ page_content=' The resulting optimization behavior is thus not fully understood.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
233
+ page_content=' We consider interfacing nonlinear least squares solver with SGD and forming a hybrid learning scheme in future work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
234
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
235
+ page_content=' CONCLUSION In this article we have presented Perceptual-Neural-Physical (PNP) autoencoding, a bilinear form learning objective for the sound matching task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
236
+ page_content=' In our application, PNP optimizes the retrieval of physical parameters from sounds in a perceptually-motivated metric space, enabled by differentiable implementations of domain knowledge in physical modeling and computational proxy of neurophysiological construct of human auditory system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
237
+ page_content=' We demonstrated PNP’s mathematical proximity to spectral loss and its generalizability to parameter loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
238
+ page_content=' Using this formulation, we motivated and established one way of enabling smooth transition between optimizing in parameter and spectral loss regimes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
239
+ page_content=' We have presented damping mechanisms to facilitate its learning under ill-conditioned empirical settings and discussed its mathematical potential.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf'}
240
+ [Fig. 3 panels: (a) |ω̃ − ω|² vs. M[0,0]; (b) |τ̃ − τ|² vs. M[1,1]; (c) |p̃ − p|² vs. M[2,2]; (d) |D̃ − D|² vs. M[3,3]; legend: P-loss, Spec, PNP. Caption given above.]
261
+ 6. REFERENCES
+ [1] Andrew Horner, “Wavetable matching synthesis of dynamic instruments with genetic algorithms,” Journal of the Audio Engineering Society, vol. 43, no. 11, pp. 916–931, 1995.
+ [2] Jordie Shier, Kirk McNally, George Tzanetakis, and Ky Grace Brooks, “Manifold learning methods for visualization and browsing of drum machine samples,” Journal of the Audio Engineering Society, vol. 69, no. 1/2, pp. 40–53, 2021.
+ [3] Philippe Esling, Naotake Masuda, Adrien Bardet, Romeo Despres, Axel Chemla, et al., “Universal audio synthesizer control with normalizing flows,” in Proceedings of the International Conference on Digital Audio Effects (DAFX), 2019.
+ [4] Leonardo Gabrielli, Stefano Tomassetti, Carlo Zinato, and Francesco Piazza, “End-to-end learning for physics-based acoustic modeling,” IEEE Transactions on Emerging Topics in Computational Intelligence, vol. 2, no. 2, pp. 160–170, 2018.
+ [5] Naotake Masuda and Daisuke Saito, “Synthesizer sound matching with differentiable DSP,” in Proceedings of the International Society on Music Information Retrieval (ISMIR) Conference, 2021, pp. 428–434.
+ [6] Martin Roth and Matthew Yee-King, “A comparison of parametric optimization techniques for musical instrument tone matching,” Journal of the Audio Engineering Society, May 2011.
+ [7] Matthew Yee-King, Leon Fedden, and Mark d’Inverno, “Automatic programming of VST sound synthesizers using deep networks and other techniques,” IEEE Transactions on Emerging Topics in Computational Intelligence, vol. 2, pp. 150–159, 2018.
+ [8] Jesse Engel, Lamtharn (Hanoi) Hantrakul, Chenjie Gu, and Adam Roberts, “DDSP: Differentiable Digital Signal Processing,” in Proceedings of the International Conference on Learning Representations (ICLR), 2020.
+ [9] Joakim Andén, Vincent Lostanlen, and Stéphane Mallat, “Joint time–frequency scattering,” IEEE Transactions on Signal Processing, vol. 67, no. 14, pp. 3704–3718, 2019.
+ [10] Taishih Chi, Powen Ru, and Shihab A. Shamma, “Multiresolution spectrotemporal analysis of complex sounds,” The Journal of the Acoustical Society of America, vol. 118, no. 2, pp. 887–906, 2005.
+ [11] Vincent Lostanlen, Christian El-Hajj, Mathias Rossignol, Grégoire Lafay, Joakim Andén, and Mathieu Lagrange, “Time–frequency scattering accurately models auditory similarities between instrumental playing techniques,” EURASIP Journal on Audio, Speech, and Music Processing, vol. 2021, no. 1, pp. 1–21, 2021.
+ [12] Mathieu Andreux, Tomás Angles, Georgios Exarchakis, Roberto Leonarduzzi, Gaspar Rochette, Louis Thiry, John Zarka, Stéphane Mallat, Joakim Andén, Eugene Belilovsky, Joan Bruna, Vincent Lostanlen, Muawiz Chaudhary, Matthew J. Hirn, Edouard Oyallon, Sixin Zhang, Carmine Cella, and Michael Eickenberg, “Kymatio: Scattering transforms in Python,” Journal of Machine Learning Research, vol. 21, no. 60, pp. 1–6, 2020.
+ [13] John Muradeli, Cyrus Vahidi, Changhong Wang, Han Han, Vincent Lostanlen, Mathieu Lagrange, and George Fazekas, “Differentiable time–frequency scattering in Kymatio,” in Proceedings of the International Conference on Digital Audio Effects (DAFX), 2022.
+ [14] Mingxing Tan and Quoc Le, “EfficientNet: Rethinking model scaling for convolutional neural networks,” in Proceedings of the International Conference on Machine Learning (ICML), PMLR, 2019, pp. 6105–6114.
+ [15] Neil Zeghidour, Olivier Teboul, Félix de Chaumont Quitry, and Marco Tagliasacchi, “LEAF: A learnable frontend for audio classification,” in Proceedings of the International Conference on Learning Representations (ICLR), 2021.
+ [16] L. Trautmann and Rudolf Rabenstein, Digital Sound Synthesis by Physical Modeling Using the Functional Transformation Method, 2003.
+ [17] Maximilian Schäfer, Manuel Werner, and Rudolf Rabenstein, “Physical modeling in sound synthesis: Vibrating plates,” 2019.
+ [18] Han Han and Vincent Lostanlen, “wav2shape: Hearing the Shape of a Drum Machine,” in Proceedings of Forum Acusticum, 2020, pp. 647–654.
+ [19] Christian J. Steinmetz and Joshua D. Reiss, “auraloss: Audio focused loss functions in PyTorch,” in Digital Music Research Network One-day Workshop (DMRN+15), 2020.
DdE3T4oBgHgl3EQfUwqw/content/tmp_files/2301.04454v1.pdf.txt ADDED
@@ -0,0 +1,593 @@
1
+ Allo-centric Occupancy Grid Prediction for Urban Traffic Scene Using
2
+ Video Prediction Networks
3
+ Rabbia Asghar1, Lukas Rummelhard1, Anne Spalanzani1, Christian Laugier1
4
+ Abstract— Prediction of dynamic environment is crucial to
5
+ safe navigation of an autonomous vehicle. Urban traffic scenes
6
+ are particularly challenging to forecast due to complex interac-
7
+ tions between various dynamic agents, such as vehicles and
8
+ vulnerable road users. Previous approaches have used ego-
9
+ centric occupancy grid maps to represent and predict dynamic
10
+ environments. However, these predictions suffer from blurri-
11
+ ness, loss of scene structure at turns, and vanishing of agents
12
+ over longer prediction horizon. In this work, we propose a
13
+ novel framework to make long-term predictions by representing
14
+ the traffic scene in a fixed frame, referred to as allo-centric
15
+ occupancy grid. This allows for the static scene to remain fixed
16
+ and to represent motion of the ego-vehicle on the grid like
17
+ other agents’. We study the allo-centric grid prediction with
18
+ different video prediction networks and validate the approach
19
+ on the real-world Nuscenes dataset. The results demonstrate
20
+ that the allo-centric grid representation significantly improves
21
+ scene prediction, in comparison to the conventional ego-centric
22
+ grid approach.
23
+ Index Terms— Scene Prediction, Deep Learning, Autonomous
24
+ Vehicles
25
+ I. INTRODUCTION
26
+ Prediction of traffic scene evolution is essential to an
27
+ autonomous vehicle for planning as well as detecting dan-
28
+ gerous situations. In urban traffic scenarios, the vehicles not
29
+ only interact with other vehicles, but also share space with
30
+ vulnerable road users such as pedestrians and cyclists. Key
31
+ challenges involve the uncertainty and multi-modality of the
32
+ behaviour of agents in the environment, and complex multi-
33
+ agents interactions [1]. While human drivers show superior
34
+ ability to forecast the agents’ behaviour and interactions in
35
+ such traffic scenes, it remains a challenge for autonomous
36
+ vehicles.
37
+ Data-driven methods provide powerful tools to solve pre-
38
+ diction problems, particularly dealing with complex social
39
+ interactions [2]. Most conventional approaches are object
40
+ or agent-based and rely on heavily pre-processed data [3],
41
+ [4]. Dynamic Occupancy Grid Maps (DOGMs), on the other
42
+ hand, allow for end-to-end learning due to their discretized
43
+ spatial representation, without higher-level segmentation [5].
44
+ Additionally, DOGMs are versatile in terms of sensor depen-
45
+ dency, and can be generated from a variety of raw sensor
46
+ data, such as Lidar or camera images. In our work, we
47
+ use Bayesian-filter-based DOGM [6] that provide us with
48
+ a spatially-dense model representation of static and dynamic
49
+ space, as well as free and unknown space in the environment,
50
+ as shown in Fig. 1.
51
+ 1 Univ. Grenoble Alpes, Inria, 38000 Grenoble, France, email: First-
52
53
+ As the DOGM is generated using data from the vehicle-
54
+ mounted sensors, the grid is traditionally ego-centric, i.e. the
55
+ position of ego-vehicle is fixed in the grid. While this is an
56
+ effective method in scene representation, it complicates the
57
+ long-term prediction problem. For a dynamic ego-vehicle,
58
+ the complete scene translates and/or rotates around the ego-
59
+ vehicle, even the static components in the scene. Therefore,
60
+ the prediction network must transform every cell in the
61
+ grid, leading to blurry and vanishing static scene at longer
62
+ prediction time horizons.
63
+ To address this, we instead generate DOGMs with respect
64
+ to a fixed reference frame, referred to as allo-centric grid.
65
+ While the observed space around the ego-vehicle remains
66
+ the same, the static scene structure in the allo-centric grid
67
+ remains fixed. This is illustrated in Fig. 1 where the ego-
68
+ vehicle is encircled, the vehicle moves like other agents in
69
+ the scene.
70
+ We approach the long-term multi-step predictions of allo-
71
+ centric DOGM as a video prediction problem due to the
72
+ inherent similarities between an image and an occupancy
73
+ grid, and both being a spatio-temporal problem [7]. Results
74
+ incorporating different video prediction networks are stud-
75
+ ied, including state-of-the-art recurrent neural networks and
76
+ memory-augmented network approaches. We compare and
77
+ evaluate the prediction results of allo-centric and ego-centric
78
+ grids for identical scenes and demonstrate the superior per-
79
+ formance of the allo-centric grid predictions.
80
+ The proposed approach is validated with the real-world
81
+ NuScenes dataset [3] of urban traffic scenes. We show that
82
+ allo-centric grids significantly improve the prediction results
83
+ and demonstrate the ability to retain the scene structure and
84
+ learn behaviours.
85
+ The paper is organized as follows. Section II discusses
86
+ related work to video and scene predictions. Section III
87
+ describes the system overview. Section IV and V present
88
+ implementations, results and analysis. Finally conclusions
89
+ are drawn in section VI.
90
+ II. RELATED WORK
91
+ A. Video Prediction
92
+ Spatio-temporal deep-learning methods have been ef-
93
+ fectively used for video prediction problems. Commonly,
94
+ combinations of Convolutional Neural Networks (CNNs)
95
+ and Recurrent Neural Networks (RNNs) are incorporated.
96
+ CNNs are capable of extracting spatial information and
97
+ capturing inter-dependencies of the surrounding pixels while
98
+ RNNs, such as long short-term memory (LSTM) blocks,
99
100
+
101
+ Fig. 1: Overview of our proposed approach. The allo-centric DOGM is represented as an image. Each channel red, green and
102
+ blue represent unknown, dynamic and static cells respectively. The black space represents known free space. The ego-vehicle
103
+ is circled in dotted line in both input and target output sequences.
104
+ capture the sequential or temporal dependencies. Lotter et
105
+ al. proposed Predictive Coding Network (PredNet), a deep
106
+ learning network architecture that comprises vertically-
107
+ stacked Convolutional LSTMs (ConvLSTMs) where the local
108
+ error and the prediction signal are propagated bottom-up
109
+ and top-down respectively [8]. Wang et al. addresses the
110
+ video prediction challenges of capturing short-term and long-
111
+ term dynamics with the PredRNN architecture [9]. Building
112
+ on their original approach [10], they introduce memory-
113
+ decoupled spatio-temporal LSTM (ST-LSTM) blocks, fea-
114
+ ture zigzag memory flow and a novel curriculum learning
115
+ strategy to improve prediction results. Kim et al. takes in-
116
+ spiration from memory-augmented networks to use external
117
+ memory (LMC-Memory) to learn and store long-term motion
118
+ dynamics and propose a memory query decomposition to ad-
119
+ dress the high-dimensionality of motions in video predictions
120
+ [11].
121
+ B. Occupancy Grid Prediction
122
+ Jeon et al. proposed conventional ConvLSTM to predict
123
+ interaction-intensive traffic scenes on occupancy grids [12].
124
+ The approach represents only vehicles in the occupancy grid,
125
+ their states extracted from camera inputs. Desquaire et al.
126
+ [13], proposed an end-to-end object-tracking approach by
127
+ incorporating directly Lidar sensor data to predict the binary
128
+ grid, using recurrent neural network. To incorporate ego-
129
+ vehicle motion, they utilize a spatial transformer to allow
130
+ internal memory of RNNs to learn environment of the state.
131
+ Mohajerin et al. [14] suggested an RNN-based architecture
132
+ with a difference learning method, and makes OGM pre-
133
+ diction in the field of view of ego-vehicle front camera.
134
+ Schreiber et al. [15] proposed an encoder-decoder network
135
+ architecture, along with skip connections, to make long-term
136
+ DOGM predictions. While they collect the sensor data from
137
+ an autonomous vehicle, the vehicle remains stationary and
138
+ only acts as the sensor collection point at different inter-
139
+ sections. Itkina et al. proposed to use evidential occupancy
140
+ grid and implement PredNet architecture for the prediction
141
+ [16]. The approach is then carried forward to develop the
142
+ double-pronged architecture [17] and attention-augmented
143
+ ConvLSTM [18]. The latter work is able to make long-term
144
+ predictions, however at turns the predictions still lose the
145
+ scene structure. Mann et al. [19] addressed the problem of
146
+ OGM prediction in urban scenes by incorporating vehicles
147
+ semantics in the environment. Their proposed method de-
148
+ pends on the annotated vehicle data labels available in the
149
+ dataset.
150
+ Contrary to the conventional Occupancy Grid Prediction,
151
+ we present an allo-centric DOGM representation to predict
152
+ the urban traffic scene with respect to a fixed reference frame.
153
+ Apart from the conventional recurrent representation learning
154
+ approaches, we also use memory-augmented learning-based
155
+ video-prediction method, in relevance to learning long-term
156
+ motion context of the dynamic agents.
157
+ III. SYSTEM OVERVIEW
158
+ We discuss here the overall proposed approach for allo-
159
+ centric DOGM prediction, the pipeline is summarized in Fig.
160
+ 1.
161
+ A. Dynamic Occupancy Grid Maps
162
+ Dynamic occupancy grid maps provide a discretized rep-
163
+ resentation of environment in a bird’s eye view, where every
164
+ cell in the grid is independent and carries information about
165
+ the associated occupancy and velocity.
166
+ To generate DOGMs, we incorporate the Conditional
167
+ Monte Carlo Dense Occupancy Tracker (CMCDOT) [6].
168
+ This approach associates four occupancy states to the grid.
169
+ Each cell carries the probabilities of the cell being i) occupied
170
+ and static, ii) occupied and dynamic, iii) unoccupied or free
171
+ and iv) if the occupancy is unknown. The probabilities of
172
+ these four states sum to one. In our work, we make use
173
+ of three of these states and represent the grid as an RGB
174
+ image. The channels Red, Green and Blue represent the
175
+ unknown state, dynamic state and static state respectively.
176
+ The associated probabilities of the cell in the 3-channel
177
+ DOGM grid are interpreted as the pixel values of the RGB
178
+ images. The RGB grid images can be seen in Fig. 1-2. Low
179
+ probabilities in all three channels leave the grid-image black,
180
+ therefore, representing free space.
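A minimal sketch of this RGB encoding, assuming the three per-cell state probabilities are available as arrays of shape (H, W); the names and the uint8 scaling are illustrative.

import numpy as np

def dogm_to_rgb(p_unknown, p_dynamic, p_static):
    # Red = unknown, Green = dynamic, Blue = static; free space (all probabilities low) stays black.
    rgb = np.stack([p_unknown, p_dynamic, p_static], axis=-1)
    return (rgb * 255).astype(np.uint8)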
181
+ For allo-centric grid generation, we define the grid in the
182
+ world frame, close to the initial position of ego-vehicle.
183
+ The state probabilities are initially computed in an ego-
184
+ centric grid, since we use the on-board sensor data. To ensure
185
+ that we have cell information for the complete allo-centric
186
+ grid dimensions when the vehicle is dynamic and moving
187
+ away from the world frame origin, a much larger ego-centric
188
+
189
+ Allo-centric
190
+ Video
191
+ DOGM
192
+ Prediction
193
+ Generation
194
+ NetworkDOGM is computed. This information is then fused to update
195
+ every cell states in the allo-centric grid in the world frame.
196
+ We compare the allo-centric and ego-centric grids at 4
197
+ time instants for the same scene and same grid dimensions in
198
+ Figure 2. In the allo-centric grid, the ego-vehicle (illustrated
199
+ in the pink box) can be seen moving with respect to the grid,
200
+ while it remains fixed in the ego-centric grid. It is important
201
+ to note that the observable space around the ego-vehicle
202
+ remains the same for both grids. However, since they are
203
+ defined in different frames, the two cover different spaces in
204
+ the scene at a given time. We illustrate the common space
205
+ covered by both grids since the start of the sequence, marked
206
+ by yellow boundary.
207
+ Fig. 2: Visualization of allo-centric and ego-centric grids,
208
+ generated for the same scene. The area marked by yellow
209
+ lines is the common region covered by both grids up until
210
+ the t-th sequence. The ego-vehicle is boxed in pink grid and
211
+ the bus passing by is encircled in white.
212
+ B. Problem Formulation
213
+ We formally define the task of predicting the scene in
214
+ allo-centric DOGM representation, as sequence-to-sequence
215
+ learning, see Fig. 1. A sequence comprises a set of
216
+ sequential grid images that capture the evolution of a given
217
+ scene. Let Xt ∈ R^(3×W×H) and Yt ∈ R^(3×W×H) be the t-th frame
218
+ of the 3-channel grid-image where W and H denote the width
219
+ and height respectively. The input sequence for the grid-
220
+ image is denoted by Xt−N:t, representing N consecutive
221
+ frames. Given a set of input sequence, the task of the
222
+ network is to predict future grid images, i.e. output sequence.
223
+ The target and predicted output sequences are denoted by
224
+ Yt+1:t+P and ˆYt+1:t+P where P is the prediction horizon.
225
+ For training and testing data, the DOGMs can be generated
226
+ for both the input and the target sequences, leaving behind
227
+ no additional need for labelled data or human intervention.
228
+ Since the input sequences, Xt−N:t, and output sequences,
229
+ Yt+1:t+P , are represented as images, this prediction task can
230
+ be considered a video prediction problem.
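For illustration, the tensor shapes of one sample under this formulation; N = 10, P = 25 and the 192 x 192 resizing follow Section IV, and the random tensors stand in for real grid images.

import torch

N, P, W, H = 10, 25, 192, 192
x = torch.rand(N, 3, W, H)    # input sequence  X_{t-N:t}
y = torch.rand(P, 3, W, H)    # target sequence Y_{t+1:t+P}
# y_hat = model(x)            # a video prediction network outputs a tensor shaped like y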
231
+ C. Deep Learning Prediction Architectures
232
+ To study and compare the scene prediction with ego-
233
+ centric and allo-centric grids, we train our datasets with
234
+ different video prediction networks. We consider 3 networks,
235
+ briefly discussed in section II-A: PredNet, PredRNN, LMC-
236
+ Memory with memory alignment learning (here on referred
237
+ as LMC-Memory).
238
+ PredNet [8], inspired from predictive coding, makes pre-
239
+ dictions based on how the predicted frames deviate from the
240
+ target [20]. The original work tests the network on vehicle
241
+ mounted camera images from Kitti dataset [21] and demon-
242
+ strates the ability to capture both egocentric motion as well as
243
+ motion of objects in camera images. We consider PredRNN
244
+ [9] and LMC-Memory architecture [11] as the state of the
245
+ art video prediction networks that aim to capture long-term
246
+ dependencies and motion context. PredRNN implements
247
+ novel ST-LSTM units with a zigzag internal memory flow
248
+ and proposes memory decoupling loss to discourage learning
249
+ redundant features. LMC-Memory architecture, on the other
250
+ hand, proposes an external memory block with its own
251
+ parameters to store various motion contexts. The approach
252
+ also offers an efficient computation method since the motion
253
+ context for long-term multi-step predictions is computed only
254
+ once for a given input sequence.
255
+ We study these networks’ capabilities to retain the occu-
256
+ pancy of the static region, and the ability to predict motion
257
+ of dynamic agents in DOGM.
+ D. Unknown Channel and Loss Functions
+ In both ego-centric and allo-centric grids, a significant part of the scene remains unobserved, see Fig. 2 (the unknown channel is represented in red). This is more pronounced in the initial frames of the allo-centric grid, where the Lidar is unable to detect the area farthest from the ego-vehicle.
+ While it is more relevant to learn the evolution of the static and dynamic components in the scene, the inclusion of the unknown channel is useful for our prediction task. A Lidar-based grid is often unable to capture the full shape of a vehicle. For example, we can see in Fig. 2 how the cells occupied by the bus vary across time steps on the grid. It is only at the 2.0s time step that a rectangular shape is observed; otherwise different parts of the bus remain unknown. The unknown channel at different instants also carries spatial information about the agents with respect to the ego-vehicle. Thus, with the sequential frames and the unknown channel, we help the network extract spatial information and learn the scene representation.
+ The inclusion of the unknown channel and the emphasis on learning the static and dynamic components are addressed in the loss function. The loss function L in the implemented video prediction networks is modified to carry the weighted sum of the RGB channels:
+ L = α L_R + β (L_G + L_B)    (1)
+ where L_R, L_G and L_B represent the loss for the unknown (red), dynamic (green) and static (blue) channels respectively. In order to encourage the network to learn and improve the prediction of the static and dynamic channels, α is always kept smaller than β.
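+
+ As a concrete illustration, Eq. (1) can be written in a few lines. The snippet below is our own sketch in PyTorch, assuming each per-channel term is an L2 (MSE) loss and that predictions and targets are batched 3-channel grid images; the actual per-channel loss depends on the network, as described in section IV-B.
+
+ import torch.nn.functional as F
+
+ def weighted_channel_loss(pred, target, alpha=0.2, beta=0.8):
+     """Eq. (1): L = alpha * L_R + beta * (L_G + L_B).
+
+     pred, target: tensors of shape (batch, 3, W, H), channels ordered as
+     (unknown = red, dynamic = green, static = blue).
+     """
+     l_r = F.mse_loss(pred[:, 0], target[:, 0])  # unknown channel
+     l_g = F.mse_loss(pred[:, 1], target[:, 1])  # dynamic channel
+     l_b = F.mse_loss(pred[:, 2], target[:, 2])  # static channel
+     return alpha * l_r + beta * (l_g + l_b)
+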
+
+ IV. EXPERIMENTS
+ A. Dataset
+ We study the prediction performance on the real-world NuScenes dataset [3]. The original dataset consists of 850 scenes for training and 150 scenes for testing; each scene is approximately 20s long. We generate the DOGM grids based on the Lidar point cloud and the available odometry. For the allo-centric grid, we represent the scene with respect to a fixed reference frame, with grid dimensions of 60 x 60 m and a resolution of 0.1 m per cell. Each sequence starts with the ego-vehicle heading facing up, capturing the scene 10 m behind and 50 m ahead of it. The initial pose was selected to ensure that the ego-vehicle remains within the grid for the total sequence length, even when driving at high speed. For the ego-centric grid, we generate a grid of the same dimensions and resolution, with the ego-vehicle fixed at the center. Each sequence comprises 35 frames, a duration of 3.5s, with DOGM grid images generated every 0.1s. In total, we have 4,250 training and 750 testing sequences.
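+
+ For illustration, the sketch below shows one way the roughly 20s scenes can be cut into 3.5s sequences of 35 frames at 0.1s resolution; the helper name and the non-overlapping windowing are our assumptions, not the authors' released pipeline, but they are consistent with 850 scenes yielding 4,250 training sequences (about 5 sequences per scene).
+
+ def split_scene_into_sequences(frames, seq_len=35, stride=35):
+     """Cut one scene's list of DOGM grid images (one per 0.1s) into fixed-length sequences.
+
+     frames: list of (3, W, H) arrays for one ~20s scene (about 200 frames).
+     Returns a list of sequences, each seq_len frames (3.5s) long.
+     """
+     sequences = []
+     for start in range(0, len(frames) - seq_len + 1, stride):
+         sequences.append(frames[start:start + seq_len])
+     return sequences
+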
+ B. Training
+ The input sequence Xt−9:t consists of 10 frames (1.0s). Each network is trained to make predictions ˆYt+1:t+25 for 25 future frames (2.5s). Both the allo-centric and ego-centric datasets are trained with the original parameters of the respective video prediction network. For training with the PredRNN and LMC-Memory networks, both allo-centric and ego-centric grid images are resized to 192x192 pixels. PredRNN is trained with a batch size of 4 and a learning rate of 10^-4. The number of channels of each hidden state is set to 64. The loss function is the sum of the L2 and decoupling losses, and the values of α and β in Eq. (1) are set to 0.2 and 0.8. LMC-Memory is trained with a learning rate of 2x10^-4; the memory slot size is set to 100 and the ConvLSTM for frame prediction to 4 layers. The loss function is the sum of the L1 and L2 losses, and the values of α and β are set to 0.2 and 0.8. For training with PredNet, the grid images are resized to 160x160 pixels. The network is set to 4 hierarchical layers with an initial learning rate of 10^-3. The loss function is the L1 loss of only the first layer, and the values of α and β are set to 0.05 and 0.8. All models are trained with the Adam optimizer for 30 epochs.
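+
+ For reference, the reported hyper-parameters can be grouped as below; this dictionary is only a restatement of the settings listed above, not configuration code released with any of the networks.
+
+ TRAIN_CONFIG = {
+     "PredRNN":    {"input_size": (192, 192), "batch_size": 4, "lr": 1e-4,
+                    "hidden_channels": 64, "loss": "L2 + decoupling",
+                    "alpha": 0.2, "beta": 0.8},
+     "LMC-Memory": {"input_size": (192, 192), "lr": 2e-4, "memory_slots": 100,
+                    "convlstm_layers": 4, "loss": "L1 + L2",
+                    "alpha": 0.2, "beta": 0.8},
+     "PredNet":    {"input_size": (160, 160), "lr": 1e-3, "hierarchical_layers": 4,
+                    "loss": "L1 (first layer only)", "alpha": 0.05, "beta": 0.8},
+     "common":     {"optimizer": "Adam", "epochs": 30,
+                    "input_frames": 10, "pred_frames": 25},
+ }
+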
+ V. EVALUATION
+ For evaluation, we are particularly interested in the static and dynamic agents in the scene. In section III-D, we discussed the utility of the unknown regions in learning the scene representation. However, the unknown region occupies a large portion of the grid and, in evaluation, would overshadow the performance on the more interesting and relevant segments: the static and dynamic regions. For this reason, we evaluate the dataset and network performances based on two channels of the predicted images, the blue and green channels representing the static and dynamic components of the scene. We encourage the reader to refer to the video¹ for a better visualization of the results.
+ ¹ https://youtu.be/z-0BVM93X8c
+ A. Quantitative Evaluation
+ The allo-centric and ego-centric grids at any instant observe different parts of the scene, see Fig. 2. For a fair comparison between them, we modify the test dataset and crop out, from each frame at time t, the part of the scene that has not yet been observed by both grids. Thus, for example, the parts of the grids outside of the yellow dotted boxes in Fig. 2 are blacked out for the input sequence frames Xt−N:t as well as for the target frames in the output sequence Yt+1:t+P.
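+
+ A minimal sketch of this masking step is given below (our illustration): it assumes precomputed boolean maps of which cells each grid has observed up to frame t, which the paper does not detail, and simply blacks out everything outside the common region.
+
+ import numpy as np
+
+ def mask_to_common_region(frames, seen_allo, seen_ego):
+     """Black out cells that have not yet been observed by both grids.
+
+     frames:    (T, 3, W, H) grid images (input or target sequence).
+     seen_allo: (T, W, H) booleans, True where the allo-centric grid has observed the cell.
+     seen_ego:  (T, W, H) booleans, same for the ego-centric grid (hypothetical precomputed maps).
+     """
+     common = (seen_allo & seen_ego)[:, None, :, :]   # (T, 1, W, H), broadcast over channels
+     return np.where(common, frames, 0.0)             # cells outside the common region become black
+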
+ We measure the performances using three metrics: MSE (Mean Squared Error), SSIM (Structural Similarity Index Measure), and LPIPS (Learned Perceptual Image Patch Similarity) [22]. MSE is calculated from the pixel-wise difference between the ground truth and the predicted frame, per channel and per cell. However, with MSE, the slightest error in the predicted motion can result in large errors on the ego-centric grid dataset. The SSIM and LPIPS metrics evaluate the prediction results based on structural similarity and perceptual similarity respectively. Lower values are better for MSE and LPIPS, while higher values are better for SSIM.
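+
+ As a rough sketch of how these scores can be computed per predicted frame, the snippet below uses scikit-image's SSIM and the lpips package as stand-ins and, as described above, only scores the dynamic (green) and static (blue) channels; the exact library versions and preprocessing choices are our assumptions.
+
+ import numpy as np
+ import torch
+ import lpips
+ from skimage.metrics import structural_similarity as ssim
+
+ lpips_fn = lpips.LPIPS(net="alex")  # learned perceptual metric
+
+ def evaluate_frame(pred, target):
+     """pred, target: (3, W, H) arrays in [0, 1]; channel 0 (unknown) is ignored."""
+     p, t = pred[1:], target[1:]                       # keep green and blue channels only
+     mse = float(np.mean((p - t) ** 2))
+     ssim_score = ssim(p.transpose(1, 2, 0), t.transpose(1, 2, 0),
+                       channel_axis=-1, data_range=1.0)
+     # LPIPS expects 3-channel images scaled to [-1, 1]; zero out the unknown channel.
+     def to_tensor(a):
+         img = np.stack([np.zeros_like(a[0]), a[0], a[1]])
+         return torch.from_numpy(img).float().unsqueeze(0) * 2 - 1
+     lpips_score = float(lpips_fn(to_tensor(p), to_tensor(t)))
+     return mse, ssim_score, lpips_score
+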
+ Table I shows the average results over the complete 2.5s prediction horizon. The MSE score of the allo-centric grids is significantly lower than that of the ego-centric grids. Since the complete scene transforms with respect to the ego-vehicle, the MSE is always higher in the ego-centric grid. The SSIM and LPIPS scores are also significantly better for the allo-centric grid, due to the tendency of ego-centric grids to get increasingly blurry at longer prediction horizons.
+ TABLE I: Average results with allo-centric and ego-centric grids for a prediction horizon of 2.5s. The allo-centric grid outperforms the ego-centric grid with all three video prediction networks.
+
+ Network          | MSE x 10^-2 (↓) | SSIM (↑) | LPIPS (↓)
+ Allo-centric grid
+   LMC-Memory     |      0.894      |  0.895   |   0.167
+   PredRNN        |      0.882      |  0.904   |   0.167
+   PredNet        |      0.905      |  0.888   |   0.172
+ Ego-centric grid
+   LMC-Memory     |      1.302      |  0.856   |   0.217
+   PredRNN        |      1.138      |  0.845   |   0.234
+   PredNet        |      1.335      |  0.847   |   0.225
+ In Fig. 3, we plot the metric scores at every 0.5s prediction step. The results with the allo-centric grid (shown in blue) are consistently better than those with the ego-centric grid. Among the three prediction networks, PredRNN overall performs best with the allo-centric grids. However, with the ego-centric grids (results shown in orange), PredRNN offers a good MSE score but its SSIM and LPIPS performances drop after 1.0s. This is because PredRNN tends to make blurry and diffused predictions in the output frames; this helps reduce the MSE, but the scene loses its structure. This is further seen in the qualitative results discussed in section V-B and illustrated in Fig. 4.
+ Fig. 3: Results with the MSE (↓), SSIM (↑) and LPIPS (↓) metrics for allo-centric and ego-centric grids, for input sequences of 1.0s and prediction horizons up to 2.5s. For fair comparison, all test sequence frames were modified to only contain the scene observable in both the allo-centric and ego-centric grids. The allo-centric grid (results plotted in blue) outperforms the ego-centric grid with all three video prediction networks.
+
+ Fig. 4: Qualitative results for the ego-vehicle leaving a roundabout on both (a) allo-centric and (b) ego-centric grids. The input sequence consists of 10 frames (1.0s) and the predicted output sequence of up to 25 frames (2.5s). The prediction results are shown at the 0.5s, 1.5s and 2.5s instants and are magnified at the regions of interest, marked by a red box in the target (ground truth) frames. The best results are observed with the LMC-Memory network on the allo-centric grid, which retains the scene structure and predicts the motion of the ego-vehicle best.
+
+ B. Qualitative Evaluation
+ The prediction results between the allo-centric and ego-centric grids differ drastically when the ego-vehicle is turning at an intersection or driving on a curved road. Figure 4 shows results for a sequence where the ego-vehicle is exiting a roundabout. In this scene, while there are no other dynamic agents, the network needs to predict the behaviour of the ego-vehicle as it drives along the curved static segment (corresponding to the road structure) and heads towards static objects and obstacles. For the allo-centric grids, the challenge is to predict the ego-vehicle pose while the scene remains static. The best results are achieved with LMC-Memory: the vehicle pose is well predicted up to 2.5s, and its orientation is adjusted so that it does not hit the static components. For the same grid, PredRNN fails to learn and predict this behaviour, resulting in falsely predicted collisions; the ego-vehicle, while getting blurrier, diffuses into the static obstacles on the road. With PredNet, the ego-vehicle is almost completely lost at the 1.5s prediction horizon, which is expected since PredNet is not designed for long-term video prediction. With all three networks the ego-vehicle gets blurrier, but with PredRNN the static scene also tends to blur at longer prediction horizons.
+ In the ego-centric grid, the whole scene rotates around the ego-vehicle. LMC-Memory and PredNet largely lose the static components ahead of the vehicle, and the rotation results in increasing blurriness at every time step. PredRNN predictions are more diffused, and faint blurry cells are still visible ahead of the vehicle even at the 2.5s prediction horizon. In the context of planning and safe navigation, this high uncertainty in the environment structure renders the prediction results unreliable.
+ VI. DISCUSSION AND FUTURE WORK
+ In this work, we presented a novel allo-centric dynamic occupancy grid approach for long-term prediction of urban traffic scenes, and compared it to the conventional ego-centric DOGM approach. We trained and tested various video prediction networks to show that the allo-centric DOGM representation has a superior ability to predict the same scene. The most significant improvement is the allo-centric grid's ability to retain the static scene structure, especially when the vehicle turns. The ego-centric grid, on the other hand, tends to lose the static scene, and hence the crucial information about whether a given space is occupied or free.
+ The results of allo-centric grid prediction with the state-of-the-art PredRNN and LMC-Memory approaches have shown complementary benefits. PredRNN predictions, though they diffuse and get blurrier, are capable of maintaining agents longer, whereas LMC-Memory shows a better tendency to learn behaviours than PredRNN.
+ It is pertinent to mention that the two grids are still very similar. In both cases, the observable space updates relative to the position of the vehicle in the scene. Thus, in the allo-centric grid, while the grid is no longer fixed to the ego-vehicle, the ego-vehicle bias remains.
+ All three video prediction networks tested in this work address the prediction problem as deterministic. However, the behaviour of agents in urban traffic scenes tends to be multimodal. For future work, the addition of multimodal prediction capabilities to the network architecture would be interesting. Additionally, the incorporation of semantics in the occupancy grid, such as agent type and offline road information, could assist in learning behaviours and interactions.
+ REFERENCES
+ [1] S. Mozaffari, O. Y. Al-Jarrah, M. Dianati, P. Jennings, and A. Mouzakitis, "Deep learning-based vehicle behavior prediction for autonomous driving applications: A review," IEEE Transactions on Intelligent Transportation Systems, vol. 23, no. 1, pp. 33–47, 2020.
+ [2] A. Alahi, K. Goel, V. Ramanathan, A. Robicquet, L. Fei-Fei, and S. Savarese, "Social LSTM: Human trajectory prediction in crowded spaces," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 961–971.
+ [3] H. Caesar, V. Bankiti, A. H. Lang, S. Vora, V. E. Liong, Q. Xu, A. Krishnan, Y. Pan, G. Baldan, and O. Beijbom, "nuScenes: A multimodal dataset for autonomous driving," arXiv preprint arXiv:1903.11027, 2019.
+ [4] S. Ettinger, S. Cheng, B. Caine, C. Liu, H. Zhao, S. Pradhan, Y. Chai, B. Sapp, C. R. Qi, Y. Zhou et al., "Large scale interactive motion forecasting for autonomous driving: The Waymo Open Motion Dataset," in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 9710–9719.
+ [5] A. Nègre, L. Rummelhard, and C. Laugier, "Hybrid sampling Bayesian occupancy filter," in 2014 IEEE Intelligent Vehicles Symposium Proceedings. IEEE, 2014, pp. 1307–1312.
+ [6] L. Rummelhard, A. Nègre, and C. Laugier, "Conditional Monte Carlo dense occupancy tracker," in 2015 IEEE 18th International Conference on Intelligent Transportation Systems. IEEE, 2015, pp. 2485–2490.
+ [7] S. Oprea, P. Martinez-Gonzalez, A. Garcia-Garcia, J. A. Castro-Vargas, S. Orts-Escolano, J. Garcia-Rodriguez, and A. Argyros, "A review on deep learning techniques for video prediction," IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. [Online]. Available: https://arxiv.org/pdf/2004.05214.pdf
+ [8] W. Lotter, G. Kreiman, and D. Cox, "Deep predictive coding networks for video prediction and unsupervised learning," 5th International Conference on Learning Representations, ICLR 2017 - Conference Track Proceedings, pp. 1–18, 2017.
+ [9] Y. Wang, H. Wu, J. Zhang, Z. Gao, J. Wang, P. Yu, and M. Long, "PredRNN: A recurrent neural network for spatiotemporal predictive learning," IEEE Transactions on Pattern Analysis and Machine Intelligence, pp. 1–1, 2022.
+ [10] Y. Wang, M. Long, J. Wang, Z. Gao, and P. S. Yu, "PredRNN: Recurrent neural networks for predictive learning using spatiotemporal LSTMs," in Advances in Neural Information Processing Systems, I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, Eds., vol. 30. Curran Associates, Inc., 2017.
+ [11] S. Lee, H. G. Kim, D. H. Choi, H.-I. Kim, and Y. M. Ro, "Video prediction recalling long-term motion context via memory alignment learning," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 3054–3063.
+ [12] H.-S. Jeon, D.-S. Kum, and W.-Y. Jeong, "Traffic scene prediction via deep learning: Introduction of multi-channel occupancy grid map as a scene representation," in 2018 IEEE Intelligent Vehicles Symposium (IV), 2018, pp. 1496–1501.
+ [13] J. Dequaire, P. Ondrúška, D. Rao, D. Wang, and I. Posner, "Deep tracking in the wild: End-to-end tracking using recurrent neural networks," The International Journal of Robotics Research, vol. 37, no. 4-5, pp. 492–512, Jun. 2017. [Online]. Available: https://doi.org/10.1177/0278364917710543
+ [14] N. Mohajerin and M. Rohani, "Multi-step prediction of occupancy grid maps with recurrent neural networks," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 10600–10608.
+ [15] M. Schreiber, S. Hoermann, and K. Dietmayer, "Long-term occupancy grid prediction using recurrent neural networks," in 2019 International Conference on Robotics and Automation (ICRA). IEEE, 2019, pp. 9299–9305.
+ [16] M. Itkina, K. Driggs-Campbell, and M. J. Kochenderfer, "Dynamic environment prediction in urban scenes using recurrent representation learning," in 2019 IEEE Intelligent Transportation Systems Conference (ITSC). IEEE, 2019, pp. 2052–2059. [Online]. Available: https://arxiv.org/abs/1904.12374
+ [17] M. Toyungyernsub, M. Itkina, R. Senanayake, and M. J. Kochenderfer, "Double-prong ConvLSTM for spatiotemporal occupancy prediction in dynamic environments," arXiv preprint arXiv:2011.09045, 2020. [Online]. Available: http://arxiv.org/abs/2011.09045
+ [18] B. Lange, M. Itkina, and M. J. Kochenderfer, "Attention augmented ConvLSTM for environment prediction," arXiv preprint arXiv:2010.09662, 2020.
+ [19] K. S. Mann, A. Tomy, A. Paigwar, A. Renzaglia, and C. Laugier, "Predicting future occupancy grids in dynamic environment with spatio-temporal learning," 2022. [Online]. Available: https://arxiv.org/abs/2205.03212
+ [20] R. P. Rane, E. Szügyi, V. Saxena, A. Ofner, and S. Stober, "PredNet and predictive coding: A critical review," in Proceedings of the 2020 International Conference on Multimedia Retrieval, 2020, pp. 233–241.
+ [21] A. Geiger, P. Lenz, C. Stiller, and R. Urtasun, "Vision meets robotics: The KITTI dataset," International Journal of Robotics Research (IJRR), 2013.
+ [22] R. Zhang, P. Isola, A. A. Efros, E. Shechtman, and O. Wang, "The unreasonable effectiveness of deep features as a perceptual metric," in CVPR, 2018.
+
DdE3T4oBgHgl3EQfUwqw/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,493 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf,len=492
2
+ page_content='Allo-centric Occupancy Grid Prediction for Urban Traffic Scene Using Video Prediction Networks Rabbia Asghar1, Lukas Rummelhard1, Anne Spalanzani1, Christian Laugier1 Abstract— Prediction of dynamic environment is crucial to safe navigation of an autonomous vehicle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
3
+ page_content=' Urban traffic scenes are particularly challenging to forecast due to complex interac- tions between various dynamic agents, such as vehicles and vulnerable road users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
4
+ page_content=' Previous approaches have used ego- centric occupancy grid maps to represent and predict dynamic environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
5
+ page_content=' However, these predictions suffer from blurri- ness, loss of scene structure at turns, and vanishing of agents over longer prediction horizon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
6
+ page_content=' In this work, we propose a novel framework to make long-term predictions by representing the traffic scene in a fixed frame, referred as allo-centric occupancy grid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
7
+ page_content=' This allows for the static scene to remain fixed and to represent motion of the ego-vehicle on the grid like other agents’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
8
+ page_content=' We study the allo-centric grid prediction with different video prediction networks and validate the approach on the real-world Nuscenes dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
9
+ page_content=' The results demonstrate that the allo-centric grid representation significantly improves scene prediction, in comparison to the conventional ego-centric grid approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
10
+ page_content=' Index Terms— Scene Prediction, Deep Learning, Autonomous Vehicles I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
11
+ page_content=' INTRODUCTION Prediction of traffic scene evolution is essential to an autonomous vehicle for planning as well as detecting dan- gerous situations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
12
+ page_content=' In urban traffic scenarios, the vehicles not only interact with other vehicles, but also share space with vulnerable road users such as pedestrians and cyclists.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
13
+ page_content=' Key challenges involve the uncertainty and multi-modality of the behaviour of agents in the environment, and complex multi- agents interactions [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
14
+ page_content=' While human drivers show superior ability to forecast the agents’ behaviour and interactions in such traffic scenes, it remains a challenge for autonomous vehicles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
15
+ page_content=' Data-driven methods provide powerful tools to solve pre- diction problems, particularly dealing with complex social interactions [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
16
+ page_content=' Most conventional approaches are object or agent-based and rely on heavily pre-processed data [3], [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
17
+ page_content=' Dynamic Occupancy Grip Maps (DOGMs), on the other hand, allow for end-to-end learning due to their discretized spatial representation, without higher-level segmentation [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
18
+ page_content=' Additionally, DOGMs are versatile in terms of sensor depen- dency, and can be generated from a variety of raw sensor data, such as Lidar or camera images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
19
+ page_content=' In our work, we use Bayesian-filter-based DOGM [6] that provide us with a spatially-dense model representation of static and dynamic space, as well as free and unknown space in the environment, as shown in Fig1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
20
+ page_content=' 1 Univ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
21
+ page_content=' Grenoble Alpes, Inria, 38000 Grenoble, France, email: First- Name.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
22
+ page_content='LastName@inria.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
23
+ page_content='fr As the DOGM is generated using data from the vehicle- mounted sensors, the grid is traditionally ego-centric,i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
24
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
25
+ page_content=' the position of ego-vehicle is fixed in the grid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
26
+ page_content=' While this is an effective method in scene representation, it complicates the long-term prediction problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
27
+ page_content=' For a dynamic ego-vehicle, the complete scene translates and/or rotates around the ego- vehicle, even the static components in the scene.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
28
+ page_content=' Therefore, the prediction network must transform every cell in the grid, leading to blurry and vanishing static scene at longer prediction time horizons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
29
+ page_content=' To address this, we instead generate DOGMs with respect to a fixed reference frame, referred as allo-centric grid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
30
+ page_content=' While the observed space around the ego-vehicle remains the same, the static scene structure in the allo-centric grid remains fixed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
31
+ page_content=' This is illustrated in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
32
+ page_content=' 1 where the ego- vehicle is encircled, the vehicle moves like other agents in the scene.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
33
+ page_content=' We approach the long-term multi-step predictions of allo- centric DOGM as a video prediction problem due to the inherent similarities between an image and an occupancy grid, and both being a spatio-temporal problem [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
34
+ page_content=' Results incorporating different video prediction networks are stud- ied, including state-of-the-art recurrent neural networks and memory-augmented network approaches.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
35
+ page_content=' We compare and evaluate the prediction results of allo-centric and ego-centric grids for identical scenes and demonstrate the superior per- formances of the allo-centric grid predictions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
36
+ page_content=' The proposed approach is validated with the real-world NuScenes dataset [3] of urban traffic scenes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
37
+ page_content=' We show that allo-centric grids significantly improve the prediction results and demonstrate the ability to retain the scene structure and learn behaviours.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
38
+ page_content=' The paper is organized as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
39
+ page_content=' Section II discusses related work to video and scene predictions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
40
+ page_content=' Section III describes the system overview.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
41
+ page_content=' Section IV and V present implementations, results and analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
42
+ page_content=' Finally conclusions are drawn in section VI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
43
+ page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
44
+ page_content=' RELATED WORK A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
45
+ page_content=' Video Prediction Spatio-temporal deep-learning methods have been ef- fectively used for video prediction problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
46
+ page_content=' Commonly, combinations of Convolutional Neural Networks (CNNs) and Recurrent Neural Networks (RNNs) are incorporated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
47
+ page_content=' CNNs are capable of extracting spatial information and capturing inter-dependencies of the surrounding pixels while RNNs, such as long short-term memory (LSTM) blocks, arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
48
+ page_content='04454v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
49
+ page_content='CV] 11 Jan 2023 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
50
+ page_content=' 1: Overview of our proposed approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
51
+ page_content=' The allo-centric DOGM is represented as an image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
52
+ page_content=' Each channel red, green and blue represent unknown, dynamic and static cells respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
53
+ page_content=' The black space represents known free space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
54
+ page_content=' The ego-vehicle is circled in dotted line in both input and target output sequences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
55
+ page_content=' capture the sequential or temporal dependencies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
56
+ page_content=' Lotter et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
57
+ page_content=' proposed Predictive Coding Network (PredNet), a deep learning network architecture that comprises of vertically- stacked Convolutional LSTMs (ConvLSTMs) where the local error and the prediction signal are propagated bottom-up and top-down respectively [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
58
+ page_content=' Wang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
59
+ page_content=' addresses the video prediction challenges of capturing short-term and long- term dynamics with the PredRNN architecture [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
60
+ page_content=' Building on their original approach [10], they introduce memory- decoupled spatio-temporal LSTM (ST-LSTM) blocks, fea- ture zigzag memory flow and a novel curriculum learning strategy to improve prediction results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
61
+ page_content=' Kim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
62
+ page_content=' takes in- spiration from memory-augmented networks to use external memory (LMC-Memory) to learn and store long-term motion dynamics and propose a memory query decomposition to ad- dress the high-dimensionality of motions in video predictions [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
63
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
64
+ page_content=' Occupancy Grid Prediction Jeon et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
65
+ page_content=' proposed conventional ConvLSTM to predict interaction-intensive traffic scenes on occupancy grids [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
66
+ page_content=' The approach represents only vehicles in the occupancy grid, their states extracted from camera inputs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
67
+ page_content=' Desquaire et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
68
+ page_content=' [13], proposed an end-to-end object-tracking approach by incorporating directly Lidar sensor data to predict the binary grid, using recurrent neural network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
69
+ page_content=' To incorporate ego- vehicle motion, they utilize a spatial transformer to allow internal memory of RNNs to learn environment of the state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
70
+ page_content=' Mohajerin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
71
+ page_content=' [14] suggested an RNN-based architecture with a difference learning method, and makes OGM pre- diction in the field of view of ego-vehicle front camera.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
72
+ page_content=' Schreiber et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
73
+ page_content=' [15] proposed an encoder-decoder network architecture, along with skip connections, to make long-term DOGM predictions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
74
+ page_content=' While they collect the sensor data from an autonomous vehicle, the vehicle remains stationary and only acts as the sensor collection point at different inter- sections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
75
+ page_content=' Itkina et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
76
+ page_content=' proposed to use evidential occupancy grid and implement PredNet architecture for the prediction [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
77
+ page_content=' The approach is then carried forward to develop the double-pronged architecture [17] and attention-augmented ConvLSTM [18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
78
+ page_content=' The latter work is able to make long-term predictions, however at turns the predictions still lose the scene structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
79
+ page_content=' Mann et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
80
+ page_content=' [19] addressed the problem of OGM prediction in urban scenes by incorporating vehicles semantics in the environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
81
+ page_content=' Their proposed method de- pends on the annotated vehicle data labels available in the dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
82
+ page_content=' Contrary to the conventional Occupancy Grid Prediction, we present an allo-centric DOGM representation to predict the urban traffic scene with respect to a fixed reference frame.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
83
+ page_content=' Apart from the conventional recurrent representation learning approaches, we also use memory-augmented learning-based video-prediction method, in relevance to learning long-term motion context of the dynamic agents.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
84
+ page_content=' III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
85
+ page_content=' SYSTEM OVERVIEW We discuss here the overall proposed approach for allo- centric DOGM prediction, the pipeline is summarized in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
86
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
87
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
88
+ page_content=' Dynamic Occupancy Grid Maps Dynamic occupancy grid maps provide a discretized rep- resentation of environment in a bird’s eye view, where every cell in the grid is independent and carries information about the associated occupancy and velocity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
89
+ page_content=' To generate DOGMs, we incorporate the Conditional Monte Carlo Dense Occupancy Tracker (CMCDOT) [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
90
+ page_content=' This approach associates four occupancy states to the grid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
91
+ page_content=' Each cell carries the probabilities of the cell being i) occupied and static, ii) occupied and dynamic, iii) unoccupied or free and iv) if the occupancy is unknown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
92
+ page_content=' The probabilities of these four states sum to one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
93
+ page_content=' In our work, we make use of three of these states and represent the grid as an RGB image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
94
+ page_content=' The channels Red, Green and Blue represent the unknown state, dynamic state and static state respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
95
+ page_content=' The associated probabilities of the cell in the 3-channel DOGM grid are interpreted as the pixel values of the RGB images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
96
+ page_content=' The RGB grid images can be seen in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
97
+ page_content=' 1-2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
98
+ page_content=' Low probabilities in all three channels leave the grid-image black, therefore, representing free space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
99
+ page_content=' For allo-centric grid generation, we define the grid in the world frame, close to the initial position of ego-vehicle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
100
+ page_content=' The state probabilities are initially computed in an ego- centric grid, since we use the on-board sensor data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
101
+ page_content=' To ensure that we have cell information for the complete allo-centric grid dimensions when the vehicle is dynamic and moving away from the world frame origin, a much larger ego-centric Allo-centric Video DOGM Prediction Generation NetworkDOGM is computed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
102
+ page_content=' This information is then fused to update every cell states in the allo-centric grid in the world frame.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
103
+ page_content=' We compare the allo-centric and ego-centric grids at 4 time instants for the same scene and same grid dimensions in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
104
+ page_content=' In the allo-centric grid, the ego-vehicle (illustrated in the pink box) can be seen moving with respect to the grid, while it remains fixed in the ego-centric grid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
105
+ page_content=' It is important to note that the observable space around the ego-vehicle remains the same for both grids.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
106
+ page_content=' However, since they are defined in different frames, the two cover different spaces in the scene at a given time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
107
+ page_content=' We illustrate the common space covered by both grids since the start of the sequence, marked by yellow boundary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
108
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
109
+ page_content=' 2: Visualization of allo-centric and ego-centric grids, generated for the same scene.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
110
+ page_content=' The area marked by yellow lines is the common region covered by both grids up until the t-th sequence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
111
+ page_content=' The ego-vehicle is boxed in pink grid and the bus passing by is encircled in white.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
112
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
113
+ page_content=' Problem Formulation We formally define the task of predicting the scene in allo-centric DOGM representation, as sequence-to-sequence learning, see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
114
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
115
+ page_content=' A sequence comprises of a set of sequential grid images that capture the evolution of a given scene.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
116
+ page_content=' Let Xt ∈ R3xW xH and Yt ∈ R3xW xH be the t-th frame of the 3-channel grid-image where W and H denote the width and height respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
117
+ page_content=' The input sequence for the grid- image is denoted by Xt−N:t, representing N consecutive frames.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
118
+ page_content=' Given a set of input sequence, the task of the network is to predict future grid images, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
119
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
120
+ page_content=' output sequence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
121
+ page_content=' The target and predicted output sequences are denoted by Yt+1:t+P and ˆYt+1:t+P where P is the prediction horizon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
122
+ page_content=' For training and testing data, the DOGMs can be generated for both the input and the target sequences, leaving behind no additional need for labelled data or human intervention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
123
+ page_content=' Since the input sequences, Xt−N:t, and output sequences, Yt+1:t+P , are represented as images, this prediction task can be considered a video prediction problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
124
+ page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
125
+ page_content=' Deep Learning Prediction Architectures To study and compare the scene prediction with ego- centric and allo-centric grids, we train our datasets with different video prediction networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
126
+ page_content=' We consider 3 networks, briefly discussed in section II-A: PredNet, PredRNN, LMC- Memory with memory alignment learning (here on referred as LMC-Memory).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
127
+ page_content=' PredNet [8], inspired from predictive coding, makes pre- dictions based on how the predicted frames deviate from the target [20].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
128
+ page_content=' The original work tests the network on vehicle mounted camera images from Kitti dataset [21] and demon- strates the ability to capture both egocentric motion as well as motion of objects in camera images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
129
+ page_content=' We consider PredRNN [9] and LMC-Memory architecture [11] as the state of the art video prediction networks that aim to capture long-term dependencies and motion context.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
130
+ page_content=' PredRNN implements novel ST-LSTM units with a zigzag internal memory flow and proposes memory decoupling loss to discourage learning redundant features.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
131
+ page_content=' LMC-Memory architecture, on the other hand, proposes an external memory block with its own parameters to store various motion contexts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
132
+ page_content=' The approach also offers an efficient computation method since the motion context for long-term multi-step predictions is computed only once for a given input sequence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
133
+ page_content=' We study these networks capabilities to retain the occu- pancy of the static region, and the ability to predict motion of dynamic agents in DOGM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
134
+ page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
135
+ page_content=' Unknown Channel and Loss functions In both ego-centric and allo-centric grids, a significant part of the scene remains unobserved, see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
136
+ page_content=' 2 (unknown channel is represented in red).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
137
+ page_content=' This is more pronounced in the initial frames of the allo-centric grid, where the Lidar is unable to detect the farthest area from the ego-vehicle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
138
+ page_content=' While it is more relevant to learn the evolution of static and dynamic components in the scene, inclusion of unknown channel is useful for our prediction task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
139
+ page_content=' A Lidar based grid is often unable to capture the full shape of a vehicle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
140
+ page_content=' For example, we can see in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
141
+ page_content=' 2 how the occupied cells by the bus vary in different time steps on the grid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
142
+ page_content=' It is only in the 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
143
+ page_content='0s time step that a rectangular shape is observed, otherwise different parts of the bus remain unknown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
144
+ page_content=' The unknown channel at different instants also carries spatial information of the agents with respect to the ego-vehicle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
145
+ page_content=' Thus, with the sequential frames and the unknown channel, we assist the network to be able to extract spatial information and learn scene representation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
146
+ page_content=' The inclusion of unknown channel and emphasis on learning static and dynamic components is addressed in the loss function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
147
+ page_content=' Loss function L in the implemented video prediction networks is modified to carry the weighted sum of the RGB channels: L = αLR + β(LG + LB) (1) where, LR, LG and LB represent the loss for unknown (red), dynamic (green) and static channels (blue) respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
148
+ page_content=' In order to encourage the network to learn and improve the prediction of the static and dynamic channels, α is always kept smaller than β.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
IV. EXPERIMENTS

A. Dataset

We study the prediction performance on the real-world NuScenes dataset [3]. The original dataset consists of 850 scenes for training and 150 scenes for testing; each scene is approximately 20s long. We generate the DOGM grids based on the Lidar pointcloud and the available odometry. For the allo-centric grid, we represent the scene with respect to a fixed reference frame, with a grid dimension of 60 x 60m and a resolution of 0.1m per cell. Each sequence starts with the ego-vehicle heading facing up, capturing the scene 10m behind and 50m ahead of it. The initial pose was selected to ensure that the ego-vehicle remains within the grid for the total sequence length, even when driving at high speed. For the ego-centric grid, we generate a grid of the same dimensions and resolution, with the ego-vehicle fixed at the center. Each sequence comprises 35 frames, a duration of 3.5s, with DOGM grid images generated every 0.1s. In total, we have 4,250 training and 750 testing sequences.
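For reference, the grid and sequence parameters above can be summarized as a small configuration sketch (field names are illustrative, not taken from the authors' code):

    # Illustrative configuration matching the setup described in the text.
    GRID_CONFIG = {
        "size_m": 60.0,          # 60 x 60 m grid
        "resolution_m": 0.1,     # 0.1 m per cell
        "cells_per_side": 600,   # 60 m / 0.1 m
        "behind_m": 10.0,        # scene captured 10 m behind the ego-vehicle
        "ahead_m": 50.0,         # and 50 m ahead of it (allo-centric case)
    }

    SEQUENCE_CONFIG = {
        "frames": 35,            # 3.5 s at one DOGM image every 0.1 s
        "dt_s": 0.1,
    }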
B. Training

The input sequence X_{t−9:t} consists of 10 frames (1.0s). Each network is trained to make predictions Ŷ_{t+1:t+25} for the 25 future frames (2.5s). Both the allo-centric and ego-centric datasets are trained with the original parameters of the respective video prediction network. For training with the PredRNN and LMC-Memory networks, both allo-centric and ego-centric grid images are resized to 192x192 pixels. PredRNN is trained with a batch size of 4 and a learning rate of 10⁻⁴; the number of channels of each hidden state is set to 64. The loss function is the sum of the L2 and decoupling losses, and the values of α and β in Eq. (1) are set to 0.2 and 0.8. LMC-Memory is trained with a learning rate of 2x10⁻⁴; the memory slot size is set to 100 and the ConvLSTM to 4 layers for frame prediction. The loss function is the sum of the L1 and L2 losses; the values of α and β are set to 0.2 and 0.8. For training with PredNet, the grid images are resized to 160x160 pixels. The network is set to 4 hierarchical layers with an initial learning rate of 10⁻³. The loss function is the L1 loss of only the first layer; the values of α and β are set to 0.05 and 0.8. All models are trained with the Adam optimizer for 30 epochs.
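A compact summary of the per-network training setups quoted above, written as an illustrative Python dictionary (key names are ours; values are the hyperparameters given in the text):

    # Hyperparameters as quoted in the text; not the authors' config files.
    COMMON = {"optimizer": "Adam", "epochs": 30, "alpha": 0.2, "beta": 0.8}

    TRAINING = {
        "PredRNN": {
            **COMMON,
            "input_size": (192, 192), "batch_size": 4, "lr": 1e-4,
            "hidden_channels": 64, "loss": "L2 + decoupling",
        },
        "LMC-Memory": {
            **COMMON,
            "input_size": (192, 192), "lr": 2e-4,
            "memory_slots": 100, "convlstm_layers": 4, "loss": "L1 + L2",
        },
        "PredNet": {
            **COMMON,
            "input_size": (160, 160), "layers": 4, "lr": 1e-3,
            "loss": "L1 (first layer only)",
            "alpha": 0.05,  # PredNet uses a smaller weight on the unknown channel
        },
    }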
V. EVALUATION

For evaluation, we are particularly interested in the static and dynamic agents in the scene. In Section III-D we discussed the utility of the unknown regions for learning the scene representation. However, the unknown region occupies a large portion of the grid and would therefore, in the evaluation, overshadow the performance on the more interesting and relevant segments: the static and dynamic regions. For this reason, we evaluate the dataset and network performances on two channels of the predicted images, the blue and green channels, representing the static and dynamic components of the scene. We encourage the reader to refer to the video¹ for a better visualization of the results.

¹ https://youtu.be/z-0BVM93X8c
A. Quantitative Evaluation

The allo-centric and ego-centric grids at any instant observe different parts of the scene (see Fig. 2). For a fair comparison between them, we modify the test dataset and crop out the part of each frame that has not been observed by both grids up to time t. Thus, for example, the parts of the grids outside the yellow dotted boxes in Fig. 2 are blacked out for the input sequence frames X_{t−N:t} as well as for the target frames in the output sequence Y_{t+1:t+P}.
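As a concrete illustration, a minimal sketch of this joint-visibility cropping, assuming per-grid boolean visibility maps accumulated over the frames observed so far (array and function names are ours):

    import numpy as np

    def crop_to_joint_visibility(frames, seen_allo, seen_ego):
        """Black out cells that have not been observed by BOTH grids so far.

        frames:    (T, H, W, 3) array of DOGM images (inputs and targets).
        seen_allo: (H, W) boolean map of cells already observed in the
                   allo-centric grid; seen_ego likewise for the ego-centric grid.
        """
        joint = seen_allo & seen_ego      # region observable in both representations
        masked = frames.copy()
        masked[:, ~joint] = 0.0           # black out everything outside that region
        return masked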
We measure the performance using three metrics: MSE (Mean Squared Error), SSIM (Structural Similarity Index Measure), and LPIPS (Learned Perceptual Image Patch Similarity) [22]. MSE is calculated from the pixel-wise difference between the ground truth and the predicted frame, per channel and per cell. However, with MSE, the slightest error in the predicted motion can result in large errors on the ego-centric grid dataset. The SSIM and LPIPS metrics evaluate the prediction results based on structural similarity and perceptual similarity, respectively. Lower values are better for MSE and LPIPS, while higher values are better for SSIM.
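A rough sketch of how the per-frame scores on the static and dynamic channels could be computed, e.g. with NumPy and scikit-image (LPIPS would be computed analogously with the lpips package); the channel ordering and function name are assumptions on our part:

    import numpy as np
    from skimage.metrics import structural_similarity as ssim

    def frame_scores(pred, target):
        """MSE and SSIM on the dynamic (green) and static (blue) channels only.

        pred, target: (H, W, 3) float arrays in [0, 1], with channels ordered
        (unknown, dynamic, static); the unknown channel is excluded from scoring.
        """
        p, t = pred[..., 1:], target[..., 1:]      # drop the unknown channel
        mse = float(np.mean((p - t) ** 2))
        ssim_val = ssim(t, p, channel_axis=-1, data_range=1.0)
        return mse, ssim_val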
Table I shows the average results over the complete 2.5s prediction horizon. The MSE score of the allo-centric grids is significantly lower than that of the ego-centric grids. Since the complete scene transforms with respect to the ego-vehicle, the MSE is always higher on the ego-centric grid. The SSIM and LPIPS scores are also significantly better for the allo-centric grid, due to the tendency of the ego-centric grids to become increasingly blurry at longer prediction horizons.
TABLE I: Average results with allo-centric and ego-centric grids for a prediction horizon of 2.5s. The allo-centric grid outperforms the ego-centric grid with all three video prediction networks.

                      Network       MSE x 10⁻² (↓)   SSIM (↑)   LPIPS (↓)
  Allo-centric grid   LMC-Memory    0.894            0.895      0.167
                      PredRNN       0.882            0.904      0.167
                      PredNet       0.905            0.888      0.172
  Ego-centric grid    LMC-Memory    1.302            0.856      0.217
                      PredRNN       1.138            0.845      0.234
                      PredNet       1.335            0.847      0.225
In Fig. 3, we plot the scores of the metrics at every 0.5s prediction step. The results with the allo-centric grid (shown in blue) always outperform those with the ego-centric grid. Among the three prediction networks, PredRNN overall performs best with the allo-centric grids. However, with the ego-centric grids (results shown in orange), PredRNN offers a good MSE score but its SSIM and LPIPS performance drops after 1.0s. This is because PredRNN tends to make blurry and diffused predictions in the output frames; this helps reduce the MSE, but the scene loses its structure. This is further seen in the qualitative results discussed in Section V-B and illustrated in Fig. 4.
Fig. 3: Results with the MSE (↓), SSIM (↑) and LPIPS (↓) metrics for allo-centric and ego-centric grids, for input sequences of 1.0s and prediction horizons up to 2.5s (legend: LMC-ego, PredRNN-ego, PredNet-ego, LMC-allo, PredRNN-allo, PredNet-allo). For fair comparison, all test sequence frames were modified to contain only the scene observable in both the allo-centric and ego-centric grids. The allo-centric grid (results plotted in blue) outperforms the other with all three video prediction networks.

Fig. 4: Qualitative results for the ego-vehicle leaving a roundabout, on both allo-centric (4a) and ego-centric (4b) grids. The input sequence consists of 10 frames (1.0s) and the output predicted sequence of up to 25 frames (2.5s). The prediction results are shown at the 0.5s, 1.5s and 2.5s instants and are magnified at the interesting spaces, marked by a red box in the target (ground-truth) frames. The best results are observed with the LMC-Memory network on the allo-centric grid, which retains the scene structure and predicts the motion of the ego-vehicle best.

B. Qualitative Evaluation

The prediction results of the allo-centric and ego-centric grids differ drastically when the ego-vehicle is turning at an intersection or driving on a curved road. Figure 4 shows results for a sequence where the ego-vehicle is exiting a roundabout. In this scene, while there are no other dynamic agents, the network needs to predict the behaviour of the ego-vehicle as it drives along the curved static segment (alluding to the road structure) and heads towards static objects/obstacles. For the allo-centric grids, the challenge is to predict the ego-vehicle pose while the scene remains static. The best results are achieved with LMC-Memory: the vehicle pose is well predicted up to 2.5s, and its orientation is adjusted so that it does not hit the static components. For the same grid, PredRNN fails to learn and predict this behaviour, resulting in false predictions of collisions; the ego-vehicle, while getting blurrier, diffuses into the static obstacles on the road. With PredNet, the ego-vehicle is almost entirely lost at the 1.5s prediction horizon. This is expected behaviour, since PredNet is not aimed at long-term video prediction. With all three networks the ego-vehicle gets blurrier; however, with PredRNN the static scene also tends to blur at larger prediction horizons.

In the ego-centric grid, the whole scene rotates around the ego-vehicle. LMC-Memory and PredNet significantly lose the static components ahead of the vehicle. The rotation results in increasing blurriness at every time step. PredRNN predictions are more diffused, and faint blurry cells are still visible ahead of the vehicle even at the 2.5s prediction horizon. In the context of planning and safe navigation, this high uncertainty in the environment structure renders the prediction results unreliable.
VI. DISCUSSION AND FUTURE WORK

In this work, we presented a novel allo-centric dynamic occupancy grid approach for long-term prediction of urban traffic scenes and compared it to the conventional ego-centric DOGM approach. We trained and tested several video prediction networks to show that the allo-centric DOGM representation has a superior ability to predict the same scene. The most significant improvement is the allo-centric grid's ability to retain the static scene structure, especially when the vehicle turns. The ego-centric grid, on the other hand, tends to lose the static scene, and hence the crucial information about whether a given space is occupied or free. The results of allo-centric grid prediction with the state-of-the-art PredRNN and LMC-Memory approaches have shown complementary benefits. PredRNN predictions, though diffused and increasingly blurry, are capable of maintaining agents for longer. We observe that LMC-Memory shows a better tendency to learn behaviours in comparison to PredRNN. It is pertinent to mention here that the two grids remain very similar: in both scenarios, the observable space updates relative to the position of the vehicle in the scene. Thus, while the allo-centric grid is no longer fixed to the ego-vehicle, the ego-vehicle bias remains. All three video prediction networks tested in this work address the prediction problem as deterministic. However, the behaviour of agents in urban traffic scenes tends to be multimodal. For future work, the addition of multimodal prediction capabilities to the network architecture would be interesting. Additionally, the incorporation of semantics in the occupancy grid, such as agent type and offline road information, could assist in learning behaviours and interactions.
REFERENCES

[1] S. Mozaffari, O. Y. Al-Jarrah, M. Dianati, P. Jennings, and A. Mouzakitis, "Deep learning-based vehicle behavior prediction for autonomous driving applications: A review," IEEE Transactions on Intelligent Transportation Systems, vol. 23, no. 1, pp. 33–47, 2020.
[2] A. Alahi, K. Goel, V. Ramanathan, A. Robicquet, L. Fei-Fei, and S. Savarese, "Social lstm: Human trajectory prediction in crowded spaces," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 961–971.
[3] H. Caesar, V. Bankiti, A. H. Lang, S. Vora, V. E. Liong, Q. Xu, A. Krishnan, Y. Pan, G. Baldan, and O. Beijbom, "nuscenes: A multimodal dataset for autonomous driving," arXiv preprint arXiv:1903.11027, 2019.
[4] S. Ettinger, S. Cheng, B. Caine, C. Liu, H. Zhao, S. Pradhan, Y. Chai, B. Sapp, C. R. Qi, Y. Zhou et al., "Large scale interactive motion forecasting for autonomous driving: The waymo open motion dataset," in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 9710–9719.
[5] A. Nègre, L. Rummelhard, and C. Laugier, "Hybrid sampling bayesian occupancy filter," in 2014 IEEE Intelligent Vehicles Symposium Proceedings. IEEE, 2014, pp. 1307–1312.
[6] L. Rummelhard, A. Nègre, and C. Laugier, "Conditional monte carlo dense occupancy tracker," in 2015 IEEE 18th International Conference on Intelligent Transportation Systems. IEEE, 2015, pp. 2485–2490.
[7] S. Oprea, P. Martinez-Gonzalez, A. Garcia-Garcia, J. A. Castro-Vargas, S. Orts-Escolano, J. Garcia-Rodriguez, and A. Argyros, "A review on deep learning techniques for video prediction," IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. [Online]. Available: https://arxiv.org/pdf/2004.05214.pdf
[8] W. Lotter, G. Kreiman, and D. Cox, "Deep predictive coding networks for video prediction and unsupervised learning," in 5th International Conference on Learning Representations, ICLR 2017 - Conference Track Proceedings, 2017, pp. 1–18.
[9] Y. Wang, H. Wu, J. Zhang, Z. Gao, J. Wang, P. Yu, and M. Long, "Predrnn: A recurrent neural network for spatiotemporal predictive learning," IEEE Transactions on Pattern Analysis and Machine Intelligence, pp. 1–1, 2022.
[10] Y. Wang, M. Long, J. Wang, Z. Gao, and P. S. Yu, "Predrnn: Recurrent neural networks for predictive learning using spatiotemporal lstms," in Advances in Neural Information Processing Systems, I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, Eds., vol. 30. Curran Associates, Inc., 2017.
[11] S. Lee, H. G. Kim, D. H. Choi, H.-I. Kim, and Y. M. Ro, "Video prediction recalling long-term motion context via memory alignment learning," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 3054–3063.
[12] H.-S. Jeon, D.-S. Kum, and W.-Y. Jeong, "Traffic scene prediction via deep learning: Introduction of multi-channel occupancy grid map as a scene representation," in 2018 IEEE Intelligent Vehicles Symposium (IV), 2018, pp. 1496–1501.
[13] J. Dequaire, P. Ondrúška, D. Rao, D. Wang, and I. Posner, "Deep tracking in the wild: End-to-end tracking using recurrent neural networks," The International Journal of Robotics Research, vol. 37, no. 4-5, pp. 492–512, Jun. 2017. [Online]. Available: https://doi.org/10.1177/0278364917710543
[14] N. Mohajerin and M. Rohani, "Multi-step prediction of occupancy grid maps with recurrent neural networks," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 10 600–10 608.
[15] M. Schreiber, S. Hoermann, and K. Dietmayer, "Long-term occupancy grid prediction using recurrent neural networks," in 2019 International Conference on Robotics and Automation (ICRA). IEEE, 2019, pp. 9299–9305.
[16] M. Itkina, K. Driggs-Campbell, and M. J. Kochenderfer, "Dynamic environment prediction in urban scenes using recurrent representation learning," in 2019 IEEE Intelligent Transportation Systems Conference (ITSC). IEEE, 2019, pp. 2052–2059. [Online]. Available: https://arxiv.org/abs/1904.12374
[17] M. Toyungyernsub, M. Itkina, R. Senanayake, and M. J. Kochenderfer, "Double-prong ConvLSTM for spatiotemporal occupancy prediction in dynamic environments," arXiv preprint arXiv:2011.09045, 2020. [Online]. Available: http://arxiv.org/abs/2011.09045
[18] B. Lange, M. Itkina, and M. J. Kochenderfer, "Attention augmented ConvLSTM for environment prediction," arXiv preprint arXiv:2010.09662, 2020.
[19] K. S. Mann, A. Tomy, A. Paigwar, A. Renzaglia, and C. Laugier, "Predicting future occupancy grids in dynamic environment with spatio-temporal learning," 2022. [Online]. Available: https://arxiv.org/abs/2205.03212
[20] R. P. Rane, E. Szügyi, V. Saxena, A. Ofner, and S. Stober, "Prednet and predictive coding: A critical review," in Proceedings of the 2020 International Conference on Multimedia Retrieval, 2020, pp. 233–241.
[21] A. Geiger, P. Lenz, C. Stiller, and R.
486
+ page_content=' Urtasun, “Vision meets robotics: The kitti dataset,” International Journal of Robotics Research (IJRR), 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
487
+ page_content=' [22] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
488
+ page_content=' Zhang, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
489
+ page_content=' Isola, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
490
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
491
+ page_content=' Efros, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
492
+ page_content=' Shechtman, and O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
493
+ page_content=' Wang, “The unreasonable effectiveness of deep features as a perceptual metric,” in CVPR, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DdE3T4oBgHgl3EQfUwqw/content/2301.04454v1.pdf'}
DdFJT4oBgHgl3EQfBSxP/content/tmp_files/2301.11424v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
DdFJT4oBgHgl3EQfBSxP/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
E9E4T4oBgHgl3EQffg1w/content/2301.05108v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d34a018101d503cdcdc519eb29d00c0ab150a49adc8c7e24e3e46363e8b55781
3
+ size 1165522
E9E4T4oBgHgl3EQffg1w/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7803055498234117cc21bd1cfc469c0c86445196b054be34a1a719a7fe10928d
3
+ size 3932205
E9E4T4oBgHgl3EQffg1w/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b76e9878ca3a73e9bd41de6ab07cdca2e2f97e7d37eda4a13541752e186ed3e6
3
+ size 157458
ENA0T4oBgHgl3EQfA_81/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90c318b06518876498e9bef47319739e78c758945d7848ebb762eef54548048e
3
+ size 170269
EdA0T4oBgHgl3EQfA_-G/content/2301.01970v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:24720e816fd658055620c682cdf7a31616c8e20708cdf1dcc875e74a2229b6e9
3
+ size 36989175
FNE5T4oBgHgl3EQfVQ8Q/content/2301.05549v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd50619d81e090f790a012020ed303d63cc9b043560ad95ba26f82f76bcfc4a7
3
+ size 102273
FNE5T4oBgHgl3EQfVQ8Q/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2eb2534ce23ea8c98c7ef1b4cc43141108f6b63f018f952aac99997be30b2364
3
+ size 1310765