jackkuo committed
Commit 1cef13a · verified · 1 Parent(s): fb54c55

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50):
  1. -9E3T4oBgHgl3EQfSwmi/content/2301.04436v1.pdf +3 -0
  2. -9E3T4oBgHgl3EQfSwmi/vector_store/index.faiss +3 -0
  3. -9E3T4oBgHgl3EQfSwmi/vector_store/index.pkl +3 -0
  4. .gitattributes +70 -0
  5. 09E3T4oBgHgl3EQfnArs/vector_store/index.faiss +3 -0
  6. 0tFIT4oBgHgl3EQf3Cv8/content/2301.11380v1.pdf +3 -0
  7. 19FQT4oBgHgl3EQf1zZe/content/2301.13421v1.pdf +3 -0
  8. 19FQT4oBgHgl3EQf1zZe/vector_store/index.pkl +3 -0
  9. 1NAyT4oBgHgl3EQf1PnY/content/tmp_files/2301.00733v1.pdf.txt +0 -0
  10. 1NAyT4oBgHgl3EQf1PnY/content/tmp_files/load_file.txt +0 -0
  11. 2NFLT4oBgHgl3EQfqi-E/content/2301.12140v1.pdf +3 -0
  12. 2NFLT4oBgHgl3EQfqi-E/vector_store/index.pkl +3 -0
  13. 2tAyT4oBgHgl3EQfb_cv/content/2301.00272v1.pdf +3 -0
  14. 2tAyT4oBgHgl3EQfb_cv/vector_store/index.pkl +3 -0
  15. 3dE0T4oBgHgl3EQfeACo/content/tmp_files/2301.02385v1.pdf.txt +830 -0
  16. 3dE0T4oBgHgl3EQfeACo/content/tmp_files/load_file.txt +507 -0
  17. 3dE3T4oBgHgl3EQfoQqU/content/tmp_files/2301.04632v1.pdf.txt +1845 -0
  18. 3dE3T4oBgHgl3EQfoQqU/content/tmp_files/load_file.txt +0 -0
  19. 4dFKT4oBgHgl3EQf9S5d/content/tmp_files/2301.11953v1.pdf.txt +681 -0
  20. 4dFKT4oBgHgl3EQf9S5d/content/tmp_files/load_file.txt +0 -0
  21. 4tAzT4oBgHgl3EQfEPpQ/content/tmp_files/2301.00989v1.pdf.txt +763 -0
  22. 4tAzT4oBgHgl3EQfEPpQ/content/tmp_files/load_file.txt +0 -0
  23. 5dE2T4oBgHgl3EQfOgbi/vector_store/index.pkl +3 -0
  24. 69AzT4oBgHgl3EQfgPxu/content/2301.01465v1.pdf +3 -0
  25. 69AzT4oBgHgl3EQfgPxu/vector_store/index.pkl +3 -0
  26. 8tE4T4oBgHgl3EQfdQys/content/tmp_files/2301.05090v1.pdf.txt +0 -0
  27. 8tE4T4oBgHgl3EQfdQys/content/tmp_files/load_file.txt +0 -0
  28. 9NAzT4oBgHgl3EQfSft5/vector_store/index.faiss +3 -0
  29. 9tAzT4oBgHgl3EQfSvsB/vector_store/index.faiss +3 -0
  30. AtE0T4oBgHgl3EQfxwIb/content/2301.02649v1.pdf +3 -0
  31. B9E0T4oBgHgl3EQfyAKb/content/2301.02654v1.pdf +3 -0
  32. B9E0T4oBgHgl3EQfyAKb/vector_store/index.pkl +3 -0
  33. B9FRT4oBgHgl3EQfvjiF/content/2301.13635v1.pdf +3 -0
  34. B9FRT4oBgHgl3EQfvjiF/vector_store/index.faiss +3 -0
  35. BdFAT4oBgHgl3EQfsR4z/content/tmp_files/2301.08657v1.pdf.txt +2261 -0
  36. BdFAT4oBgHgl3EQfsR4z/content/tmp_files/load_file.txt +0 -0
  37. C9E2T4oBgHgl3EQfSAeL/content/2301.03788v1.pdf +3 -0
  38. C9E2T4oBgHgl3EQfSAeL/vector_store/index.faiss +3 -0
  39. C9FJT4oBgHgl3EQfAyyD/content/tmp_files/2301.11422v1.pdf.txt +788 -0
  40. C9FJT4oBgHgl3EQfAyyD/content/tmp_files/load_file.txt +0 -0
  41. C9FKT4oBgHgl3EQfYi5Z/content/2301.11799v1.pdf +3 -0
  42. C9FKT4oBgHgl3EQfYi5Z/vector_store/index.faiss +3 -0
  43. C9FKT4oBgHgl3EQfYi5Z/vector_store/index.pkl +3 -0
  44. CtE2T4oBgHgl3EQfoAiM/content/tmp_files/2301.04014v1.pdf.txt +461 -0
  45. CtE2T4oBgHgl3EQfoAiM/content/tmp_files/load_file.txt +306 -0
  46. D9E1T4oBgHgl3EQfqQU2/content/2301.03340v1.pdf +3 -0
  47. D9E1T4oBgHgl3EQfqQU2/vector_store/index.faiss +3 -0
  48. ENE1T4oBgHgl3EQfqQWR/content/2301.03341v1.pdf +3 -0
  49. ENE1T4oBgHgl3EQfqQWR/vector_store/index.faiss +3 -0
  50. ENE1T4oBgHgl3EQfqQWR/vector_store/index.pkl +3 -0
-9E3T4oBgHgl3EQfSwmi/content/2301.04436v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2510e03a9baf5994399fc05e391f5f6deff31a99527c3641bcbbe13a4c07e17e
+ size 186381
-9E3T4oBgHgl3EQfSwmi/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d592c7ebc4d4b36de6123f929607462005ea468e8653b8802c77c13e8a722127
+ size 1835053
-9E3T4oBgHgl3EQfSwmi/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95db57098a680dba900707e9ea36ec04138316c9160d096418f63758a6db7fda
+ size 67102
.gitattributes CHANGED
@@ -7498,3 +7498,73 @@ a9E2T4oBgHgl3EQfaAcV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
  TNE0T4oBgHgl3EQflAGw/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
  ENAyT4oBgHgl3EQf4vqx/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
  59E0T4oBgHgl3EQfewCK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ V9E1T4oBgHgl3EQfvQVU/content/2301.03397v1.pdf filter=lfs diff=lfs merge=lfs -text
+ LdFJT4oBgHgl3EQfyS1K/content/2301.11637v1.pdf filter=lfs diff=lfs merge=lfs -text
+ VNE2T4oBgHgl3EQfDAaO/content/2301.03620v1.pdf filter=lfs diff=lfs merge=lfs -text
+ mtE0T4oBgHgl3EQfpwGZ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ C9E2T4oBgHgl3EQfSAeL/content/2301.03788v1.pdf filter=lfs diff=lfs merge=lfs -text
+ MdAyT4oBgHgl3EQfgfhQ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ a9E1T4oBgHgl3EQfKgN7/content/2301.02965v1.pdf filter=lfs diff=lfs merge=lfs -text
+ C9E2T4oBgHgl3EQfSAeL/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ D9E1T4oBgHgl3EQfqQU2/content/2301.03340v1.pdf filter=lfs diff=lfs merge=lfs -text
+ UdFLT4oBgHgl3EQfRC8l/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 9NAzT4oBgHgl3EQfSft5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ZNE0T4oBgHgl3EQfnAEV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ TNE3T4oBgHgl3EQfEAkP/content/2301.04291v1.pdf filter=lfs diff=lfs merge=lfs -text
+ B9FRT4oBgHgl3EQfvjiF/content/2301.13635v1.pdf filter=lfs diff=lfs merge=lfs -text
+ IdAzT4oBgHgl3EQfjf31/content/2301.01519v1.pdf filter=lfs diff=lfs merge=lfs -text
+ ZtAyT4oBgHgl3EQfWvfe/content/2301.00171v1.pdf filter=lfs diff=lfs merge=lfs -text
+ r9AyT4oBgHgl3EQfZvd-/content/2301.00229v1.pdf filter=lfs diff=lfs merge=lfs -text
+ lNE4T4oBgHgl3EQftQ2k/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ENE1T4oBgHgl3EQfqQWR/content/2301.03341v1.pdf filter=lfs diff=lfs merge=lfs -text
+ dtFST4oBgHgl3EQfFDgM/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ IdAzT4oBgHgl3EQfjf31/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 9tAzT4oBgHgl3EQfSvsB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ VNE2T4oBgHgl3EQfDAaO/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ WtAzT4oBgHgl3EQfKPud/content/2301.01094v1.pdf filter=lfs diff=lfs merge=lfs -text
+ TNE0T4oBgHgl3EQflAGw/content/2301.02481v1.pdf filter=lfs diff=lfs merge=lfs -text
+ LdFJT4oBgHgl3EQfyS1K/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ZtE4T4oBgHgl3EQfOgxT/content/2301.04965v1.pdf filter=lfs diff=lfs merge=lfs -text
+ -9E3T4oBgHgl3EQfSwmi/content/2301.04436v1.pdf filter=lfs diff=lfs merge=lfs -text
+ B9FRT4oBgHgl3EQfvjiF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ MdAyT4oBgHgl3EQfgfhQ/content/2301.00359v1.pdf filter=lfs diff=lfs merge=lfs -text
+ YtAyT4oBgHgl3EQfWvcM/content/2301.00167v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 09E3T4oBgHgl3EQfnArs/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ JdFRT4oBgHgl3EQfzjg4/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ZtAyT4oBgHgl3EQfWvfe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ENE1T4oBgHgl3EQfqQWR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ GNE4T4oBgHgl3EQfgA3B/content/2301.05113v1.pdf filter=lfs diff=lfs merge=lfs -text
+ HtE0T4oBgHgl3EQfRgCz/content/2301.02209v1.pdf filter=lfs diff=lfs merge=lfs -text
+ V9FJT4oBgHgl3EQf4C1z/content/2301.11664v1.pdf filter=lfs diff=lfs merge=lfs -text
+ x9AyT4oBgHgl3EQfnvh8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ wdAyT4oBgHgl3EQfOfZu/content/2301.00006v1.pdf filter=lfs diff=lfs merge=lfs -text
+ -9E3T4oBgHgl3EQfSwmi/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ VNFLT4oBgHgl3EQfRy9S/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ u9E3T4oBgHgl3EQfkgqO/content/2301.04598v1.pdf filter=lfs diff=lfs merge=lfs -text
+ xNAyT4oBgHgl3EQfavfQ/content/2301.00250v1.pdf filter=lfs diff=lfs merge=lfs -text
+ h9AzT4oBgHgl3EQfo_2T/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 2NFLT4oBgHgl3EQfqi-E/content/2301.12140v1.pdf filter=lfs diff=lfs merge=lfs -text
+ D9E1T4oBgHgl3EQfqQU2/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ kdAyT4oBgHgl3EQfYPeA/content/2301.00199v1.pdf filter=lfs diff=lfs merge=lfs -text
+ AtE0T4oBgHgl3EQfxwIb/content/2301.02649v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 19FQT4oBgHgl3EQf1zZe/content/2301.13421v1.pdf filter=lfs diff=lfs merge=lfs -text
+ v9E3T4oBgHgl3EQflApa/content/2301.04602v1.pdf filter=lfs diff=lfs merge=lfs -text
+ jdFIT4oBgHgl3EQfqSsD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ B9E0T4oBgHgl3EQfyAKb/content/2301.02654v1.pdf filter=lfs diff=lfs merge=lfs -text
+ xNE2T4oBgHgl3EQf3Qj1/content/2301.04170v1.pdf filter=lfs diff=lfs merge=lfs -text
+ C9FKT4oBgHgl3EQfYi5Z/content/2301.11799v1.pdf filter=lfs diff=lfs merge=lfs -text
+ TNE3T4oBgHgl3EQfEAkP/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ C9FKT4oBgHgl3EQfYi5Z/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ iNE3T4oBgHgl3EQfIwkK/content/2301.04336v1.pdf filter=lfs diff=lfs merge=lfs -text
+ RtE2T4oBgHgl3EQfsggH/content/2301.04059v1.pdf filter=lfs diff=lfs merge=lfs -text
+ jdFIT4oBgHgl3EQfqSsD/content/2301.11326v1.pdf filter=lfs diff=lfs merge=lfs -text
+ RtE2T4oBgHgl3EQfsggH/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ v9E3T4oBgHgl3EQflApa/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ HtE0T4oBgHgl3EQfRgCz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 69AzT4oBgHgl3EQfgPxu/content/2301.01465v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 2tAyT4oBgHgl3EQfb_cv/content/2301.00272v1.pdf filter=lfs diff=lfs merge=lfs -text
+ a9AzT4oBgHgl3EQfnP2n/content/2301.01578v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 0tFIT4oBgHgl3EQf3Cv8/content/2301.11380v1.pdf filter=lfs diff=lfs merge=lfs -text
+ F9E0T4oBgHgl3EQfzQJ7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ yNE3T4oBgHgl3EQflwrt/content/2301.04611v1.pdf filter=lfs diff=lfs merge=lfs -text
+ ZtE4T4oBgHgl3EQfOgxT/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
09E3T4oBgHgl3EQfnArs/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4f1bfd5a7fb26e2b01c832ea24dadd12a896d9d5734fba8f4a05bf70830e5ba
+ size 2949165
0tFIT4oBgHgl3EQf3Cv8/content/2301.11380v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cdbd1356a3e9ffd142533dada32cf0d54a181ac28a4cf8cb6024f0354a605bf3
+ size 511508
19FQT4oBgHgl3EQf1zZe/content/2301.13421v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fec2095f5abb6e76256a263bae68e49470351389d02443890070d515116f45e
+ size 899394
19FQT4oBgHgl3EQf1zZe/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51a944c7dc0f2261058014d1ec1cffc3174210dc89649d91df7e1a2ac4203ae0
+ size 196549
1NAyT4oBgHgl3EQf1PnY/content/tmp_files/2301.00733v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff.
1NAyT4oBgHgl3EQf1PnY/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff.
2NFLT4oBgHgl3EQfqi-E/content/2301.12140v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31ab717c14f7089e43ffffa2f70b9e7ec31c49fea9633b3b83485ecdae6f6338
+ size 2027224
2NFLT4oBgHgl3EQfqi-E/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d19386f1438640ea90f7a0850ff5627caacecbfee99a71a16c18df257e84f27
+ size 156870
2tAyT4oBgHgl3EQfb_cv/content/2301.00272v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9139aaaf7cda9836077c0f5b5a2000d1c9280b9649f7557b548669cf3bb12791
+ size 186042
2tAyT4oBgHgl3EQfb_cv/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:939788085209bcdd9de9fb8d2b60b1a61761d1c5989f230536afea91870a631d
+ size 74762
3dE0T4oBgHgl3EQfeACo/content/tmp_files/2301.02385v1.pdf.txt ADDED
@@ -0,0 +1,830 @@
Multi-Genre Music Transformer - Composing Full Length Musical Piece

Abhinav Kaushal Keshari (Purdue University)

arXiv:2301.02385v1 [cs.SD] 6 Jan 2023

Abstract

In the task of generating music, the art factor plays a big role and is a great challenge for AI. Previous work involving adversarial training (Dong et al., 2018) to produce new music pieces, and modeling the compatibility (Huang et al., 2021) of varied musical material (beats, tempo, stems), demonstrated great examples of learning this task, though it was limited to generating mashups or to learning features from tempo and key distributions to produce similar patterns. The Compound Word Transformer (Hsiao et al., 2021) was able to represent the music generation task as a sequence-generation challenge involving musical events defined by compound words. These musical events give a more accurate description of note progression, chord changes, harmony, and the art factor. The objective of this project is to implement a Multi-Genre Transformer that learns to produce music pieces through a more adaptive learning process involving a more challenging task in which the genre or form of the composition is also considered. We built a multi-genre compound-word dataset and trained a linear transformer (Katharopoulos et al., 2020) on it. We call this the Multi-Genre Transformer; it generates diverse, full-length new musical pieces comparable to original tracks, and it trains 2-5 times faster than the other models discussed.
1. Related Work

Despite the great success of artificial intelligence in generation challenges, in Natural Language Generation (NLG) there is a factor of art that still separates machine output from human-like performance. In NLG terms, it is something like the difference between a computer-generated article and a work of art such as a novel or a biography. For music, the art factor always comes into account: despite being able to produce musical compositions with adversarial networks or to mix stems using supervised learning, the results are still very different from an original piece of music, as we discuss below.
1.1. Music Generation using GANs

Generative adversarial networks (GANs) have made significant progress in producing text, video, and images, and similar efforts have been made to bring neural networks to the artistic domain of music. MuseGAN (Dong et al., 2018) brought a novel model for generating multi-track music. Until 2018, progress in using AI to compose music had been able to produce:

• Single-track (monophonic) music
• Multi-track (polyphonic) music, by combining several monophonic melodies in chronological order

Music is usually an art involving multiple instruments played together, which requires it to be multi-track; and because musical material is made up of chords, arpeggios, or melodies, imposing a chronological ordering prevents generalization. The paper (Dong et al., 2018) addresses this challenge of generalizing to real music by identifying what current neural network models lack relative to real-world music:

1. Music is an art of time, with characteristics of coherence, rhythm, tension, and emotional flow. This requires a Temporal Model.
2. Music compositions usually involve different instruments interacting with one another, making the composition harmonic. Addressing this requires a Composer Model.
3. Musical notes are built from chords, arpeggios, or melodies and from how they unfold over time, so chronological note-by-note generation is not suitable. To address this, the paper uses bars (segments of time) instead of notes as the basic unit of composition and generates music bar by bar, using transposed convolutional neural networks to learn translation-invariant patterns.

The paper (Dong et al., 2018) contributes both the ability to artificially compose realistic music and a generative adversarial framework with temporal and composer models. In short, the contributions are:

• The first GAN-based model for generating multi-track sequences.
• The first model that can generate multi-track polyphonic music.
• The same model can be used for music accompaniment.
• A new Lakh Pianoroll Dataset (LPD) of multi-track piano rolls.
• A new set of objective metrics proposed for future work in the domain of artificial music.

The proposed MuseGAN model considers two sub-network generators, Gtemp (a temporal structure generator) and Gbar (a bar generator), making the overall generator

    G(z) = {G_bar(G_temp(z)^(t))}_{t=1}^{T}

where z is the input noise vector. The strength of the model is its ability to generate samples with chord-like intervals (features learned by the temporal model) and melodies with pitch overlap among guitar, piano, and strings (features learned by the composer model).

The model handles multi-track generation by modeling the interdependency of tracks through three different generator designs (Jamming, Composer, and Hybrid), but the authors base these on an understanding of pop music composition. This possibly restricts the generator from exploring a broad spectrum of music and prevents generalization. Also worth mentioning is that the work relies on multi-track interdependency but does not study the compatibility of those tracks, which could significantly increase the quality of the generated music. This issue is addressed in the next paper.
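To make the two-stage factorization concrete, here is a minimal PyTorch sketch of a generator composed as G(z) = {G_bar(G_temp(z)^(t))}_{t=1}^{T}. The layer choices and shapes are illustrative assumptions for exposition only, not MuseGAN's actual architecture (which uses transposed convolutions):

    import torch
    import torch.nn as nn

    class TwoStageGenerator(nn.Module):
        """Illustrative sketch: a temporal generator feeding a bar generator.

        G_temp maps one noise vector to T per-bar latent codes; G_bar decodes
        each code into one piano-roll bar, so structure is shared across bars.
        """
        def __init__(self, z_dim=128, n_bars=4, bar_shape=(96, 84)):
            super().__init__()
            self.n_bars, self.bar_shape = n_bars, bar_shape
            self.g_temp = nn.Linear(z_dim, n_bars * z_dim)      # G_temp
            self.g_bar = nn.Sequential(                         # G_bar
                nn.Linear(z_dim, 256), nn.ReLU(),
                nn.Linear(256, bar_shape[0] * bar_shape[1]), nn.Sigmoid())

        def forward(self, z):                                   # z: (batch, z_dim)
            codes = self.g_temp(z).chunk(self.n_bars, dim=-1)   # T codes per sample
            bars = [self.g_bar(c).view(-1, *self.bar_shape) for c in codes]
            return torch.stack(bars, dim=1)                     # (batch, T, time, pitch)

    fake_piano_roll = TwoStageGenerator()(torch.randn(2, 128))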
1.2. Modeling the Compatibility of Stem Tracks to Generate Music Mashups (Huang et al., 2021)

Source separation (Jansson et al., 2017; Défossez et al., 2019) makes it possible to generate a music mashup from isolated stems such as vocals, drums, and piano. The challenge lies in producing music whose stems are mutually compatible. This paper builds a mashup-generation pipeline and trains a model to predict compatibility, automatically learning to adjust key and tempo (the characteristics of quality mashups in the real world).

General models trained for harmonic compatibility (Bernardes et al., 2017; Macas et al., 2018) fail to consider subtle features or surprising mixes of disparate samples, which are quite common in this art domain. Audio compatibility models such as Neural Loop Combiner (Chen et al., 2020) have a further issue: they lack vocal sources and variety of genres.

The authors designed a self-supervised learning scheme that recombines the original combination of stems (before source separation) to serve as ground-truth examples. To avoid a highly polarized model, semi-supervised learning was introduced: several random mashups were produced by mixing different stems and treated as unlabeled instances. Label smoothing regularization for outliers (Zheng et al., 2017) was used to assign a uniform distribution to the unlabeled data for loss computation, which helps regularization.

The final architecture consists of three modules:

1. Music Source Separation: uses an MSS algorithm (Jansson et al., 2017) to obtain the stems vocals, drums, bass, and other.
2. Mashup Database (MashupDB): using Madmom (Böck et al., 2016), features such as key, tempo, and downbeat information are extracted from the music clips. From these features and the separated stems, a mashup database is built whose entries act as harmonic or percussion stem candidates for the mashup-generation process.
3. Mashup Generation: takes candidate stems from MashupDB and adjusts key and tempo to produce mashups under three conditions - original, matched, and unmatched.

The model (Huang et al., 2021) is defined by p(y | V, H, P), where V, H, and P are the input signals for the vocal, harmonic, and percussion stems. The output probability p is used as the mashup compatibility, with y ∈ {0, 1} indicating good or bad.

The implementation (Huang et al., 2021) mimics learning compatibility for producing new mashups and provides objective and subjective evaluation by cross-validation across multiple datasets. This is made easier by the model's ability to extract stems and features and build its own mashup candidates, which also frees the training process from dependence on human-labeled data. The model is robust, since negative data is added alongside positive data for supervised learning. Its musical coverage is extensive, and the source-separation step makes it easy to extend the model to different genres for training. However, the current design lacks an effective embedding of the different stems while producing a mashup, making it dependent on tuning of key and tempo. The implementation fixes a range of key and tempo differences for compatibility without explaining in detail how these numbers were chosen, although defining a range does prevent large pitch shifts and time stretches. Additionally, the model ranks positively labeled (original) data over unlabeled data, which may raise concerns about flexibility. Another major challenge is the long training time, around 3 days on an NVIDIA Tesla V100 GPU, whereas using a transformer model reduces training time significantly.
1.3. Music Transformers

With state-of-the-art neural networks we managed to learn features in music by defining rules for matching tempo, beats, or compatibility. The previous paper also tried to learn compatibility with the help of supervised learning, but the model suffered from bias, since compatibility favored matched key or tempo, and it lacked generalization. The Compound Word Transformer (Hsiao et al., 2021) instead treats music as a sequence of events and uses a Transformer (a neural sequence model) (Vaswani et al., 2017) to generate a new musical sequence.

A musical note can be described by its pitch, chord, bar, duration, velocity (dynamics), and placement (onset time). If we treat these as tokens, we can define music as a sequence of tokens drawn from a pre-defined vocabulary. Because music is multi-faceted, a particular token type captures only a certain feature, such as melody, rhythm, or harmony. All neural networks until now treated these tokens as equal and thus lacked heterogeneity.

The Compound Word Transformer (Hsiao et al., 2021) generates music in a conceptually different way: it allows tokens to be of specific types and lets them have their own properties. Tokens can be of note type (pitch, duration) or metric type (beginning of a new beat or bar). A musical event is then defined by a combination of such tokens, which captures the co-occurrence relationships among them. These combinations of tokens are termed compound words. A music piece X can thus be represented as a sequence S of compound words cp:

    S = g(X) = {cp_t}_{t=1}^{T}

where g(.) is the conversion function that turns music into a time-ordered sequence of musical events and T is the length of the sequence.

Theoretically, the model learns over discrete-time dynamic directed hypergraphs. Consider a graph G = (V, E) (Figure 1): the vertices V are tokens and the edges E are sequences of tokens. A collection of vertices can be defined as a compound word, and a hyperedge in this graph represents a sequence of compound words. In Figure 1, v1, v2, v5 are tokens, the edge E1 defines a sequence of tokens, and e1, e2 are hyperedges (each connecting more than two nodes). Transitioning from one hyperedge to another defines the sequence of compound words we are trying to learn.

Figure 1. Graphical Representation of Music Space.

Using a transformer, we learn to predict the next musical event, i.e. the next compound word (combination of tokens). The self-attention part of the transformer learns the dependencies among the elements of the musical sequence, and a different feed-forward head is used for each token type. In short, the implementation groups tokens to form compound words and then performs sequence modeling over this sequence of compound words. The major contributions are:

• Composing pop-piano music of full song length.
• Compound-word sequencing with a linear transformer, providing state-of-the-art quality with 5-10x faster training and inference.
• Defining music as a dynamic directed hypergraph.

Generating a new musical event, i.e. a group of tokens combined into a compound word, at each time step is the backbone of this model, but it relies on the assumption that no two musical events can occur together: once an event of a particular token type is detected, the new hyperedge generated by the Transformer decoder marks the other tokens as [ignore]. Can this limit the music generation task? Additionally, the model is trained using only pop music, which limits the expressive power of the transformer.
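To make the compound-word representation concrete, here is a minimal Python sketch of a musical event as a typed record; the field names and example values are illustrative, not the paper's exact vocabulary:

    from dataclasses import dataclass
    from typing import Optional

    # One compound word groups the tokens describing a single musical event.
    # A field is None when the event type makes that token irrelevant (the
    # paper's [ignore] case), e.g. a metric event carries no pitch.
    @dataclass
    class CompoundWord:
        type: str                  # "note" or "metric"
        bar_beat: Optional[int] = None
        chord: Optional[str] = None
        tempo: Optional[int] = None
        pitch: Optional[int] = None
        duration: Optional[int] = None
        velocity: Optional[int] = None

    # S = g(X): a piece becomes a time-ordered sequence of compound words.
    song = [
        CompoundWord(type="metric", bar_beat=0, chord="C:maj", tempo=110),
        CompoundWord(type="note", pitch=60, duration=4, velocity=80),
        CompoundWord(type="note", pitch=64, duration=4, velocity=72),
    ]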
2. Implementation

The Compound Word Transformer (Hsiao et al., 2021) was able to represent the music generation task as a sequence-generation challenge involving musical events defined by compound words. Leveraging this representation, we implement a neural model that learns to produce music pieces through a more adaptive learning process involving a more challenging task in which the genre or form of the composition is also considered. This adds the richness of musical art to the learning process of attention-driven sequential learning. We call this model the Multi-Genre Music Transformer, and implementing it involves the following steps:

• Building Dataset: generating a compound-word dictionary for songs of different genres.
• Implementing Transformer Model: implementing our Transformer class, the training steps, and the generation logic for inference.
• Adaptive Learning: making our tuned model adaptable by training on a smaller, multi-genre dataset.
2.1. Building Dataset

To give our transformer a more generalized learning process, it needs to be trained on a piano-roll dataset of musical pieces from a variety of genres and styles. The dataset should be based on compound words (Hsiao et al., 2021), representing the different musical tokens as a combined unit for sequence modeling, which differs from traditional musical datasets (MIDI, REMI).

This required us to build a dataset by selecting music clippings and converting them to piano rolls using Onsets and Frames (Hawthorne et al., 2017), extracting downbeat and beat information from these songs using madmom, a music signal processing library (Böck et al., 2016), and finally converting this metadata into a compound-word representation using the dataset-generation scripts provided in the Compound Word Transformer repository (https://github.com/YatingMusic/compound-word-transformer/blob/main/dataset/Dataset.md). This also extends the AILabs.tw Pop1K7 dataset (Hsiao et al., 2021), which currently includes only pop music. Figure 2 demonstrates the pipeline for creating a new dataset.

Figure 2. Dataset Building Pipeline.

Following the pipeline above, we created a Compound Word (Hsiao et al., 2021) dataset with piano rolls for 150 musical pieces from 3 different genres: Electronic Dance Music (EDM), Indie, and Hip-Hop.
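As a sketch of the beat/downbeat extraction step of this pipeline, the madmom calls might look as follows; the choice of madmom's RNN downbeat processors here is our assumption, and the transcription (Onsets and Frames) and compound-word conversion scripts are separate steps omitted here:

    from madmom.features.downbeats import (RNNDownBeatProcessor,
                                           DBNDownBeatTrackingProcessor)

    def extract_beats(audio_path):
        """Return an (n_beats, 2) array of [time_sec, beat_position] for a song.

        beat_position counts 1..beats_per_bar; position 1 marks a downbeat,
        which the compound-word scripts need in order to place bar tokens.
        """
        activations = RNNDownBeatProcessor()(audio_path)
        tracker = DBNDownBeatTrackingProcessor(beats_per_bar=[3, 4], fps=100)
        return tracker(activations)

    beats = extract_beats("clip.wav")
    downbeats = beats[beats[:, 1] == 1]  # bar boundaries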
2.2. Implementing Transformer Model

We implement a linear transformer (Katharopoulos et al., 2020) to address long-range sequence dependency, a very relevant factor in music generation because a context or rhythm runs through the entire musical piece. Having an independent feed-forward head per token type in the Transformer decoder lets us improve the loss of each token independently, and it allows the model to scale to additional perspectives on the music (such as genre, form, or a particular chord progression) by adding an additional token type. We implement our transformer model generically, so that users can define their own token-sampling and token-embedding models, and these scale to any number of token types. The loss observed at each feed-forward head is shown in Figure 6. This shows that adding a new token type (for genre/style/form) for the model to learn can be achieved simply by adding an independent feed-forward head for it.
2.2.1. TOKEN EMBEDDING

Figure 3. Each token undergoes independent embedding before being combined with the positional encoding. Here T1, T2, ..., TK are the K different token types of our Transformer, each with its own embedding function and dimension; we assume the Transformer supports K token types.

The input to a transformer requires a positional encoding added to the embedding vector of each input sequence element. Since each element of our sequence is a compound word (Hsiao et al., 2021) combining different tokens, we embed each token separately (allowing adaptive sizes) and then concatenate the embeddings. An adaptive token size lets us use a smaller embedding dimension for a token type with a smaller vocabulary; concatenating all of them gives an embedding dimension of 512 for our model. Refer to Figure 3 for the detailed token-embedding steps.
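A minimal sketch of this per-type embedding; the vocabulary sizes and per-type dimensions below are illustrative assumptions, chosen only so that the concatenated width matches the 512-dimensional model input:

    import torch
    import torch.nn as nn

    class CompoundEmbedding(nn.Module):
        # (vocab_size, embed_dim) per token type; smaller vocabularies get
        # smaller dimensions, and the concatenation sums to d_model = 512.
        TOKEN_SPECS = {
            "type": (4, 32), "bar_beat": (18, 64), "tempo": (60, 64),
            "chord": (135, 96), "pitch": (87, 128), "duration": (18, 64),
            "velocity": (42, 64),
        }

        def __init__(self):
            super().__init__()
            self.embeds = nn.ModuleDict(
                {k: nn.Embedding(v, d) for k, (v, d) in self.TOKEN_SPECS.items()})

        def forward(self, tokens):  # tokens[k]: (batch, seq_len) integer ids
            parts = [self.embeds[k](tokens[k]) for k in self.TOKEN_SPECS]
            x = torch.cat(parts, dim=-1)  # (batch, seq_len, 512)
            return x                      # positional encoding is added afterwards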
2.2.2. TOKEN SAMPLING

At inference time, sampling plays a crucial role in avoiding degeneration and improving diversity. To avoid degeneration we follow nucleus sampling (Holtzman et al., 2019), a stochastic, temperature-controlled process that samples from the smallest subset of tokens whose cumulative probability mass exceeds a threshold. We also give each token type its own sampling policy, with a different threshold p and a different temperature parameter τ (Ackley et al., 1985) for reshaping the probabilities before sampling. We reused the inference implementation from the Compound Word Transformer (Hsiao et al., 2021) and raised τ for the chord token to allow more diverse chord progressions. Figure 4 shows the sampling process and the individual feed-forward layer for each token type in the transformer.

Figure 4. Transformer with N self-attention layers and an independent feed-forward head for each token. We first predict the token type for the current time step, then perform nucleus sampling before predicting the remaining tokens.
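A generic sketch of temperature-reshaped nucleus sampling as described above, applied independently per head with its own p and τ (a textbook version, not the repository's exact implementation):

    import numpy as np

    def nucleus_sample(logits, p=0.9, tau=1.0, rng=np.random.default_rng()):
        """Sample one token id from the smallest set whose mass exceeds p.

        tau reshapes the distribution first; a higher tau (as we use for the
        chord head) flattens it and yields more diverse progressions.
        """
        scaled = logits / tau
        probs = np.exp(scaled - np.max(scaled))
        probs /= probs.sum()
        order = np.argsort(probs)[::-1]               # most probable first
        cutoff = np.searchsorted(np.cumsum(probs[order]), p) + 1
        keep = order[:cutoff]                         # the nucleus
        kept = probs[keep] / probs[keep].sum()        # renormalize inside it
        return int(rng.choice(keep, p=kept))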
2.3. Adaptive Learning

After defining the model, the next important step is to implement the training loop. To support scalable token definitions in our generalized transformer, we make the training steps modular and generic over a variable number of token types. This makes it easy to add a new token type and to independently monitor the gradient-descent optimization of its loss. We trained our model in parallel under two conditions: the first on the original AILabs.tw Pop1K7 dataset (Hsiao et al., 2021), and the second, to provide a multi-genre learning environment for the transformer, on the dictionary generated from 3 different genres (EDM, Indie, Hip-Hop).
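The per-head bookkeeping this modularity implies can be sketched as follows, assuming each feed-forward head returns per-token logits; the function name and the aggregation by averaging are our own illustrative choices, not the exact training code:

    import torch
    import torch.nn.functional as F

    def compound_word_loss(head_logits, targets):
        """Cross-entropy over a variable set of token-type heads.

        head_logits[name]: (batch, seq_len, vocab_name); targets[name]:
        (batch, seq_len). Keeping one loss per head lets us monitor each
        token type independently (the curves of Figure 6).
        """
        losses = {
            name: F.cross_entropy(logits.flatten(0, 1), targets[name].flatten())
            for name, logits in head_logits.items()
        }
        total = torch.stack(list(losses.values())).mean()
        return total, losses  # backprop total; log the per-head dict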
3. Evaluation and Results

To train a multi-genre transformer, the primary objective was to provide a dataset richer in variety than the original pop-only dataset. With the help of the dataset-building pipeline, we created a token set with higher variance, allowing the model a broader expressive power. Figure 5 compares the token distributions of the two datasets.

Figure 5. Left: token distribution over the songs in the generated multi-genre dataset. Right: the same distribution for the AILabs.tw Pop1K7 dataset (Hsiao et al., 2021).

After training the model on both datasets, we also observe (Figure 6) that the individual token losses and the total average loss are similar, indicating that the model converges. Additionally, the gradient descent is more gradual on the multi-genre dataset, displaying a more settled progression. We trained the model with 12 self-attention layers and 8 feed-forward heads, a model dimension of 512, and a batch size of 4 for 180 epochs, which took around 17 hours. Using the trained model, we then generated 20 new full-length musical pieces with an average inference time of 12.56 sec/song, which is faster than the Compound Word Transformer, though with a slightly lower average number of tokens per song. Table 1 shows a more detailed comparison.
(Figure 4 graphic and Figure 5 histograms: the two panels report token counts per song with mean 2342.926, std 1194.481 and mean 2138.370, std 775.472; axes are number of songs vs. number of tokens.)
Figure 6. Loss vs. epoch for the different token types; the last plot shows the average loss over all token types.

For a qualitative evaluation of the generated pieces, we compare (Figure 7) their piano rolls with the piano rolls of the original tracks used to train the model.

Figure 7. Piano rolls of original songs and generated songs. The generated songs show rich, complete content similar to some of the original tracks.
4. Conclusion

In this project we produce music as a sequence of musical events generated by a trained Transformer. We leverage the Compound Word definition (Hsiao et al., 2021) to define a musical event by grouping multiple tokens. This grouping greatly reduces the length of our sequences and boosts long-range learning, and it also reduces the training and inference time of our model remarkably. We further exploit the fact that each token type has its own independent feed-forward prediction head to make the model scalable to new token types introduced into our dictionary, so a new token (for musical form, chord progression, etc.) can be added very easily. Additionally, we created an entirely new dataset consisting of a multi-genre compound-word dictionary and trained our model on it to provide a more adaptive learning environment. The generated compositions were highly rich in musical events and of good quality.
Table 1. Quantitative evaluation results for the Multi-Genre Transformer and the Compound Word Transformer. Results for the Compound Word Transformer come from the implementation in the paper (Hsiao et al., 2021).

MODEL                     TRAINING TIME   GPU MEMORY   INFERENCE TIME (/SONG)   AVG TOKENS (/SONG)
MULTI-GENRE TRANSFORMER   17 HRS          9.8GB        12.56 SEC                9190
COMPOUND TRANSFORMER      1.3 DAYS        9.5GB        19.8 SEC                 9546
(Figure 6 panels: tempo, chord, bar-beat, type, pitch, duration, and velocity loss vs. epoch, plus average loss, each comparing the Pop dataset and the Multi-Genre dataset over 180 epochs. Figure 7 panels: pitch vs. time (sec) piano rolls for the original and generated songs.)
References

Ackley, D. H., Hinton, G. E., and Sejnowski, T. J. A learning algorithm for Boltzmann machines. Cognitive Science, 9(1):147-169, 1985. URL https://www.sciencedirect.com/science/article/abs/pii/S0364021385800124.

Bernardes, G., Davies, M. E., and Guedes, C. A hierarchical harmonic mixing method. In International Symposium on Computer Music Multidisciplinary Research, pp. 151-170. Springer, 2017. URL https://link.springer.com/chapter/10.1007/978-3-030-01692-0_11.

Böck, S., Korzeniowski, F., Schlüter, J., Krebs, F., and Widmer, G. Madmom: A new Python audio and music signal processing library. In Proceedings of the 24th ACM International Conference on Multimedia, pp. 1174-1178, 2016. URL https://dl.acm.org/doi/abs/10.1145/2964284.2973795.

Chen, B.-Y., Smith, J. B., and Yang, Y.-H. Neural loop combiner: Neural network models for assessing the compatibility of loops. arXiv preprint arXiv:2008.02011, 2020. URL https://arxiv.org/abs/2008.02011.

Défossez, A., Usunier, N., Bottou, L., and Bach, F. Music source separation in the waveform domain. arXiv preprint arXiv:1911.13254, 2019. URL https://arxiv.org/abs/1911.13254.

Dong, H.-W., Hsiao, W.-Y., Yang, L.-C., and Yang, Y.-H. MuseGAN: Multi-track sequential generative adversarial networks for symbolic music generation and accompaniment. In Thirty-Second AAAI Conference on Artificial Intelligence, 2018. URL https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewPaper/17286.

Hawthorne, C., Elsen, E., Song, J., Roberts, A., Simon, I., Raffel, C., Engel, J., Oore, S., and Eck, D. Onsets and frames: Dual-objective piano transcription. arXiv preprint arXiv:1710.11153, 2017. URL https://arxiv.org/abs/1710.11153.

Holtzman, A., Buys, J., Du, L., Forbes, M., and Choi, Y. The curious case of neural text degeneration. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id=rygGQyrFvH.

Hsiao, W.-Y., Liu, J.-Y., Yeh, Y.-C., and Yang, Y.-H. Compound word transformer: Learning to compose full-song music over dynamic directed hypergraphs. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 178-186, 2021. URL https://ojs.aaai.org/index.php/AAAI/article/view/16091.

Huang, J., Wang, J.-C., Smith, J. B., Song, X., and Wang, Y. Modeling the compatibility of stem tracks to generate music mashups. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 187-195, 2021. URL https://ojs.aaai.org/index.php/AAAI/article/view/16092.

Jansson, A., Humphrey, E., Montecchio, N., Bittner, R., Kumar, A., and Weyde, T. Singing voice separation with deep U-Net convolutional networks. In 18th International Society for Music Information Retrieval Conference, pp. 23-27, 2017. URL https://openaccess.city.ac.uk/id/eprint/19289/1/7bb8d1600fba70dd79408775cd0c37a4ff62.pdf.

Katharopoulos, A., Vyas, A., Pappas, N., and Fleuret, F. Transformers are RNNs: Fast autoregressive transformers with linear attention. In International Conference on Machine Learning, pp. 5156-5165. PMLR, 2020. URL http://proceedings.mlr.press/v119/katharopoulos20a.html.

Macas, C., Rodrigues, A., Bernardes, G., and Machado, P. MixMash: A visualisation system for musical mashup creation. In 2018 22nd International Conference Information Visualisation (IV), pp. 471-477. IEEE Computer Society, 2018. URL https://www.computer.org/csdl/proceedings-article/iv/2018/720200a471/17D45XvMcd9.

Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, Ł., and Polosukhin, I. Attention is all you need. In Advances in Neural Information Processing Systems, pp. 5998-6008, 2017. URL https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf.

Zheng, Z., Zheng, L., and Yang, Y. Unlabeled samples generated by GAN improve the person re-identification baseline in vitro. In Proceedings of the IEEE International Conference on Computer Vision, pp. 3754-3762, 2017. URL https://openaccess.thecvf.com/content_iccv_2017/html/Zheng_Unlabeled_Samples_Generated_ICCV_2017_paper.html.
3dE0T4oBgHgl3EQfeACo/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,507 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf,len=506
2
+ page_content='Multi-Genre Music Transformer - Composing Full Length Musical Piece Abhinav Kaushal Keshari (Purdue University) Abstract In the task of generating music, the art factor plays a big role and is a great challenge for AI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
3
+ page_content=' Previ- ous work involving adversarial training (Dong et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
4
+ page_content=', 2018) to produce new music pieces and modeling the compatibility (Huang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
5
+ page_content=', 2021) of variety in music (beats, tempo, musical stems) demonstrated great examples of learning this task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
6
+ page_content=' Though this was limited to generating mashups or learning features from tempo and key distri- butions to produce similar patterns.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
7
+ page_content=' Compound Word Transformer (Hsiao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
8
+ page_content=', 2021) was able to represent music generation task as a sequence generation challenge involving musical events de- fined by compound words.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
9
+ page_content=' These musical events give a more accurate description of notes progres- sion, chord change, harmony and the art factor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
10
+ page_content=' The objective of the project is to implement a Multi-Genre Transformer which learns to produce music pieces through more adaptive learning pro- cess involving more challenging task where gen- res or form of the composition is also considered.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
11
+ page_content=' We built a multi-genre compound word dataset, implemented a linear transformer (Katharopoulos et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
12
+ page_content=', 2020) which was trained on this dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
13
+ page_content=' We call this Multi-Genre Transformer, which was able to generate full length new musical pieces which is diverse and comparable to original tracks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
14
+ page_content=' The model trains 2-5 times faster than other mod- els discussed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
15
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
16
+ page_content=' Related Work Despite achieving great success in generation challenges using Artificial Intelligence in Natural Language Genera- tion (NLG) there is a factor of art that still makes them different from human like performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
17
+ page_content=' In terms of NLG we can relate it to something like the difference between computer generated article and a piece of art like novels, biography, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
18
+ page_content=' For music art factor always come into ac- count and despite able to produce musical compositions through Adversarial networks or mixing stems using super- vised learning the solution still is very different from an original piece of music which we discuss below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
19
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
20
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
21
+ page_content=' Music Generation using GANs Generative adversarial networks (GANs) have provided sig- nificant progress in producing text, videos and images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
22
+ page_content=' Sim- ilar efforts have been made to bring neural networks to artistic domain of music.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
23
+ page_content=' MuseGAN(Dong et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
24
+ page_content=', 2018) brought a novel model for generating multi-track music.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
25
+ page_content=' Until 2018, the progress in using AI to compose music had been able to produce Single-track (monophonic) music Multi-track (polyphonic) music by combining several monophonic melodies in chronological order Music usually being an art involving multiple instruments played together requires music to be multi-track and because music notes are made up of chords, arpeggios or melodies the idea of using a chronological order setting prevents it from being generalized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3dE0T4oBgHgl3EQfeACo/content/2301.02385v1.pdf'}
26
The paper (Dong et al., 2018) addresses this challenge of generating real music by discussing the current technical shortcomings of neural network models and how they relate to real-world music:

1. Music is an art of time and has characteristics of coherence, rhythm, tension and emotional flow. This requires a Temporal Model.

2. Music compositions usually involve different instruments interacting with one another, which makes the compositions harmonic. To address this, a Composer Model is required.

3. Musical notes are built of chords, arpeggios or melodies and of how these unfold over time; a strictly chronological generation of notes is therefore not suitable. To address this, the paper introduces bars (segments of time) instead of notes as the basic unit of composition, and then generates music bar by bar using transposed convolutional neural networks to learn translation-invariant patterns.
The paper (Dong et al., 2018) contributes both the ability to artificially compose realistic music and a generative adversarial framework with temporal and composer models. In short, the contributions are:

- the first GAN-based model for generating multi-track sequences;
- the first model that can generate multi-track polyphonic music;
- the same model can be used for music accompaniment;
- a new Lakh Pianoroll Dataset (LPD) of multi-track piano-rolls;
- a new set of objective metrics proposed for future work on evaluating artificial music.
The proposed MuseGAN model uses two sub-network generators, a temporal structure generator G_temp and a bar generator G_bar, making the overall generator

    G(z) = \{ G_{bar}(G_{temp}(z)^{(t)}) \}_{t=1}^{T},

where z is the input noise vector. The strength of the model is its ability to generate samples having chord-like intervals (features learned by the temporal model) and melodies involving pitch overlap among guitar, piano and strings (features learned by the composer model).
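To make the two-stage design concrete, here is a minimal PyTorch sketch of a generator in this style, assuming a 4-bar output and a 96-time-step by 84-pitch piano-roll bar; the layer shapes and sizes are illustrative assumptions, not MuseGAN's exact architecture.

```python
import torch
import torch.nn as nn

class TemporalGenerator(nn.Module):
    """G_temp: maps one noise vector to T per-bar latent codes."""
    def __init__(self, z_dim=128, n_bars=4):
        super().__init__()
        self.z_dim, self.n_bars = z_dim, n_bars
        self.net = nn.Sequential(nn.Linear(z_dim, n_bars * z_dim), nn.ReLU())

    def forward(self, z):                       # z: (batch, z_dim)
        return self.net(z).view(-1, self.n_bars, self.z_dim)

class BarGenerator(nn.Module):
    """G_bar: decodes one latent code into a 96x84 piano-roll bar with
    transposed convolutions (the translation-invariant pattern learner)."""
    def __init__(self, z_dim=128):
        super().__init__()
        self.net = nn.Sequential(
            nn.ConvTranspose2d(z_dim, 64, kernel_size=(6, 7)), nn.ReLU(),
            nn.ConvTranspose2d(64, 1, kernel_size=(16, 12), stride=(16, 12)),
            nn.Sigmoid())

    def forward(self, z_bar):                   # z_bar: (batch, z_dim)
        return self.net(z_bar[:, :, None, None])  # (batch, 1, 96, 84)

def generate(z, g_temp, g_bar):
    """G(z) = { G_bar(G_temp(z)^(t)) } for t = 1..T, stacked along time."""
    codes = g_temp(z)
    bars = [g_bar(codes[:, t]) for t in range(codes.shape[1])]
    return torch.cat(bars, dim=2)               # (batch, 1, 96*T, 84)
```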
The model achieves multi-track generation by modeling the interdependency of tracks through three different generator designs (Jamming, Composer and Hybrid), but the authors motivate these designs from their understanding of pop-music composition. This possibly restricts the generator from exploring a broad spectrum of music and prevents it from being generalized. Also worth mentioning is that the work relies on multi-track interdependency but does not study the compatibility of these tracks, which could significantly increase the quality of the generated music. We will see this issue addressed in the next paper.
1.2. Modeling the Compatibility of Stem Tracks to Generate Music Mashups (Huang et al., 2021)

Source separation (Jansson et al., 2017; Défossez et al., 2019) makes it possible to generate a music mashup from isolated stems such as vocals, drums and piano. The challenge lies in producing music whose stems are compatible with one another. This paper builds a mashup-generation pipeline and trains a model to predict compatibility by automatically learning to adjust key and tempo (characteristics of quality mashups in the real world).
General models trained for harmonic compatibility (Bernardes et al., 2017; Macas et al., 2018) fail to consider subtle features or surprising mixes of disparate samples, which are quite common in this art domain. Another issue is that audio-compatibility models such as Neural Loop Combiner (Chen et al., 2020) lack vocal sources and a variety of genres.
The authors designed a self-supervised learning model by recombining the original combinations of stems before source separation to serve as ground-truth examples. To avoid a highly polarized model, semi-supervised learning was introduced: several random mashups were produced by mixing different stems and treated as unlabeled instances. Label smoothing regularization for outliers (Zheng et al., 2017) was used to assign a uniform distribution to the unlabeled data for loss computation, which helps regularization.
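A minimal sketch of this loss, assuming a two-class (good/bad) output where unlabeled random mashups are marked with label -1 and pulled toward the uniform distribution; this follows the idea of LSRO rather than the authors' exact code.

```python
import torch
import torch.nn.functional as F

def lsro_loss(logits, labels):
    """Cross-entropy with label smoothing regularization for outliers.

    labels: 1 = good mashup, 0 = bad mashup, -1 = unlabeled random mashup.
    Labeled rows get the usual one-hot target; unlabeled rows get the
    uniform target [0.5, 0.5] so the model stays less polarized.
    """
    log_p = F.log_softmax(logits, dim=1)             # (batch, 2)
    target = torch.full_like(log_p, 0.5)             # uniform by default
    labeled = labels >= 0
    target[labeled] = F.one_hot(labels[labeled], num_classes=2).float()
    return -(target * log_p).sum(dim=1).mean()
```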
The final architecture consists of 3 modules:

1. Music Source Separation: uses an MSS algorithm (Jansson et al., 2017) to obtain the stems vocals, drums, bass and other.

2. Mashup Database (MashupDB): using Madmom (Böck et al., 2016), features such as key, tempo and downbeat information are extracted from the music clips. From these features and separate stem combinations, a mashup database is created whose entries act as harmonic or percussion stem candidates for the mashup-generation process.

3. Mashup Generation: candidate stems from MashupDB are adjusted in key and tempo to produce mashups under 3 conditions - original, matched and unmatched.
The model (Huang et al., 2021) is defined by p(y | V, H, P), where V, H and P are the input signals for the vocal, harmonic and percussion stems respectively. The output probability p is used as the mashup compatibility, with y ∈ {0, 1} indicating good or bad.
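The interface of such a predictor can be sketched as below; the shared CNN encoder, the spectrogram input format and all layer sizes are assumptions made for illustration, not the paper's architecture.

```python
import torch
import torch.nn as nn

class MashupCompatibility(nn.Module):
    """Sketch of p(y | V, H, P): each stem is assumed to arrive as a
    (batch, 1, mel_bins, frames) log-mel spectrogram; a shared encoder
    summarizes each stem and a small head outputs the probability."""
    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten())    # -> (batch, 32)
        self.head = nn.Sequential(
            nn.Linear(3 * 32, 64), nn.ReLU(), nn.Linear(64, 1))

    def forward(self, vocal, harmonic, percussion):
        feats = [self.encoder(x) for x in (vocal, harmonic, percussion)]
        logit = self.head(torch.cat(feats, dim=1))
        return torch.sigmoid(logit).squeeze(1)        # p(y = 1 | V, H, P)
```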
The implementation (Huang et al., 2021) tries to mimic how compatibility is learned when producing new mashups, and provides objective and subjective evaluation by cross-validation across multiple datasets. This is made easier by the model's ability to extract different stems and features and to build its own mashup candidates, which also frees the training process from dependence on human-labeled data. The model is also robust, since negative data is added along with positive data for supervised learning. The range of music coverage is extensive as well, and the source-separation step makes it easy to extend the model to different genres for training.

However, the current design lacks an effective embedding of the different stems when producing a mashup and depends on the tuning of key and tempo. The implementation fixes a range of key and tempo differences for compatibility without explaining in detail how these numbers were chosen, although defining a range does prevent large pitch shifts and time stretches. Additionally, the model ranks positively labeled (original) data above unlabeled data, which might raise concerns about flexibility. Another major challenge is the long training time, around 3 days on an NVIDIA Tesla V100 GPU, whereas using a transformer model significantly reduces training time.
1.3. Music Transformers

With state-of-the-art neural networks we managed to learn features of music by defining certain rules for matching tempo, beats or compatibility. In the previous paper we also tried to learn compatibility with the help of supervised learning; that model, however, suffered from bias, as compatibility was favoured for matched key or tempo, and it also lacked generalization. The Compound Word Transformer (Hsiao et al., 2021) considers music as a sequence of events and uses a Transformer, a neural sequence model (Vaswani et al., 2017), to generate a new musical sequence. A musical note can be described by its pitch, chord, bar, duration, velocity (dynamics) and placement (onset time). If we consider these as tokens drawn from a pre-defined vocabulary, we can define music as a sequence of tokens. As music is multi-faceted, a particular type of token can capture only a certain feature, such as melody, rhythm or harmony. All the neural networks discussed so far treated these tokens as equal and thus lacked heterogeneity.
The Compound Word Transformer (Hsiao et al., 2021) generates music in a conceptually different way, as it allows tokens to be of specific types and lets them have their own properties. Tokens can be of note type (pitch, duration) or metric type (beginning of a new beat or bar). A musical event is then defined by a combination of such tokens, which makes it possible to capture the co-occurrence relationships among them; such combinations of tokens are termed compound words. We can now represent a music piece X as a sequence S of compound words cp:

    S = g(X) = \{ cp_t \}_{t=1}^{T},

where g(·) is the conversion function that turns music into a time-ordered sequence of musical events and T is the length of the music sequence.
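A small sketch of such a conversion function g, assuming just two token families (metric and note) and an explicit [ignore] value for the unused slots; the field names are illustrative, not the paper's exact vocabulary.

```python
from dataclasses import dataclass
from typing import List

IGNORE = 0  # placeholder value for token slots unused by an event type

@dataclass
class CompoundWord:
    type: int       # 1 = metric event, 2 = note event
    beat: int
    chord: int
    tempo: int
    pitch: int
    duration: int
    velocity: int

def g(events) -> List[CompoundWord]:
    """events: time-ordered ('metric'|'note', attrs) pairs -> {cp_t}."""
    seq = []
    for kind, a in events:
        if kind == 'metric':    # beat position, chord and tempo co-occur
            seq.append(CompoundWord(1, a['beat'], a['chord'], a['tempo'],
                                    IGNORE, IGNORE, IGNORE))
        else:                   # pitch, duration and velocity co-occur
            seq.append(CompoundWord(2, IGNORE, IGNORE, IGNORE,
                                    a['pitch'], a['duration'], a['velocity']))
    return seq
```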
Theoretically, the model learns over discrete-time dynamic directed hypergraphs. Consider a graph G = (V, E) (Figure 1): the vertices V are tokens and the edges E are sequences of tokens. A collection of vertices can be defined as a compound word, and a hyperedge in this graph represents a sequence of compound words. In Figure 1, v1, v2 and v5 are tokens; the edge E1 defines a sequence of tokens, whereas e1 and e2 define hyperedges (connecting more than 2 nodes). Transitioning from one hyperedge to another defines the sequence of compound words that we are trying to learn. Using a transformer, we try to learn the next musical event, i.e. the next compound word (combination of tokens). The self-attention part of the transformer learns the dependencies among the elements of the musical sequence, and a different feed-forward head is used for tokens of each different type.

Figure 1. Graphical representation of the music space.
In short, the implementation groups tokens to form compound words and then performs sequence modeling over this sequence of compound words. The major contributions are:

- composing pop-piano music of full song length;
- compound-word sequencing with a linear transformer, providing state-of-the-art results in terms of quality with 5-10x faster training and inference;
- music defined as a dynamic directed hypergraph.

Generating a new musical event, i.e. a group of tokens combined into a compound word, at each time step is the backbone of this model, but it relies on the assumption that no two musical events can occur together. Once an event of a particular token type is detected, the new hyperedge generated by the Transformer decoder marks the other tokens as [ignore]. Can this limit the music-generation task? Additionally, the model is trained using only pop music, which limits the expressive power of the transformer.
2. Implementation

The Compound Word Transformer (Hsiao et al., 2021) was able to represent the music-generation task as a sequence-generation challenge over musical events defined by compound words. Leveraging this representation, we implement a neural model that learns to produce music pieces through a more adaptive learning process and a more challenging task in which the genre or form of the composition is also considered. This adds the richness of the musical art to the attention-driven sequential learning process. We call this model the Multi-Genre Music Transformer, and implementing it involves the following steps:

- Building the Dataset: generating a compound-word dictionary for songs of different genres.
- Implementing the Transformer Model: implementing our Transformer class, the training steps and the generation logic for inference.
- Adaptive Learning: making our tuned model adaptable by training it on a smaller, multi-genre dataset.
2.1. Building Dataset

To provide a more generalized learning process for our transformer, it needs to be trained on a piano-roll dataset containing musical pieces from a variety of genres and styles. The dataset should be based on compound words (Hsiao et al., 2021), representing the different musical tokens as a combined unit for sequence modeling, which differs from traditional musical datasets (MIDI, REMI).

Figure 2. Dataset building pipeline.

This required us to build a dataset by selecting music clippings and converting them to piano roll using Onsets and Frames (Hawthorne et al., 2017), extracting downbeat and beat information from these songs using madmom, a music signal-processing library (Böck et al., 2016), and finally converting this metadata into a compound-word representation using the dataset-generation scripts provided in the compound-word-transformer repository¹. This also adds to the AILabs.tw Pop1K7 dataset (Hsiao et al., 2021), which currently includes only pop music. Figure 2 demonstrates the pipeline for creating the new dataset; a code-level skeleton of the same pipeline follows below.
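The skeleton below mirrors that pipeline at the code level; the three step functions are stubs standing in for Onsets and Frames, madmom and the compound-word dataset scripts, since each tool has its own invocation.

```python
from pathlib import Path
import pickle

def transcribe_to_midi(wav: Path):
    raise NotImplementedError("run Onsets and Frames piano transcription")

def track_beats(wav: Path):
    raise NotImplementedError("run madmom beat/downbeat tracking")

def to_compound_words(midi, beats):
    raise NotImplementedError("run the compound-word dataset scripts")

def build_dataset(audio_dir: str, out_dir: str) -> None:
    """Audio clip -> piano roll -> beats -> compound words, per song."""
    out = Path(out_dir)
    out.mkdir(parents=True, exist_ok=True)
    for wav in sorted(Path(audio_dir).glob("*.wav")):
        midi = transcribe_to_midi(wav)
        beats = track_beats(wav)
        words = to_compound_words(midi, beats)
        with open(out / (wav.stem + ".pkl"), "wb") as f:
            pickle.dump(words, f)
```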
Following the pipeline above, we managed to create a compound-word (Hsiao et al., 2021) dataset containing piano rolls for 150 musical pieces from 3 different genres: Electronic Dance Music (EDM), Indie and Hip-Hop.

¹ https://github.com/YatingMusic/compound-word-transformer/blob/main/dataset/Dataset.md
2.2. Implementing Transformer Model

We implement a linear transformer (Katharopoulos et al., 2020) to address long-sequence dependency, which is a very relevant factor in music generation due to the presence of a context or rhythm across the entire musical piece. Having an independent feed-forward head in the Transformer decoder allows the loss of each token to be improved independently. This allows the model to scale to additional perspectives on the music (such as genre, form, or a particular chord progression) by adding an additional token type. We implement our transformer model in a generic way that lets the user define their own token-sampling and token-embedding models, scalable to any number of token types. The loss observed at each feed-forward head is shown in Figure 6. This shows that adding a new token (for genre/style/form) for the model to learn can be achieved simply by adding an independent feed-forward head for it, as the sketch below illustrates.
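A minimal sketch of these per-type heads, using one independent linear projection per token type over the shared self-attention output; the vocabulary sizes below are illustrative.

```python
import torch.nn as nn

VOCAB_SIZES = {'type': 4, 'bar_beat': 18, 'chord': 135, 'tempo': 65,
               'pitch': 87, 'duration': 18, 'velocity': 25}  # illustrative

class TokenHeads(nn.Module):
    """One feed-forward head per token type; adding a new perspective
    (e.g. 'genre') is just one more entry in the vocabulary dict."""
    def __init__(self, d_model=512, vocabs=VOCAB_SIZES):
        super().__init__()
        self.heads = nn.ModuleDict(
            {name: nn.Linear(d_model, n) for name, n in vocabs.items()})

    def forward(self, h):            # h: (batch, seq, d_model)
        return {name: head(h) for name, head in self.heads.items()}
```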
2.2.1. Token Embedding

Figure 3. Each token undergoes independent embedding before being combined with the positional encoding. Here T1, T2, ..., TK are the K different tokens of our Transformer, each having its own embedding function and dimension; we assume the Transformer supports K token types.

The input to a transformer requires a positional encoding added to the embedding vector of each input sequence element. As each element in our sequence is a compound word (Hsiao et al., 2021) combining different tokens, we embed each token separately (allowing adaptive sizes) and then concatenate the embeddings.
[Figure 2 contents: YouTube audio (WAV/MP3 files) feeds Onsets and Frames for piano transcription and Madmom for beat tracking, whose outputs pass through the compound-word-transformer scripts to produce the training data. Figure 3 contents: tokens T1 ... TK each pass through their own embedding, are concatenated, fed through a feed-forward layer, and combined with the positional embedding to form the transformer input.]
Having an adaptive token size allows a smaller embedding dimension to be used for a token type with a smaller vocabulary; when we concatenate all of these we get an embedding dimension of 512 for our model. Refer to Figure 3 for the detailed token-embedding steps; a minimal sketch of this embedding module is given below.
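The sketch assumes the seven token types above, with per-type widths chosen so the concatenation gives the 512-dimensional input stated in the text; the individual widths are illustrative.

```python
import torch
import torch.nn as nn

EMB_DIMS = {'type': 32, 'bar_beat': 64, 'chord': 128, 'tempo': 64,
            'pitch': 128, 'duration': 48, 'velocity': 48}  # sums to 512

class CompoundEmbedding(nn.Module):
    """Each token type gets its own embedding table sized to its
    vocabulary; the concatenation forms the 512-dim transformer input."""
    def __init__(self, vocab_sizes):
        super().__init__()
        self.emb = nn.ModuleDict(
            {k: nn.Embedding(vocab_sizes[k], d) for k, d in EMB_DIMS.items()})

    def forward(self, tokens):       # tokens[k]: (batch, seq) index tensor
        x = torch.cat([self.emb[k](tokens[k]) for k in EMB_DIMS], dim=-1)
        return x                     # (batch, seq, 512); add positional enc.
```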
2.2.2. Token Sampling

For inference, sampling plays a crucial role in avoiding degeneration and improving diversity. To avoid degeneration we follow nucleus sampling (Holtzman et al., 2019), a stochastic, temperature-controlled process that samples from the smallest subset of tokens whose cumulative probability mass exceeds a threshold. We also give each token a separate sampling policy, defining a different threshold p and a different temperature parameter τ (Ackley et al., 1985) for reshaping the probabilities before sampling. We reused the inference implementation from the Compound Word Transformer (Hsiao et al., 2021) and tweaked τ to higher values for the chord token to allow more diverse chord progressions. Figure 4 shows the sampling process and the individual feed-forward layer for each token in the transformer.
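A sketch of the per-token sampling step: standard nucleus (top-p) sampling with a temperature reshaping the distribution first. The higher chord temperature reflects the text above; the specific numbers are illustrative.

```python
import torch

def nucleus_sample(logits, p=0.9, tau=1.0):
    """Sample one token id from the smallest set whose mass exceeds p,
    after reshaping the distribution with temperature tau."""
    probs = torch.softmax(logits / tau, dim=-1)
    sorted_p, idx = torch.sort(probs, descending=True)
    cum = torch.cumsum(sorted_p, dim=-1)
    keep = (cum - sorted_p) < p          # always keeps the top token
    kept = sorted_p * keep
    choice = torch.multinomial(kept / kept.sum(), 1)
    return idx[choice].item()

# Per-type policies: higher tau for chord -> more diverse progressions.
POLICIES = {'chord': dict(p=0.99, tau=1.5),
            'pitch': dict(p=0.90, tau=1.0)}   # illustrative values
```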
Figure 4. Transformer with N self-attention layers and an independent feed-forward head for each token. We first predict the type token for the particular time step and then perform nucleus sampling before predicting the remaining tokens.
2.3. Adaptive Learning

After defining the model, the next important step is to implement the training steps. To support scalable token definitions in our generalized transformer, we make the training steps modular and generic over a variable number of token types. This allows a new token to be added easily, with the gradient-descent optimization of its respective loss monitored independently; a sketch of such a training step follows below.
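The sketch reuses the per-type heads from the earlier sketch: each head gets its own cross-entropy, the per-token losses are logged independently, and their mean is backpropagated. The batch layout is an assumption for illustration.

```python
import torch
import torch.nn.functional as F

def training_step(model, batch, optimizer):
    """Generic over token types: `model` returns a dict of per-type logits
    (as in the TokenHeads sketch); `batch` holds matching target indices."""
    logits = model(batch['input'])               # {name: (B, T, vocab)}
    losses = {}
    for name, out in logits.items():
        target = batch['target'][name]           # (B, T) class indices
        losses[name] = F.cross_entropy(out.transpose(1, 2), target)
    loss = torch.stack(list(losses.values())).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return {k: v.item() for k, v in losses.items()}  # per-token monitoring
```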
We trained our model in parallel under 2 different conditions. The first set of training was performed on the original AILabs.tw Pop1K7 dataset (Hsiao et al., 2021). The second set was designed to provide a multi-genre learning environment for the transformer, as it involved training on a dictionary generated from 3 different genres (EDM, Indie, Hip-Hop).
3. Evaluation and Results

To train a multi-genre transformer, the primary objective was to provide it with a dataset richer in variety than the original pop-only dataset. With the help of the dataset-building pipeline, we managed to create a token set with higher variance, allowing the model to have broader expressive power. Figure 5 compares the token distributions of the 2 datasets used.

Figure 5. Left: token distribution over the songs in the generated multi-genre dataset. Right: the same distribution for the AILabs.tw Pop1K7 dataset (Hsiao et al., 2021).
After training the model on both datasets, we observe (refer to Figure 6) that the individual token losses and the total average loss are similar, indicating that the model converges. Additionally, the gradient descent is more gradual on the multi-genre dataset, displaying a more settled progression. We trained the model with 12 self-attention layers and 8 feed-forward heads, with a model dimension of 512 and a batch size of 4, for 180 epochs, which took around 17 hours. Using the trained model, we then generated 20 new full-length musical pieces with an average inference time of 12.56 sec/song, which is faster than the compound-word transformer, though with a slightly lower average number of tokens per song. Table 1 shows a more detailed comparison.
[Figure 4 contents: N self-attention layers feeding a type-token feed-forward head, nucleus sampling, and a feed-forward layer for each remaining token. Figure 5 contents: token-count histograms with panel statistics mean 2342.926 (std 1194.481) and mean 2138.370 (std 775.472); axes are number of tokens vs. number of songs.]

Figure 6. Loss vs. epoch for the different token types. The last plot corresponds to the average loss over all the token types.
For a qualitative evaluation of the musical pieces that were produced, we compare (Figure 7) their piano rolls with the piano rolls of the original tracks used to train the model.

Figure 7. Piano rolls of original songs and generated songs. We can see rich and complete content in the generated songs, similar to some of the original tracks.
4. Conclusion

In this project we produce music as a sequence of musical events generated by a trained Transformer. We leverage the definition of compound words (Hsiao et al., 2021) to define a musical event by grouping multiple tokens. This grouping greatly reduces the length of our sequences and boosts long-range learning; it also reduces the training and inference time of our model remarkably. We further exploit the fact that each token has its own independent feed-forward prediction head to make the model scalable to new token types introduced into our dictionary; this makes it very easy to add a new token to this transformer, for example for musical form or chord progression. Additionally, we created an entirely new dataset consisting of a multi-genre compound-word dictionary and trained our model on it to provide a more adaptive learning environment. The compositions that were generated were highly rich in musical events and of good quality.
Table 1. Quantitative evaluation results for the Multi-Genre Transformer and the Compound Word Transformer. Results for the Compound Word Transformer come from the implementation in the paper (Hsiao et al., 2021).

Model                    | Training time | GPU   | Inference time (/song) | Avg tokens (/song)
Multi-Genre Transformer  | 17 hrs        | 9.8GB | 12.56 sec              | 9190
Compound Transformer     | 1.3 days      | 9.5GB | 19.8 sec               | 9546
[Figure 6 contents: loss vs. epoch curves for the tempo, chord, bar-beat, type, pitch, duration and velocity tokens, plus the average loss, each comparing the Pop dataset with the multi-genre dataset. Figure 7 contents: piano-roll plots with pitch (MIDI note) on the vertical axis and time (sec) on the horizontal axis.]

References

Ackley, D.
3dE3T4oBgHgl3EQfoQqU/content/tmp_files/2301.04632v1.pdf.txt ADDED
@@ -0,0 +1,1845 @@
1
+ Federated Learning under Heterogeneous and
2
+ Correlated Client Availability
3
+ Angelo Rodio∗, Francescomaria Faticanti∗, Othmane Marfoq∗†, Giovanni Neglia∗, Emilio Leonardi‡
4
+ ∗Inria, Université Côte d'Azur, France. Email: {firstname.lastname}@inria.fr,
5
+ †Accenture Labs, Sophia-Antipolis, France. Email: {firstname.lastname}@accenture.com,
6
+ ‡Politecnico di Torino, Turin, Italy. Email: {firstname.lastname}@polito.it
7
+ Abstract—The enormous amount of data produced by mobile and
8
+ IoT devices has motivated the development of federated learning
9
+ (FL), a framework allowing such devices (or clients) to collabora-
10
+ tively train machine learning models without sharing their local
11
+ data. FL algorithms (like FedAvg) iteratively aggregate model
12
+ updates computed by clients on their own datasets. Clients may
13
+ exhibit different levels of participation, often correlated over time
14
+ and with other clients. This paper presents the first convergence
15
+ analysis for a FedAvg-like FL algorithm under heterogeneous
16
+ and correlated client availability. Our analysis highlights how
17
+ correlation adversely affects the algorithm’s convergence rate
18
+ and how the aggregation strategy can alleviate this effect at
19
+ the cost of steering training toward a biased model. Guided
20
+ by the theoretical analysis, we propose CA-Fed, a new FL
21
+ algorithm that tries to balance the conflicting goals of maximizing
22
+ convergence speed and minimizing model bias. To this purpose,
23
+ CA-Fed dynamically adapts the weight given to each client and
24
+ may ignore clients with low availability and large correlation. Our
25
+ experimental results show that CA-Fed achieves higher time-
26
+ average accuracy and a lower standard deviation than state-of-
27
+ the-art AdaFed and F3AST, both on synthetic and real datasets.
28
+ Index Terms—Federated Learning, Distributed Optimization.
29
+ I. INTRODUCTION
30
+ The enormous amount of data generated by mobile and IoT de-
31
+ vices motivated the emergence of distributed machine learning
32
+ training paradigms [1], [2]. Federated Learning (FL) [3]–[6]
33
+ is an emerging framework where geographically distributed
34
+ devices (or clients) participate in the training of a shared
35
+ Machine Learning (ML) model without sharing their local
36
+ data. FL was proposed to reduce the overall cost of collecting
37
+ a large amount of data as well as to protect potentially
38
+ sensitive users’ private information. In the original Federated
39
+ Averaging algorithm (FedAvg) [4], a central server selects
40
+ a random subset of clients from the set of available clients
41
+ and broadcasts them the shared model. The sampled clients
42
+ perform a number of independent Stochastic Gradient Descent
43
+ (SGD) steps over their local datasets and send their local
44
+ model updates back to the server. Then, the server aggregates
45
+ the received client updates to produce a new global model, and
46
+ a new training round begins. At each iteration of FedAvg, the
47
+ server typically samples a few hundred devices at random to
48
+ participate [7], [8].
49
+ This research was supported by the French government through the 3IA
50
+ Cˆote d’Azur Investments in the Future project by the National Research
51
+ Agency (ANR) with reference ANR-19-P3IA-0002, and by Groupe La Poste,
52
+ sponsor of Inria Foundation, in the framework of FedMalin Inria Challenge.
53
+ A first version of this work has been accepted at IEEE INFOCOM 2023.
54
+ In real-world scenarios, the availability/activity of clients is
55
+ dictated by exogenous factors that are beyond the control of
56
+ the orchestrating server and hard to predict. For instance, only
57
+ smartphones that are idle, under charge, and connected to
58
+ broadband networks are commonly allowed to participate in
59
+ the training process [4], [9]. These eligibility requirements can
60
+ make the availability of devices correlated over time and space
61
+ [7], [10]–[12]. For example, temporal correlation may originate
62
+ from a smartphone being under charge for a few consecutive
63
+ hours and then ineligible for the rest of the day. Similarly,
64
+ the activity of a sensor powered by renewable energy may
65
+ depend on natural phenomena intrinsically correlated over
66
+ time (e.g., solar light). Spatial correlation refers instead to
67
+ correlation across different clients, which often emerges as
68
+ consequence of users’ different geographical distribution. For
69
+ instance, clients in the same time zone often exhibit similar
70
+ availability patterns, e.g., due to time-of-day effects.
71
+ Temporal correlation in the data sampling procedure is known
72
+ to negatively affect the performance of ML training even in
73
+ the centralized setting [13], [14] and can potentially lead to
74
+ catastrophic forgetting: the data used during the final training
75
+ phases can have a disproportionate effect on the final model,
76
+ “erasing” the memory of previously learned information [15],
77
+ [16]. Catastrophic forgetting has also been observed in FL,
78
+ where clients in the same geographical area have more similar
79
+ local data distributions and clients’ participation follows a
80
+ cyclic daily pattern (leading to spatial correlation) [7], [10],
81
+ [11], [17]. Despite this evidence, a theoretical study of the
82
+ convergence of FL algorithms under both temporally and
83
+ spatially correlated client participation is still missing.
84
+ This paper provides the first convergence analysis of
+ FedAvg [4] under heterogeneous and correlated client avail-
93
+ ability. We assume that clients’ temporal and spatial availabil-
94
+ ity follows an arbitrary finite-state Markov chain: this assump-
95
+ tion models a realistic scenario in which the activity of clients
96
+ is correlated and, at the same time, still allows the analytical
97
+ tractability of the system. Our theoretical analysis (i) quantifies
98
+ the negative effect of correlation on the algorithm’s conver-
99
+ gence rate through an additional term, which depends on the
100
+ spectral properties of the Markov chain; (ii) points out a trade-
101
+ off between two conflicting objectives: slow convergence to
102
+ the optimal model, or fast convergence to a biased model, i.e.,
103
+ a model that minimizes an objective function different from the
104
+ initial target. Guided by insights from the theoretical analysis,
108
+ we propose CA-Fed, an algorithm which dynamically assigns
109
+ weights to clients and achieves a good trade-off between
110
+ maximizing convergence speed and minimizing model bias.
111
+ Interestingly, CA-Fed can decide to ignore clients with low
112
+ availability and high temporal correlation. Our experimental
113
+ results demonstrate that excluding such clients is a simple, but
114
+ effective approach to handle the heterogeneous and correlated
115
+ client availability in FL. Indeed, while CA-Fed achieves a
116
+ comparable maximum accuracy as the state-of-the-art methods
117
+ F3AST [18] and AdaFed [19], its test accuracy exhibits
118
+ higher time-average and smaller variability over time.
119
+ The remainder of this paper is organized as follows. Section II
120
+ describes the problem of correlated client availability in FL
121
+ and discusses the main related works. Section III provides
122
+ a convergence analysis of FedAvg under heterogeneous and
123
+ correlated client participation. CA-Fed, our correlation-aware
124
+ FL algorithm, is presented in Section IV. We evaluate CA-Fed
125
+ in Section V, comparing it with state-of-the-art methods on
126
+ synthetic and real-world data. Section VII concludes the paper.
127
+ II. BACKGROUND AND RELATED WORKS
128
+ We consider a finite set K of N clients. Each client k ∈ K
129
+ holds a local dataset Dk. Clients aim to jointly learn the
130
+ parameters w ∈ W ⊆ Rd of a global ML model (e.g., the
131
+ weights of a neural network architecture). During training, the
132
+ quality of the model with parameters w on a data sample
133
+ ξ ∈ Dk is measured by a loss function f(w; ξ). The clients
134
+ solve, under the orchestration of a central server, the following
135
+ optimization problem:
136
+ min_{w ∈ W ⊆ R^d} { F(w) := Σ_{k∈K} α_k F_k(w) },        (1)
+ where F_k(w) := (1/|D_k|) Σ_{ξ∈D_k} f(w; ξ) is the average loss
+ computed on client k's local dataset, and α = (α_k)_{k∈K} are
+ positive coefficients such that Σ_k α_k = 1. They represent
154
+ the target importance assigned by the central server to each
155
+ client k. Typically (αk)k∈K are set proportional to the clients’
156
+ dataset size |Dk|, such that the objective function F in (1)
157
+ coincides with the average loss computed on the union of the
158
+ clients’ local datasets D = ∪k∈KDk.
159
+ Under proper assumptions, specified in Section III, Problem (1)
+ admits a unique solution. We use w∗ (resp. F∗) to denote
+ the minimizer (resp. the minimum value) of F. Moreover, for
+ k ∈ K, F_k admits a unique minimizer on W. We use w∗_k (resp.
+ F∗_k) to denote the minimizer (resp. the minimum value) of F_k.
166
+ Problem (1) is commonly solved through iterative algo-
167
+ rithms [4], [8] requiring multiple communication rounds be-
168
+ tween the server and the clients. At round t > 0, the server
169
+ broadcasts the latest estimate of the global model wt,0 to
170
+ the set of available clients (At). Client k ∈ At updates the
171
+ global model with its local data through E ≥ 1 steps of local
172
+ Stochastic Gradient Descent (SGD):
173
+ w^k_{t,j+1} = w^k_{t,j} − η_t ∇F_k(w^k_{t,j}, B^k_{t,j}),   j = 0, . . . , E − 1,        (2)
+ where η_t > 0 is an appropriately chosen learning rate, referred to
+ as the local learning rate; B^k_{t,j} is a random batch sampled from
+ client k's local dataset at round t and step j; and ∇F_k(·, B) :=
+ (1/|B|) Σ_{ξ∈B} ∇f(·, ξ) is an unbiased estimator of the local
+ gradient ∇F_k. Then, each client sends its local model update
+ ∆^k_t := w^k_{t,E} − w^k_{t,0} to the server. The server computes
+ ∆_t := Σ_{k∈A_t} q_k · ∆^k_t, a weighted average of the clients' local
+ updates with non-negative aggregation weights q = (q_k)_{k∈K}.
198
+ The choice of the aggregation weights defines an aggregation
199
+ strategy (we will discuss different aggregation strategies later).
200
+ The aggregated update ∆t can be interpreted as a proxy for
201
+ −∇F(wt,0); the server applies it to the global model:
202
+ w_{t+1,0} = Proj_W (w_{t,0} + η_s · ∆_t),        (3)
204
+ where ProjW (·) denotes the projection over the set W, and
205
+ ηs > 0 is an appropriately chosen learning rate, referred to as
206
+ the server learning rate.1
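To make the update rules (2)-(3) concrete, the following minimal sketch simulates one communication round; it is our illustration, not the paper's implementation, and `local_grad`, `sample_batch`, and `proj_W` are assumed placeholders:

    import numpy as np

    def run_round(w, active_clients, q, eta_t, eta_s, E,
                  local_grad, sample_batch, proj_W):
        deltas = {}
        for k in active_clients:                # clients in A_t
            w_k = w.copy()
            for _ in range(E):                  # E local SGD steps, eq. (2)
                w_k = w_k - eta_t * local_grad(w_k, sample_batch(k))
            deltas[k] = w_k - w                 # local update Delta^k_t
        delta_t = sum(q[k] * deltas[k] for k in active_clients)  # weighted avg
        return proj_W(w + eta_s * delta_t)      # server step, eq. (3)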
207
+ The aggregate update ∆t is, in general, a biased estimator
208
+ of −∇F(wt,0), where each client k is taken into account
209
+ proportionally to its frequency of appearance in the set At and
210
+ to its aggregation weight qk. Indeed, under proper assumptions
211
+ specified in Section III, one can show (see Theorem 2) that the
212
+ update rule described by (2) and (3) converges to the unique
213
+ minimizer of a biased global objective FB, which depends
214
+ both on the clients’ availability (i.e., on the sequence (At)t>0)
215
+ and on the aggregation strategy (i.e., on q = (qk)k∈K):
216
+ F_B(w) := Σ_{k=1}^N p_k F_k(w),   with   p_k := π_k q_k / Σ_{h=1}^N π_h q_h,        (4)
+ where π_k := lim_{t→∞} P(k ∈ A_t) is the asymptotic availability
+ of client k. The coefficients p = (p_k)_{k∈K} can be interpreted
+ as the biased importance the server is giving to each client k
+ during training, in general different from the target importance
+ α. In what follows, w∗_B (resp. F∗_B) denotes the minimizer
+ (resp. the minimum value) of F_B.
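The following snippet sketches how the biased importance p in (4) arises from availabilities and aggregation weights; the numbers are illustrative assumptions, not values from the paper:

    import numpy as np

    pi = np.array([0.9, 0.5, 0.1])     # asymptotic availabilities pi_k
    q  = np.array([1.0, 1.0, 1.0])     # uniform aggregation weights q_k
    p  = pi * q / np.sum(pi * q)       # p_k = pi_k q_k / sum_h pi_h q_h
    print(p)                           # [0.6, 0.333, 0.067]: rare clients fade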
230
+ In some large-scale FL applications, like training Google
231
+ keyboard next-word prediction models, each client participates
232
+ in training at most for one round. The orchestrator usually
233
+ selects a few hundred clients at each round for a few thousand
234
+ rounds (e.g., see [5, Table 2]), but the available set of clients
235
+ may include hundreds of millions of Android devices. In this
236
+ scenario, it is difficult to address the potential bias unless there
237
+ is some a-priori information about each client’s availability.
238
+ Anyway, FL can be used by service providers with access
239
+ to a much smaller set of clients (e.g., smartphone users that
240
+ have installed a specific app). In this case, a client participates
241
+ multiple times in training: the orchestrating server may keep
242
+ track of each client’s availability and try to compensate for
243
+ the potentially dangerous heterogeneity in their participation.
244
+ Much previous effort on federated learning [4], [17]–[19],
245
+ [22]–[25] considered this problem and, under different as-
246
+ 1The aggregation rule (3) has been considered also in other works, e.g., [8],
247
+ [20], [21]. In other FL algorithms, the server computes an average of clients’
248
+ local models. This aggregation rule can be obtained with minor changes to (3).
251
+ sumptions on the clients’ availability (i.e., on (At)t>0), de-
252
+ signed aggregation strategies that unbias ∆t through an appro-
253
+ priate choice of q. Reference [22] provides the first analysis of
254
+ FedAvg on non-iid data under clients’ partial participation.
255
+ Their analysis covers both the case when active clients are
256
+ sampled uniformly at random without replacement from K and
257
+ assigned aggregation weights equal to their target importance
258
+ (as assumed in [4]), and the case when active clients are
259
+ sampled iid with replacement from K with probabilities α
260
+ and assigned equal weights (as assumed in [23]). However,
261
+ references [4], [22], [23] ignore the variance induced by the
262
+ clients stochastic availability. The authors of [24] reduce such
263
+ variance by considering only the clients with important up-
264
+ dates, as measured by the value of their norm. References [17]
265
+ and [25] reduce the aggregation variance through clustered and
266
+ soft-clustered sampling, respectively.
267
+ Some recent works [18], [19], [26] do not actively pursue the
268
+ optimization of the unbiased objective. Instead, they derive
269
+ bounds for the convergence error and propose heuristics to
270
+ minimize those bounds, potentially introducing some bias.
271
+ Our work follows a similar development: we compare our
272
+ algorithm with F3AST from [18] and AdaFed from [19].
273
+ The novelty of our study is in considering the spatial and
274
+ temporal correlation in clients’ availability dynamics. As dis-
275
+ cussed in the introduction, such correlations are also intro-
276
+ duced by clients’ eligibility criteria, e.g., smartphones being
277
+ under charge and connected to broadband networks. The effect
278
+ of correlation has been ignored until now, probably due to the
279
+ additional complexity in studying FL algorithms’ convergence.
280
+ To the best of our knowledge, the only exception is [18], which
281
+ scratches the issue of spatial correlation by proposing two
282
+ different algorithms for the case when clients’ availabilities
283
+ are uncorrelated and for the case when they are positively
284
+ correlated (there is no smooth transition from one algorithm
285
+ to the other as a function of the degree of correlation).
286
+ The effect of temporal correlation on centralized stochastic
287
+ gradient methods has been addressed in [12]–[14], [27]: these
288
+ works study a variant of stochastic gradient descent where
289
+ samples are drawn according to a Markov chain. Refer-
290
+ ence [12] extends its analysis to a FL setting where each client
291
+ draws samples according to a Markov chain. In contrast, our
292
+ work does not assume a correlation in the data sampling but
293
+ rather in the client’s availability. Nevertheless, some of our
294
+ proof techniques are similar to those used in this line of work
295
+ and, in particular, we rely on some results in [14].
296
+ III. ANALYSIS
297
+ A. Main assumptions
298
+ We consider a time-slotted system where a slot corresponds
299
+ to one FL communication round. We assume that clients’
300
+ availability over the timeslots t ∈ N follows a discrete-time
301
+ Markov chain (At)t≥0.2
302
+ 2In Section III-D we will focus on the case where this chain is the
303
+ superposition of N independent Markov chains, one for each client.
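As a small illustration of this availability model, the sketch below simulates N independent two-state (active/inactive) chains, as in footnote 2; the transition probabilities are our own assumptions:

    import numpy as np

    rng = np.random.default_rng(0)
    N, T = 5, 100
    p_on, p_off = 0.2, 0.1            # P(inactive->active), P(active->inactive)
    state = np.zeros(N, dtype=bool)   # all clients start inactive
    A = []                            # A[t] = set of available clients at round t
    for t in range(T):
        flip = rng.random(N)
        state = np.where(state, flip > p_off, flip < p_on)
        A.append({k for k in range(N) if state[k]})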
304
+ Assumption 1. The Markov chain (At)t≥0 on the finite state
305
+ space [M] is time-homogeneous, irreducible, and aperiodic. It
306
+ has transition matrix P and stationary distribution π.
307
+ Markov chains have already been used in the literature to
308
+ model the dynamics of stochastic networks where some nodes
309
+ or edges in the graph can switch between active and inactive
310
+ states [28], [29]. The previous Markovian assumption, while
311
+ allowing a great degree of flexibility, still guarantees the
312
+ analytical tractability of the system. The distance dynamics
313
+ between current and stationary distribution of the Markov
314
+ process can be characterized by the spectral properties of its
315
+ transition matrix P [30]. Let λ2(P ) denote the the second
316
+ largest eigenvalue of P in absolute value. Previous works [14]
317
+ have shown that:
318
+ max_{i,j∈[M]} |[P^t]_{i,j} − π_j| ≤ C_P · λ(P)^t,   for t ≥ T_P,        (5)
322
+ where the parameter λ(P ) := (λ2(P ) + 1)/2, and CP , TP
323
+ are positive constants whose values are reported in [14,
324
+ Lemma 1].3 Note that λ(P ) quantifies the correlation of the
325
+ Markov process (At)t≥0: the closer λ(P ) is to one, the slower
326
+ the Markov chain converges to its stationary distribution.
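For intuition, λ(P) can be computed directly from a toy transition matrix; the two-state chain below is our illustrative assumption:

    import numpy as np

    P = np.array([[0.95, 0.05],        # row: active -> (active, inactive)
                  [0.10, 0.90]])       # row: inactive -> (active, inactive)
    lam2 = sorted(np.abs(np.linalg.eigvals(P)))[-2]  # second largest |eigenvalue|
    lam  = (lam2 + 1) / 2              # lambda(P) as defined above, here 0.925
    print(lam)                         # close to 1: slow mixing, high correlation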
327
+ In our analysis, we make the following additional assumptions.
328
+ Let w∗, w∗_B denote the minimizers of F and F_B on W, respectively.
+ Assumption 2. The hypothesis class W is convex, compact,
+ and contains in its interior the minimizers w∗, w∗_B, and w∗_k.
335
+ The following assumptions concern clients’ local objective
336
+ functions {Fk}k∈K. Assumptions 3 and 4 are standard in
337
+ the literature on convex optimization [31, Sections 4.1, 4.2].
338
+ Assumption 5 is a standard hypothesis in the analysis of
339
+ federated optimization algorithms [8, Section 6.1].
340
+ Assumption 3 (L-smoothness). The local functions {F_k}_{k=1}^N have
+ L-Lipschitz continuous gradients: F_k(v) ≤ F_k(w) + ⟨∇F_k(w), v − w⟩ +
+ (L/2) ∥v − w∥₂², ∀ v, w ∈ W.
+ Assumption 4 (Strong convexity). The local functions {F_k}_{k=1}^N are
+ µ-strongly convex: F_k(v) ≥ F_k(w) + ⟨∇F_k(w), v − w⟩ +
+ (µ/2) ∥v − w∥₂², ∀ v, w ∈ W.
+ Assumption 5 (Bounded variance). The variance of stochastic
+ gradients in each device is bounded:
+ E ∥∇F_k(w^k_{t,j}, ξ^k_{t,j}) − ∇F_k(w^k_{t,j})∥² ≤ σ²_k,   k = 1, . . . , N.
367
Assumptions 2–5 imply the following properties for the local functions, described by Lemma 1 (proof in Appendix B).

Lemma 1. Under Assumptions 2–5, there exist constants D, G, H > 0 such that, for w ∈ W and k ∈ K, we have:

‖∇F_k(w)‖ ≤ D,   (6)
E‖∇F_k(w, ξ)‖² ≤ G²,   (7)
|F_k(w) − F_k(w*_B)| ≤ H.   (8)

³Note that (5) holds for different definitions of λ(P) as long as λ(P) ∈ (λ₂(P), 1). The specific choice for λ(P) changes the constants C_P and T_P.
Similarly to other works [8], [22], [23], [32], we introduce a metric to quantify the heterogeneity of clients' local datasets:

Γ := max_{k∈K} {F_k(w*) − F*_k}.   (9)

If the local datasets are identical, the local functions {F_k}_{k∈K} coincide among them and with F, w* is a minimizer of each local function, and Γ = 0. In general, Γ is smaller the closer the distributions from which the local datasets are drawn.
B. Main theorems

Theorem 1 (proof in Appendix A) decomposes the error on the target global objective as the sum of an optimization error for the biased global objective and a bias error.

Theorem 1 (Decomposing the total error). Under Assumptions 2–4, the optimization error of the target global objective ϵ = F(w) − F* can be bounded as follows:

ϵ ≤ 2κ²(F_B(w) − F*_B) + 2κ⁴ χ²_{α‖p} Γ,   (10)

where the first term on the right-hand side is denoted ϵ_opt and the second ϵ_bias, κ := L/µ, and χ²_{α‖p} := Σ_{k=1}^N (α_k − p_k)²/p_k.
Theorem 2 below proves that the optimization error ϵ_opt associated with the biased objective F_B, evaluated on the trajectory determined by scheme (3), asymptotically vanishes. The non-vanishing bias error ϵ_bias captures the discrepancy between F(w) and F_B(w). This latter term depends on the chi-square divergence χ²_{α‖p} between the target and biased probability distributions α = (α_k)_{k∈K} and p = (p_k)_{k∈K}, and on Γ, which quantifies the degree of heterogeneity of the local functions. When all local functions are identical (Γ = 0), the bias term ϵ_bias also vanishes. For Γ > 0, the bias error can still be controlled through the aggregation weights assigned to the devices. In particular, the bias term vanishes when q_k ∝ α_k/π_k, ∀k ∈ K. Since it asymptotically cancels the bias error, we refer to this choice as the unbiased aggregation strategy. However, in practice, FL training is limited to a finite number of iterations T (typically a few hundred [5], [7]), and the previous asymptotic considerations may not apply. In this regime, the unbiased aggregation strategy can be suboptimal, since the minimization of ϵ_bias does not necessarily lead to the minimization of the total error ϵ ≤ ϵ_opt + ϵ_bias. This motivates the analysis of the optimization error ϵ_opt.
Theorem 2 (Convergence of the optimization error ϵ_opt). Let Assumptions 1–5 hold and the constants M, L, D, G, H, Γ, σ_k, C_P, T_P, λ(P) be defined as above. Let Q = Σ_{k∈K} q_k. Let the stepsizes satisfy:

Σ_t η_t = +∞,  Σ_t ln(t)·η_t² < +∞.   (11)

Let T denote the total number of communication rounds. For T ≥ T_P, the expected optimization error can be bounded as follows:

E[F_B(w̄_{T,0}) − F*_B] ≤ [ ((1/2) q^⊺Σq + υ)/(π^⊺q) + ψ + φ/ln(1/λ(P)) ] / (Σ_{t=1}^T η_t),   (12)

where w̄_{T,0} := (Σ_{t=1}^T η_t w_{t,0})/(Σ_{t=1}^T η_t), and

Σ = diag(σ_k² π_k Σ_t η_t²),
υ = 2 E‖w_{0,0} − w*‖² + (1/4) MQ Σ_t (η_t² + 1/t²),
ψ = 4L(EQ + 2) Γ Σ_t η_t² + (2/3)(E − 1)(2E − 1) G² Σ_t η_t²,
J_t = min{ max{ ⌈ln(2C_P H t)/ln(1/λ(P))⌉, T_P }, t },
φ = 2EDGQ Σ_t ln(2C_P H t) η²_{t−J_t}.
Theorem 2 (proof in Appendix B) proves convergence of the expected biased objective F_B to its minimum F*_B under correlated client participation. Our bound (12) captures the effect of correlation through the factor ln(1/λ(P)): a high correlation worsens the convergence rate. In particular, we found that the numerator of (12) has a quadratic-over-linear fractional dependence on q. Minimizing ϵ_opt leads, in general, to a different choice of q than minimizing ϵ_bias.
C. Minimizing the total error ϵ ≤ ϵ_opt + ϵ_bias

Our analysis points out a trade-off between minimizing ϵ_opt and minimizing ϵ_bias. Our goal is to find the optimal aggregation weights q* that minimize the upper bound on the total error ϵ(q) in (10):

minimize_q  ϵ_opt(q) + ϵ_bias(q);  subject to  q ≥ 0,  ‖q‖₁ = Q.   (13)

In Appendix E we prove that (13) is a convex optimization problem, which can be solved with the method of Lagrange multipliers. However, the solution is not of practical utility, because the constants in (10) and (12) (e.g., L, µ, Γ, C_P) are in general problem-dependent and difficult to estimate during training. In particular, Γ poses particular difficulties, as it is defined in terms of the minimizer of the target objective F, while the FL algorithm generally minimizes the biased function F_B. Moreover, the bound in (10), similarly to the bound in [32], diverges when some q_k are set equal to 0, but this is simply an artifact of the proof technique. A result of more practical interest is the following (proof in Appendix C):
Theorem 3 (An alternative decomposition of the total error ϵ). Under the same assumptions as Theorem 1, let Γ′ := max_k {F_k(w*_B) − F*_k}. The following result holds:

ϵ ≤ 2κ²(F_B(w) − F*_B) + 8κ⁴ d²_TV(α, p) Γ′,   (14)

where the first term is again ϵ_opt, the second term is denoted ϵ′_bias, and d_TV(α, p) := (1/2) Σ_{k=1}^N |α_k − p_k| is the total variation distance between the probability distributions α and p.
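As an illustration of the two divergences that control ϵ_bias and ϵ′_bias, the short sketch below (our own, with made-up distributions) computes χ²_{α‖p} from Theorem 1 and d_TV(α, p) from Theorem 3.

import numpy as np

def chi_square_div(alpha, p):
    """Chi-square divergence sum_k (alpha_k - p_k)^2 / p_k (Theorem 1)."""
    return np.sum((alpha - p) ** 2 / p)

def total_variation(alpha, p):
    """Total variation distance 0.5 * sum_k |alpha_k - p_k| (Theorem 3)."""
    return 0.5 * np.sum(np.abs(alpha - p))

alpha = np.array([0.25, 0.25, 0.25, 0.25])  # target importance
p = np.array([0.40, 0.30, 0.20, 0.10])      # biased importance p_k ∝ pi_k q_k
print(chi_square_div(alpha, p))             # ≈ 0.302
print(total_variation(alpha, p))            # 0.2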
The new constant Γ′ is defined in terms of w*_B, and is therefore easier to evaluate during training. However, Γ′ depends on q, because it is evaluated at the minimizer of F_B. This dependence makes the minimization of the right-hand side of (14) more challenging (for example, the corresponding problem is not convex). We study the minimization of the two terms ϵ_opt and ϵ′_bias separately and learn some insights, which we use to design the new FL algorithm CA-Fed.
D. Minimizing ϵ_opt

The minimization of ϵ_opt is still a convex optimization problem (Appendix D). In particular, at the optimum the non-negative weights are set according to q*_k = a(λ*π_k − θ*), with a, λ*, and θ* positive constants (see (29)). It follows that clients with smaller availability get smaller weights in the aggregation. In particular, this suggests that the clients with the smallest availability can be excluded from the aggregation, leading to the following guideline:

Guideline A: to speed up convergence, we can exclude, i.e., set q*_k = 0, the clients with the lowest availability π_k.
This guideline can be justified intuitively: updates from clients with low participation may be too sporadic to allow the FL algorithm to keep track of their local objectives. They act as noise, slowing down the algorithm's convergence. It may be advantageous to exclude these clients from participating.
We observe that the choice of the aggregation weights q does not affect the clients' availability process and, in particular, λ(P). However, if the algorithm excludes some clients, it is possible to consider the state space of the Markov chain that only specifies the availability state of the remaining clients, and this Markov chain may have different spectral properties. For the sake of concreteness, we consider here (and in the rest of the paper) the particular case when the availability of each client k evolves according to a two-state Markov chain (A^k_t)_{t≥0} with transition probability matrix P_k, and these Markov chains are all independent. In this case, the aggregate process is described by the product Markov chain (A_t)_{t≥0} with transition matrix P = ⊗_{k∈K} P_k and λ(P) = max_{k∈K} λ(P_k), where P_i ⊗ P_j denotes the Kronecker product between matrices P_i and P_j [30, Exercise 12.6]. In this setting, it is possible to redefine the Markov chain (A_t)_{t≥0} by taking into account the reduced state space defined by the clients with a non-null aggregation weight, i.e., P′ = ⊗_{k′∈K: q_{k′}>0} P_{k′} and λ(P′) = max_{k′∈K: q_{k′}>0} λ(P_{k′}), which is potentially smaller than in the case when all clients participate in the aggregation. These considerations lead to the following guideline:

Guideline B: to speed up convergence, we can exclude, i.e., set q*_k = 0, the clients with the largest λ(P_k).
Intuition also supports this guideline. Clients with large λ(P_k) tend to be available or unavailable for long periods of time. Due to the well-known catastrophic forgetting problem affecting gradient methods [33], [34], these clients may unfairly steer the algorithm toward their local objective when they appear in the final stages of the training period. Moreover, their participation in the early stages may be useless, as their contribution will be forgotten during their long absence. The FL algorithm may benefit from directly neglecting such clients.
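The identity λ(P) = max_k λ(P_k) for independent chains can also be checked numerically: the eigenvalues of a Kronecker product are the pairwise products of the factors' eigenvalues, so the second largest modulus among them is max_k |λ₂(P_k)|. A small sketch (our own, with arbitrary chains):

import numpy as np

P1 = np.array([[0.9, 0.1], [0.2, 0.8]])  # lambda_2(P1) = 0.7
P2 = np.array([[0.6, 0.4], [0.3, 0.7]])  # lambda_2(P2) = 0.3

P = np.kron(P1, P2)  # transition matrix of the joint (product) chain
eig = np.sort(np.abs(np.linalg.eigvals(P)))[::-1]
print(eig[1])        # 0.7 = max(|lambda_2(P1)|, |lambda_2(P2)|)

Excluding the client with the larger λ₂ (here, the one governed by P1) leaves a reduced chain with second eigenvalue 0.3, consistent with guideline B.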
We observe that guideline B strictly applies to this specific setting where clients' dynamics are independent (and there is no spatial correlation). We do not provide a corresponding guideline for the case when clients are spatially correlated (we leave this task for future research). However, in this more general setting, it is possible to ignore guideline B but still draw on guidelines A and C, or to still consider guideline B if clients are spatially correlated (see discussion in Section VI-B).

Algorithm 1: CA-Fed (Correlation-Aware FL)
Input: w_{0,0}, α, q^{(0)}, {η_t}_{t=1}^T, η_s, E, β, τ
1:  Initialize F̂^{(0)}, F̂*, Γ̂′^{(0)}, π̂^{(0)}, and λ̂^{(0)};
2:  for t = 1, …, T do
3:      Receive the set of active clients A_t and the loss vector F^{(t)};
4:      Update F̂^{(t)}, Γ̂′^{(t)}, π̂^{(t)}, and λ̂^{(t)};
5:      Initialize q^{(t)} = α / π̂^{(t)};
6:      q^{(t)} ← get(q^{(t)}, α, F̂^{(t)}, F̂*, Γ̂′^{(t)}, π̂^{(t)}, λ̂^{(t)});
7:      q^{(t)} ← get(q^{(t)}, α, F̂^{(t)}, F̂*, Γ̂′^{(t)}, π̂^{(t)}, −π̂^{(t)});
8:      for every client {k ∈ A_t : q_k^{(t)} > 0}, in parallel do
9:          for j = 0, …, E − 1 do
10:             w^k_{t,j+1} = w^k_{t,j} − η_t ∇F_k(w^k_{t,j}, B^k_{t,j});
11:         Δ^k_t ← w^k_{t,E} − w_{t,0};
12:     w_{t+1,0} ← Proj_W(w_{t,0} + η_s Σ_{k∈A_t} q_k^{(t)} · Δ^k_t);
13: Function get(q, α, F, F*, Γ, π, ρ):
14:     K ← sort clients by descending order of ρ;
15:     ϵ̂ ← ⟨F − F*, π ⊙̃ q⟩ + d²_TV(α, π ⊙̃ q) · Γ;
16:     for k ∈ K do
17:         q⁺ ← q with q⁺_k ← 0;
18:         ϵ̂⁺ ← ⟨F − F*, π ⊙̃ q⁺⟩ + d²_TV(α, π ⊙̃ q⁺) · Γ;
19:         if ϵ̂ − ϵ̂⁺ ≥ τ then
20:             ϵ̂ ← ϵ̂⁺;
21:             q ← q⁺;
22:     return q
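A minimal Python rendering of the greedy exclusion routine get (Algorithm 1, lines 13–22) is sketched below. This is our own illustration, not the authors' implementation: variable names follow the pseudocode, the proxy ϵ̂ is the estimate in (19) (introduced formally in Section IV-B), and the guard keeping at least one client is our own addition.

import numpy as np

def normalized_weights(pi, q):
    """(pi ⊙̃ q)_k = pi_k q_k / sum_h pi_h q_h (the biased importance p)."""
    w = pi * q
    return w / w.sum()

def error_proxy(q, alpha, F, F_star, Gamma, pi):
    """Estimate (19): optimization-error term plus d_TV^2 * Gamma bias term."""
    p = normalized_weights(pi, q)
    opt = np.dot(F - F_star, p)
    d_tv = 0.5 * np.abs(alpha - p).sum()
    return opt + d_tv ** 2 * Gamma

def get(q, alpha, F, F_star, Gamma, pi, rho, tau=0.0):
    """Zero out clients in descending order of rho while the proxy error
    decreases by at least tau (Algorithm 1, lines 13-22)."""
    eps = error_proxy(q, alpha, F, F_star, Gamma, pi)
    for k in np.argsort(-rho):          # descending order in rho
        q_plus = q.copy()
        q_plus[k] = 0.0
        if q_plus.sum() == 0:           # our safeguard: keep at least one client
            continue
        eps_plus = error_proxy(q_plus, alpha, F, F_star, Gamma, pi)
        if eps - eps_plus >= tau:
            eps, q = eps_plus, q_plus
    return q

Calling get first with rho = λ̂ and then with rho = −π̂ reproduces lines 6–7 of Algorithm 1: the first pass considers the most correlated clients for exclusion, the second pass the least available ones.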
E. Minimizing ϵ′_bias

The bias error ϵ′_bias in (14) vanishes when the total variation distance between the target importance α and the biased importance p is zero, i.e., when q_k ∝ α_k/π_k, ∀k ∈ K. Then, after excluding the clients that contribute the most to the optimization error and particularly slow down the convergence (guidelines A and B), we can assign to the remaining clients an aggregation weight inversely proportional to their availability, so that the bias error ϵ′_bias is minimized.

Guideline C: to reduce the bias error, we set q*_k ∝ α_k/π_k for the clients that are not excluded by the previous guidelines.
IV. PROPOSED ALGORITHM

Guidelines A and B in Section III suggest that the minimization of ϵ_opt can lead to the exclusion of some available clients from the aggregation step (3), in particular those with low availability and/or high correlation. For the remaining clients, guideline C proposes to set their aggregation weight inversely proportional to their availability to reduce the bias error ϵ′_bias. Motivated by these insights, we propose CA-Fed, a client sampling and aggregation strategy that takes into account the problem of correlated client availability in FL, described in Algorithm 1. CA-Fed learns during training which clients to exclude and how to set the aggregation weights of the other clients in order to achieve a good trade-off between ϵ_opt and ϵ′_bias. While guidelines A and B indicate which clients to remove, the exact number of clients to remove at round t is identified by minimizing ϵ^(t) as a proxy for the bound in (14):⁴

ϵ^(t) := F_B(w_{t,0}) − F*_B + d²_TV(α, p) Γ′.   (15)

⁴Following (14), one could reasonably introduce a hyper-parameter to weigh the relative importance of the optimization and bias terms in the sum. We discuss this additional optimization of CA-Fed in Section VI-A.
A. CA-Fed's core steps

At each communication round t, the server sends the current model w_{t,0} to all active clients, and each client k sends back a noisy estimate F_k^{(t)} of the current loss, computed on a batch of samples B^k_{t,0}, i.e., F_k^{(t)} = (1/|B^k_{t,0}|) Σ_{ξ∈B^k_{t,0}} f(w_{t,0}, ξ) (line 3). The server uses these values and the information about the current set of available clients A_t to refine its own estimates of each client's loss (F̂^{(t)} = (F̂_k^{(t)})_{k∈K}) and of each client's minimum loss value (F̂* = (F̂*_k)_{k∈K}), as well as of Γ′, π_k, λ_k, and ϵ^(t), denoted by Γ̂′^{(t)}, π̂_k^{(t)}, λ̂_k^{(t)}, and ϵ̂^{(t)}, respectively (possible estimators are described below) (line 4).

The server then decides whether to exclude clients whose availability pattern exhibits high correlation (high λ̂_k^{(t)}) (line 6). First, the server considers all clients in descending order of λ̂^{(t)} (line 14) and evaluates whether, by excluding them (line 17), ϵ̂^{(t)} decreases by more than a threshold τ ≥ 0 (line 19). Then, the server considers clients in ascending order of π̂^{(t)} and repeats the same procedure to possibly exclude some of the clients with low availability (low π̂_k^{(t)}) (line 7).

Once the participating clients (those with q_k > 0) have been selected, the server notifies them to proceed with updating the current models (lines 9–10) according to (2), while the other available clients stay idle. Finally, the model updates are aggregated according to (3) (line 12).
B. Estimators

We now briefly discuss a possible implementation of the estimators F̂_k^{(t)}, F̂*_k, Γ̂′^{(t)}, π̂_k^{(t)}, and λ̂_k^{(t)}. The server's estimates of the clients' local losses (F̂^{(t)} = (F̂_k^{(t)})_{k∈K}) can be obtained from the received active clients' losses (F^{(t)} = (F_k^{(t)})_{k∈A_t}) through an auto-regressive filter with parameter β ∈ (0, 1]:

F̂^{(t)} = (1 − β·1_{A_t}) ⊙ F̂^{(t−1)} + β·1_{A_t} ⊙ F^{(t)},   (16)

where ⊙ denotes the component-wise multiplication between vectors, and 1_{A_t} is an N-dimensional binary vector whose k-th component equals 1 if and only if client k is active at round t, i.e., k ∈ A_t. The server can keep track of the clients' minimum loss values and estimate F*_k as F̂*_k = min_{s∈[0,t]} F̂_k^{(s)}. The values of F_B(w_{t,0}), F*_B, Γ′, and ϵ^(t) can be estimated as follows:

F̂_B^{(t)} − F̂*_B = ⟨F̂^{(t)} − F̂*, π̂^{(t)} ⊙̃ q^{(t)}⟩,   (17)
Γ̂′^{(t)} = max_{k∈K} (F̂_k^{(t)} − F̂*_k),   (18)
ϵ̂^{(t)} = F̂_B^{(t)} − F̂*_B + d²_TV(α, π̂^{(t)} ⊙̃ q^{(t)}) · Γ̂′^{(t)},   (19)

where π ⊙̃ q ∈ R^N is the vector with components (π ⊙̃ q)_k = π_k q_k / Σ_{h=1}^N π_h q_h, k ∈ K.

For π̂_k^{(t)}, the server can simply keep track of the total number of times client k was available up to time t and compute π̂_k^{(t)} using a Bayesian estimator with a beta prior, i.e., π̂_k^{(t)} = (Σ_{s≤t} 1_{k∈A_s} + n_k)/(t + n_k + m_k), where n_k and m_k are the initial parameters of the beta prior.

For λ̂_k^{(t)}, the server can assume that each client's availability evolves according to a Markov chain with two states (available and unavailable), track the corresponding number of state transitions, and estimate the transition matrix P̂_k^{(t)} through a Bayesian estimator, similarly to what is done for π̂_k^{(t)}. Finally, λ̂_k^{(t)} is obtained by computing the eigenvalues of P̂_k^{(t)}.
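These estimators lend themselves to a compact server-side implementation. The sketch below is our own (the authors' released code may differ): it keeps, for each client, the AR-filtered loss (16), a Beta-posterior availability estimate, and Laplace-smoothed transition counts from which λ̂_k is obtained; initializing F̂ at zero before the first observation is a simplification.

import numpy as np

class ServerEstimators:
    def __init__(self, N, beta=0.2, n=1.0, m=1.0):
        self.beta = beta
        self.F_hat = np.zeros(N)          # AR loss estimates, eq. (16)
        self.F_star = np.full(N, np.inf)  # running minima of F_hat
        self.avail_count = np.zeros(N)    # times each client was active
        self.n, self.m = n, m             # Beta prior parameters
        self.trans = np.ones((N, 2, 2))   # Laplace-smoothed transition counts
        self.prev_state = np.zeros(N, dtype=int)
        self.t = 0

    def update(self, active, losses):
        """active: boolean mask of A_t; losses: F_k^(t) for active k (NaN elsewhere)."""
        self.t += 1
        b = self.beta
        self.F_hat[active] = (1 - b) * self.F_hat[active] + b * losses[active]
        self.F_star = np.minimum(self.F_star, self.F_hat)
        self.avail_count += active
        state = active.astype(int)
        for k in range(len(state)):       # count per-client state transitions
            self.trans[k, self.prev_state[k], state[k]] += 1
        self.prev_state = state

    def pi_hat(self):
        """Bayesian availability estimate with Beta(n, m) prior."""
        return (self.avail_count + self.n) / (self.t + self.n + self.m)

    def lambda_hat(self):
        """Second eigenvalue of the estimated 2x2 transition matrices:
        for a two-state chain, lambda_2 = P[0,0] + P[1,1] - 1."""
        P = self.trans / self.trans.sum(axis=2, keepdims=True)
        return np.abs(P[:, 0, 0] + P[:, 1, 1] - 1)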
C. CA-Fed's computation/communication cost

CA-Fed aims to improve training convergence, not to reduce computation and communication overhead. Nevertheless, excluding some available clients reduces the overall training cost, as we discuss in this section, referring, for the sake of concreteness, to neural network training.

The available clients not selected for training are only requested to evaluate their local loss on the current model, once and on a single batch, instead of performing E gradient updates, which would require roughly 2×E − 1 times more computation (because of the forward and backward passes). For the selected clients, there is no extra computation cost, as computing the loss corresponds to the forward pass they would in any case perform during the first local gradient update.

In terms of communication, the excluded clients only transmit the loss, a single scalar, which is much smaller than a model update. Conversely, participating clients transmit both the local loss and the model update. Still, this additional overhead is negligible and likely fully compensated by the communication savings for the excluded clients.
V. EXPERIMENTAL EVALUATION

A. Experimental Setup

a) Federated system simulator: In our experiments, we simulate clients' availability dynamics featuring different levels of temporal correlation. We model the activity of each client as a two-state homogeneous Markov process with state space S = {"active", "inactive"}. We use p_{k,s} to denote the probability that client k ∈ K remains in state s ∈ S.

In order to simulate the statistical heterogeneity present in the federated learning system, we consider an experimental setting with two disjoint groups of clients G_i, i = 1, 2, to which we associate two different data distributions P_i, i = 1, 2, to be specified later. Let r_i = |G_i|/N, i = 1, 2, denote the fraction of clients in group i. In order to simulate the heterogeneity of clients' availability patterns in realistic federated systems, we split the clients of each group into two classes uniformly at random: "more available" clients, whose steady-state probability of being active is π_{k,active} = 1/2 + g, and "less available" clients, with π_{k,active} = 1/2 − g, where g ∈ (0, 1/2) is a parameter controlling the heterogeneity of clients' availability. We furthermore split each class of clients into two sub-classes uniformly at random: "correlated" clients, which tend to persist in the same state (λ_k = ν, with values of ν close to 1), and "weakly correlated" clients, which are almost as likely to keep as to change their state (λ_k ∼ N(0, ε²), with ε close to 0). In our experiments, we set r₁ = r₂ = 1/2, g = 0.4, ν = 0.9, and ε = 10⁻².

[Fig. 1: Clients' activities and CA-Fed's client selection on the synthetic dataset. Each client's timeline over the communication rounds is marked as inactive/excluded, inactive/included, active/excluded, or active/included; clients are grouped as "more available", "less available, weakly correlated", and "less available, correlated".]

[Fig. 2: Importance (cumulative weight) given to the clients by the different algorithms (Unbiased, CA-Fed, AdaFed, F3AST, and the target) throughout a whole training process on the synthetic dataset.]
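A sketch of the availability simulator, reconstructed by us from the description above: each client follows a two-state Markov chain whose transition probabilities are chosen so that the stationary active probability is π_{k,active} and the second eigenvalue is λ_k (for a two-state chain, λ₂ = p_{k,active} + p_{k,inactive} − 1).

import numpy as np

rng = np.random.default_rng(0)

def simulate_availability(pi_active, lam, T):
    """Sample a 0/1 availability trace of length T for one client from the
    two-state chain with stationary prob. pi_active and eigenvalue lam."""
    to_active = pi_active * (1 - lam)          # P(inactive -> active)
    to_inactive = (1 - pi_active) * (1 - lam)  # P(active -> inactive)
    state = rng.random() < pi_active           # start at stationarity
    trace = np.empty(T, dtype=int)
    for t in range(T):
        trace[t] = state
        flip = to_inactive if state else to_active
        state = state ^ (rng.random() < flip)
    return trace

# "more available, correlated" client under the paper's setting (g = 0.4, nu = 0.9):
print(simulate_availability(0.9, 0.9, 20))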
b) Datasets and models: All experiments are performed on a binary classification synthetic dataset (described in Appendix F) and on the real-world MNIST dataset [35], using N = 24 clients. For the MNIST dataset, we introduce statistical heterogeneity across the two groups of clients (i.e., we make the two distributions P₁ and P₂ different), following the same approach as in [36]: 1) every client is assigned a random subset of the total training data; 2) the data of clients from the second group is modified by randomly swapping two pairs of labels. We maintain the original training/test data split of MNIST and use 20% of the training dataset as validation dataset. We use a linear classifier with a ridge penalization of parameter 10⁻², which yields a strongly convex objective function, for both the synthetic and the real-world MNIST datasets.

c) Benchmarks: We compare CA-Fed, defined in Algorithm 1, with the Unbiased aggregation strategy, where all the active clients participate and receive a weight inversely proportional to their availability, and with the state-of-the-art FL algorithms discussed in Section II: F3AST [18] and AdaFed [19]. We tuned the learning rates η, η_s via grid search, on the grid η: {10⁻³, 10⁻²·⁵, 10⁻², 10⁻¹·⁵, 10⁻¹} and η_s: {10⁻², 10⁻¹·⁵, 10⁻¹, 10⁻⁰·⁵, 10⁰}. For CA-Fed, we used τ = 0 and β = 0.2. We assume all algorithms can access an oracle providing the true availability parameters for each client. In practice, Unbiased, AdaFed, and F3AST rely on the exact knowledge of π_{k,active}, and CA-Fed on π_{k,active} and λ_k.⁵

[Fig. 3: Test accuracy vs. number of communication rounds on (a) the synthetic dataset and (b) MNIST; the curves compare the time-average test accuracy of Unbiased, F3AST, AdaFed, and CA-Fed (Ours).]
B. Experimental Results

Figure 1 shows the availability of each client during a training run on the synthetic dataset. Clients selected (resp. excluded) by CA-Fed are highlighted in black (resp. red). We observe that the excluded clients tend to be those with low average availability or high correlation.

Figure 2 shows the importance p_k (averaged over time) given by the different algorithms to each client k during a full training run. We observe that all the algorithms, except Unbiased, depart from the target importance α. As suggested by guidelines A and B, CA-Fed tends to favor the group of "more available" clients, at the expense of the "less available" clients.

Figure 3 shows the time-average accuracy up to round t of the learned model, averaged over three different runs. On both datasets, CA-Fed achieves the highest accuracy, about one percentage point higher than the second-best algorithm (F3AST). Table I shows, for each algorithm, the average over three runs of the maximum test accuracy achieved during training and of the time-average test accuracy achieved during training, together with the standard deviation of the latter within the second half of the training period. The results show that, while CA-Fed achieves a maximum accuracy comparable to the Unbiased baseline and to the state-of-the-art AdaFed and F3AST, it attains a higher time-average accuracy (by 1.24 percentage points) and a smaller standard deviation (by a factor 1.5) than the second best (F3AST).

⁵The authors have provided public access to their code and data at: https://github.com/arodio/CA-Fed.
+ their standard deviations, on the Synthetic / MNIST datasets.
1061
+ TEST ACCURACY
1062
+ MAXIMUM
1063
+ TIME-AVERAGE
1064
+ STANDARD DEVIATION
1065
+ UNB I AS ED
1066
+ 78.94 / 64.87
1067
+ 75.32 / 61.39
1068
+ 0.48 / 1.09
1069
+ F3AST
1070
+ 78.97 / 64.91
1071
+ 75.33 / 61.52
1072
+ 0.40 / 0.94
1073
+ ADAFED
1074
+ 78.69 / 63.77
1075
+ 74.81 / 60.48
1076
+ 0.59 / 1.37
1077
+ CA-FE D
1078
+ 79.03 / 64.94
1079
+ 76.22 / 62.76
1080
+ 0.28 / 0.61
1081
VI. DISCUSSION

In this section, we discuss some general concerns and remarks about our algorithm.

A. Controlling the number of excluded clients

Theorems 1 and 3 suggest that the condition number κ² can play a meaningful role in the minimization of the total error ϵ. Our algorithm uses a proxy (ϵ^(t)) of the total error. To take into account the effect of κ², we can introduce a hyper-parameter κ̄² that weights the relative importance of the optimization and bias errors in (15):

ϵ′^(t) := F_B(w_{t,0}) − F*_B + κ̄² · d²_TV(α, p) Γ′.

A small value of κ̄² penalizes the bias term in favor of the optimization error, resulting in a larger number of clients excluded by CA-Fed. On the other hand, CA-Fed tends to include more clients for a large value of κ̄². Asymptotically, for κ̄² → +∞, CA-Fed reduces to the Unbiased baseline. To further improve the performance of CA-Fed, a finer tuning of κ̄² can be performed.
B. CA-Fed in the presence of spatial correlation

Although CA-Fed is mainly designed to handle temporal correlation, it does not necessarily perform poorly in the presence of spatial correlation.

Consider the following spatially-correlated scenario: clients are grouped in clusters; each cluster c ∈ C is characterized by an underlying Markov chain, which determines when all clients in the cluster are available/unavailable; and the Markov chains of different clusters are independent. Let λ_c denote the second largest eigenvalue in modulus of cluster c's Markov chain. In this case, one needs to exclude all clients in the cluster c̄ = arg max_{c∈C} λ_c to reduce the eigenvalue of the aggregate Markov chain.

In this setting, CA-Fed would associate similar eigenvalue estimates to all clients in the same cluster; it would then correctly start considering for exclusion the clients in cluster c̄ and potentially remove, sequentially, all clients in that cluster. These considerations suggest that CA-Fed may still operate correctly even in the presence of spatial correlation.
C. About CA-Fed's fairness

A strategy that excludes clients from the training phase, such as CA-Fed, may naturally raise fairness concerns. The concept of fairness in FL does not have a unified definition in the literature [37, Chapter 8]: fairness goals can be captured by a suitable choice of the target weights in (1). For example, per-client fairness can be achieved by setting α_k equal for every client, while per-sample fairness can be achieved by setting α_k proportional to the local dataset size |D_k|. If we assume that the global objective in (1) indeed also reflects fairness concerns, then CA-Fed is intrinsically fair, in the sense that it guarantees that the performance objective of the learned model is as close as possible to its minimum value.
VII. CONCLUSION

This paper presented the first convergence analysis for a FedAvg-like FL algorithm under heterogeneous and correlated client availability. The analysis quantifies how correlation adversely affects the algorithm's convergence rate and highlights a general bias-versus-convergence-speed trade-off. Guided by the theoretical analysis, we proposed CA-Fed, a new FL algorithm that tries to balance the conflicting goals of maximizing convergence speed and minimizing model bias. Our experimental results demonstrate that adaptively excluding clients with high temporal correlation and low availability is an effective approach to handle the heterogeneous and correlated client availability in FL.
APPENDIX

A. Proof of Theorem 1

We bound the optimization error of the target objective as the optimization error of the biased objective plus a bias term:

F(w) − F* ≤(a) (1/(2µ)) ‖∇F(w)‖² ≤(b) (L²/(2µ)) ‖w − w*‖² ≤(c) (L²/µ) (‖w − w*_B‖² + ‖w*_B − w*‖²) ≤(d) (2L²/µ²)(F_B(w) − F*_B) + (2L²/µ²)(F(w*_B) − F*),

where the first term on the right-hand side is ϵ_opt and the second is ϵ_bias; (a), (b), and (d) follow from Assumptions 3 and 4, and inequality (c) follows from (a + b)² ≤ 2a² + 2b². In particular, (b) requires ∇F(w*) = 0. Theorem 2 further develops the optimization error ϵ_opt. We now expand ϵ_bias:

‖∇F(w*_B)‖ =(e) ‖Σ_{k=1}^N (α_k − p_k) ∇F_k(w*_B)‖ ≤(f) L Σ_{k=1}^N |α_k − p_k| ‖w*_B − w*_k‖   (20)
≤(g) L √(2/µ) Σ_{k=1}^N (|α_k − p_k|/√p_k) √(p_k (F_k(w*_B) − F*_k)),

where (e) uses ∇F_B(w*_B) = 0; (f) applies first the triangle inequality and then L-smoothness; and (g) follows from µ-strong convexity. In addition, (f) requires ∇F_k(w*_k) = 0. Similarly to [32], in (g) we multiply numerator and denominator by √p_k. By direct calculation, it follows that:

‖∇F(w*_B)‖² ≤(h) (2L²/µ) ( Σ_{k=1}^N (|α_k − p_k|/√p_k) √(p_k (F_k(w*_B) − F*_k)) )²
≤(i) (2L²/µ) ( Σ_{k=1}^N (α_k − p_k)²/p_k ) ( Σ_{k=1}^N p_k (F_k(w*_B) − F*_k) )
≤(j) (2L²/µ) χ²_{α‖p} Γ,

where (i) uses the Cauchy–Schwarz inequality, and (j) uses: Σ_{k=1}^N p_k (F_k(w*_B) − F*_k) ≤ Σ_{k=1}^N p_k (F_k(w*) − F*_k) ≤ Γ. Finally, by strong convexity of F, we conclude that:

F(w*_B) − F* ≤ (1/(2µ)) ‖∇F(w*_B)‖² ≤ (L²/µ²) χ²_{α‖p} Γ.
B. Proof of Theorem 2

1) Additional notation: Let w^k_{t,j} be the model parameter vector computed by device k at global round t, local iteration j. We define:

g_t(A_t) = Σ_{k∈A_t} q_k Σ_{j=0}^{E−1} ∇F_k(w^k_{t,j}, ξ^k_{t,j}),  and  ḡ_t(A_t) = E_{ξ|A_t}[g_t(A_t)].

Following (2) and (3), the update rule of CA-Fed is:

w_{t+1,0} = Proj_W(w_{t,0} − η_t g_t(A_t)).   (21)

2) Key lemmas and results: We provide useful lemmas and results to support the proof of the main theorem.

Proof of Lemma 1. The boundedness of W gives a bound on (w_{t,0})_{t≥0} based on the update rules in (2) and (3). From the convexity of {F_k}_{k∈K}, it follows that:

D := sup_{w∈W, k∈K} ‖∇F_k(w)‖ < +∞.

Items (6) and (8) are directly derived from the previous observation. Item (7) follows by combining (6) and Assumption 5:

E‖∇F_k(w, ξ)‖² ≤ D² + max_{k∈K} {σ_k²} := G².

Lemma 2 (Convergence under heterogeneous client availability). Let the local functions {F_k}_{k∈K} be convex and let Assumptions 3 and 5 hold. If η_t ≤ 1/(2L(EQ + 1)), we have:

Σ_t η_t E[Σ_{k∈A_t} q_k (F_k(w_{t,0}) − F_k(w*_B))] ≤ 2 E‖w_{0,0} − w*_B‖² + 2 Σ_{k=1}^N π_k q_k² σ_k² Σ_t η_t² + (2/3) Σ_{k=1}^N π_k q_k (E − 1)(2E − 1) G² Σ_t η_t² + 2L(EQ + 2) Σ_{k=1}^N π_k q_k Γ Σ_t η_t² := C₁ < +∞.
Proof of Lemma 2. We have:

‖w_{t+1,0} − w*_B‖² = ‖Proj_W(w_{t,0} − η_t g_t) − Proj_W(w*_B)‖² ≤ ‖w_{t,0} − η_t g_t − w*_B + η_t ḡ_t − η_t ḡ_t‖² = A₁ + A₂ + A₃,

where:

A₁ = ‖w_{t,0} − w*_B − η_t ḡ_t‖²,
A₂ = 2η_t ⟨w_{t,0} − w*_B − η_t ḡ_t, ḡ_t − g_t⟩,
A₃ = η_t² ‖g_t − ḡ_t‖².

Note that E[A₂] = 0. We bound A₁ and A₃ using the key steps in [22]: (1) the variance of g_t(A_t) is bounded if the variance of the stochastic gradients at each device is bounded:

E_{B|A_t} ‖g_t − ḡ_t‖² = Σ_{k∈A_t} q_k² Σ_{j=0}^{E−1} E_{B|A_t} ‖∇F_k(w^k_{t,j}, ξ^k_{t,j}) − ∇F_k(w^k_{t,j})‖² ≤ E Σ_{k∈A_t} q_k² σ_k²;

(2) the distance of the local model w^k_{t,E} from the global model w_{t,0} is bounded, since the expected squared norm of the stochastic gradients is bounded:

E_{B|A_t} Σ_{k∈A_t} q_k Σ_{j=0}^{E−1} ‖w^k_{t,j} − w_{t,0}‖² = E_{B|A_t} Σ_{k∈A_t} q_k Σ_{j=1}^{E−1} η_t² ‖Σ_{j′=0}^{j−1} ∇F_k(w^k_{t,j′}, ξ^k_{t,j′})‖²
≤ η_t² Σ_{k∈A_t} q_k Σ_{j=1}^{E−1} j Σ_{j′=0}^{j−1} E_{B|A_t} ‖∇F_k(w^k_{t,j′}, ξ^k_{t,j′})‖²
≤ η_t² Σ_{k∈A_t} q_k G² Σ_{j=1}^{E−1} j²
= (1/6) η_t² Σ_{k∈A_t} q_k E(E − 1)(2E − 1) G².
Lemma 3 (Optimization error after J_t steps). Let Assumptions 1 and 2 hold, let the local functions {F_k}_{k∈K} be convex, let D, H be defined as in (6), (8), and let J_t be defined as in Theorem 2. Then:

Σ_t η_t E[Σ_{k∈A_t} q_k (F_k(w_{t−J_t,0}) − F_k(w_{t,0}))] ≤ EDGQ Σ_t J_t η²_{t−J_t} Σ_{k=1}^N π_k q_k := C₃/ln(1/λ(P)) < +∞.

For the proof of Lemma 3, we introduce the following results:

|F_k(v) − F_k(w)| ≤ D · ‖v − w‖,  ∀v, w ∈ W,   (22)
E_{B^k_{t,0},…,B^k_{t,E−1}} ‖w_{t+1,0} − w_{t,0}‖ ≤ η_t GE (Σ_{k∈A_t} q_k).   (23)

Equation (22) is due to the convexity of {F_k}_{k∈K}, which gives:

⟨∇F_k(w), v − w⟩ ≤ F_k(v) − F_k(w) ≤ ⟨∇F_k(v), v − w⟩;

the Cauchy–Schwarz inequality concludes:

|F_k(v) − F_k(w)| ≤ max{‖∇F_k(v)‖, ‖∇F_k(w)‖} · ‖v − w‖ ≤ D · ‖v − w‖.

Equation (23) follows by combining (7) and (21):

E_{B|A_t} ‖w_{t+1,0} − w_{t,0}‖ ≤ η_t E_{B|A_t} ‖Σ_{k∈A_t} q_k Σ_{j=0}^{E−1} ∇F_k(w^k_{t,j}, ξ^k_{t,j})‖ ≤ η_t Σ_{k∈A_t} q_k Σ_{j=0}^{E−1} E_{B|A_t} ‖∇F_k(w^k_{t,j}, ξ^k_{t,j})‖ ≤ η_t GE (Σ_{k∈A_t} q_k).

Proof of Lemma 3. The evolution of the local objectives after J_t communication rounds is bounded:

Σ_t η_t E[Σ_{k∈A_t} q_k (F_k(w_{t−J_t,0}) − F_k(w_{t,0}))]
≤(a) D Σ_t η_t E[Σ_{k∈A_t} q_k E_B ‖w_{t−J_t,0} − w_{t,0}‖]
≤(b) D Σ_t η_t Σ_{d=t−J_t}^{t−1} E[Σ_{k∈A_t} q_k E_B ‖w_{d,0} − w_{d+1,0}‖]
≤(c) EDG Σ_t Σ_{d=t−J_t}^{t−1} η_t η_d E[Σ_{k∈A_t} q_k Σ_{k′∈A_d} q_{k′}]
≤(d) (EDG/2) Σ_t Σ_{d=t−J_t}^{t−1} (η_t² + η_d²) E[Σ_{k∈A_t} q_k Σ_{k′∈A_d} q_{k′}]
≤(e) EDGQ Σ_t J_t η²_{t−J_t} Σ_{k=1}^N π_k q_k := C₃/ln(1/λ(P)),

where (a) follows from (22); (b) applies the triangle inequality; (c) uses (23); (d) applies the Cauchy–Schwarz inequality; (e) uses η_t < η_d ≤ η_{t−J_t} and Σ_{k=1}^N q_k = Q.
3) Core of the proof: The proof consists of two main steps:

1. Σ_t η_t Σ_{k=1}^N π_k q_k E[F_B(w_{t−J_t,0}) − F*_B] ≤ C₂ + C₃/ln(1/λ(P));
2. Σ_t η_t Σ_{k=1}^N π_k q_k E[F_B(w_{t,0}) − F_B(w_{t−J_t,0})] ≤ C₃/ln(1/λ(P)).

Step 1. Combining Lemmas 2 and 3, we get:

Σ_t η_t E[Σ_{k∈A_t} q_k (F_k(w_{t−J_t,0}) − F_k(w*_B))] ≤ C₁ + C₃/ln(1/λ(P)).

The constant J_t, introduced in [14], is an important parameter for the analysis and is frequently used. Combining its definition in Theorem 2 and equation (5), it follows that:

|[P^{J_t}]_{i,j} − π_j| ≤ C_P λ(P)^{J_t} ≤ 1/(2Ht),  ∀i, j ∈ [M].   (24)

Assume t ≥ T_P. We derive an important lower bound:

E_{A_t|A_{t−J_t}}[Σ_{k∈A_t} q_k (F_k(w_{t−J_t,0}) − F_k(w*_B))]
=(a) Σ_{I=1}^M P(A_t = I | A_{t−J_t}) Σ_{k∈I} q_k (F_k(w_{t−J_t,0}) − F_k(w*_B))
=(b) Σ_{I=1}^M [P^{J_t}]_{A_{t−J_t},I} Σ_{k∈I} q_k (F_k(w_{t−J_t,0}) − F_k(w*_B))
≥(c) Σ_{I=1}^M (π(I) − 1/(2Ht)) Σ_{k∈I} q_k (F_k(w_{t−J_t,0}) − F_k(w*_B))
≥(d) (Σ_{k=1}^N π_k q_k) · (F_B(w_{t−J_t,0}) − F*_B) − (1/(2t)) MQ,   (25)

where (a) is the definition of the conditional expectation, (b) uses the Markov property, (c) follows from (24), and (d) is due to (8). Taking total expectations:

(Σ_{k=1}^N π_k q_k) Σ_t η_t E[F_B(w_{t−J_t,0}) − F*_B] ≤ Σ_t η_t E[Σ_{k∈A_t} q_k (F_k(w_{t−J_t,0}) − F_k(w*_B))] + (1/4) MQ Σ_t (η_t² + 1/t²) = C₂ + C₃/ln(1/λ(P)),   (26)

where C₂ = C₁ + (1/4) MQ Σ_t (η_t² + 1/t²).

Step 2. By direct calculation (similar to Lemma 3):

(Σ_{k=1}^N π_k q_k) Σ_t η_t E[F_B(w_{t,0}) − F_B(w_{t−J_t,0})] ≤ C₃/ln(1/λ(P)).

Summing Steps 1 and 2, and applying Jensen's inequality:

(Σ_{t=1}^T η_t)(Σ_{k=1}^N π_k q_k) E[F_B(w̄_{T,0}) − F*_B] ≤ (Σ_{k=1}^N π_k q_k) Σ_{t=1}^T η_t E[F_B(w_{t,0}) − F*_B] ≤ C₂ + 2C₃/ln(1/λ(P)),

where w̄_{T,0} := (Σ_{t=1}^T η_t w_{t,0})/(Σ_{t=1}^T η_t), and the constants are as in (12).
C. Proof of Theorem 3

It follows the same lines as Theorem 1, developing (20) as:

‖∇F(w*_B)‖ ≤ L √(2/µ) Σ_{k=1}^N |α_k − p_k| √(F_k(w*_B) − F*_k) ≤ 2L √(2/µ) d_TV(α, p) √Γ′,

where d_TV(α, p) := (1/2) Σ_{k=1}^N |α_k − p_k| is the total variation distance between the probability measures α and p.
D. Minimizing ϵ_opt

Equation (12) defines the following optimization problem:

minimize_q  f(q) = ((1/2) q^⊺Aq + B)/(π^⊺q) + C;
subject to  q ≥ 0,  π^⊺q > 0,  ‖q‖₁ = Q.

Let us rewrite the problem by adding a variable s := 1/(π^⊺q) and then replacing y := sq. Note that the objective function is the perspective of a convex function, and is therefore convex:

min_{y,s}  f(y, s) = (1/(2s)) y^⊺Ay + Bs + C   (27a)
s.t.  y ≥ 0,  s > 0,  π^⊺y = 1,  ‖y‖₁ = Qs.   (27b)

The Lagrangian function L is as follows:

L(y, s, λ, θ, µ) = (1/(2s)) y^⊺Ay + Bs + C + λ(1 − π^⊺y) + θ(‖y‖₁ − Qs) − µ^⊺y.   (28)

Since the constraint s > 0 defines an open set, the set defined by the constraints in (27b) is not closed. However, the solution is never on the boundary s = 0, because L* → +∞ as s → 0⁺, and we can therefore consider s ≥ 0. The KKT conditions for y*_k read:

if y*_k > 0:  y*_k = (s*/A_{[kk]})(λ*π_k − θ*);  y*_k = 0 otherwise.   (29)

Since λ* ≥ 0, the clients with smaller π_k may have q*_k = 0.
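For a numeric sanity check of the KKT structure in (29), one can minimize the quadratic-over-linear objective directly, e.g., with SciPy. The sketch below is our own; A, B, π, and Q are arbitrary placeholders, not values from the paper.

import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(1)
N, Q = 5, 1.0
A = np.diag(rng.uniform(0.5, 2.0, N))  # diagonal quadratic term
B = 1.0
pi = rng.dirichlet(np.ones(N))         # availabilities (all positive)

def f(q):
    return (0.5 * q @ A @ q + B) / (pi @ q)

res = minimize(f, x0=np.full(N, Q / N), method="SLSQP",
               bounds=[(0, None)] * N,
               constraints=[{"type": "eq", "fun": lambda q: q.sum() - Q}])
print(np.round(res.x, 4))  # clients with small pi_k may end up at q_k = 0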
E. Convexity of ϵ_opt + ϵ_bias

In Appendix D, we proved that ϵ_opt(q) is convex. To prove that ϵ_bias(q) is also convex, we need to study the convexity of χ²_{α‖p} = Σ_{k=1}^N (f_k ∘ g_k)(q), where f_k(p_k) = (p_k − α_k)²/p_k and g_k(q) = (π_k q_k)/Σ_{h=1}^N π_h q_h. We observe that f_k(p_k) is convex, and that g_k(q) is a particular case of a linear-fractional function [38]. By direct inspection, it can be proved that (f_k ∘ g_k)(q) is convex on dom(f_k ∘ g_k) = {q : ‖q‖₁ = Q > 0}.
F. Synthetic dataset

Our synthetic dataset has been generated as follows (a direct transcription in code is sketched after the list):

1) For client k ∈ K, sample the group identity i_k from a Bernoulli distribution of parameter 1/2;
2) Sample model parameters w* ∼ N(0, I_d) from the d-dimensional normal distribution;
3) For client k ∈ K and sample index j ∈ {1, …, 150}, sample the client's input data x^(j)_k ∼ N(0, I_d) from the d-dimensional normal distribution;
4) For client k ∈ K such that i_k = 0 and sample index j ∈ {1, …, 150}, sample the true labels y^(j)_k from a Bernoulli distribution with parameter equal to sigmoid(⟨w*, x^(j)_k⟩);
5) For client k ∈ K such that i_k = 1 and sample index j ∈ {1, …, 150}, sample the true labels y^(j)_k from a Bernoulli distribution with parameter equal to 0.8·sigmoid(⟨w*, x^(j)_k⟩) + 0.2·(1 − sigmoid(⟨w*, x^(j)_k⟩)).
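A direct transcription of this recipe in Python (our sketch; the dimension d is a free parameter not fixed in the text):

import numpy as np

rng = np.random.default_rng(42)

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def make_synthetic(N=24, n_samples=150, d=10):
    groups = rng.binomial(1, 0.5, size=N)      # step 1: group identities
    w_star = rng.normal(size=d)                # step 2: model parameters
    data = []
    for k in range(N):
        X = rng.normal(size=(n_samples, d))    # step 3: input data
        prob = sigmoid(X @ w_star)
        if groups[k] == 1:                     # step 5: second group flips labels w.p. 0.2
            prob = 0.8 * prob + 0.2 * (1 - prob)
        y = rng.binomial(1, prob)              # steps 4-5: sample labels
        data.append((X, y))
    return groups, data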
REFERENCES

[1] J. Verbraeken, M. Wolting, J. Katzy, J. Kloppenburg, T. Verbelen, and J. S. Rellermeyer, "A survey on distributed machine learning," ACM Computing Surveys (CSUR), vol. 53, no. 2, pp. 1–33, 2020.
[2] S. Wang, T. Tuor, T. Salonidis, K. K. Leung, C. Makaya, T. He, and K. Chan, "When edge meets learning: Adaptive control for resource-constrained distributed machine learning," in IEEE INFOCOM 2018 - IEEE Conference on Computer Communications. IEEE, 2018, pp. 63–71.
[3] J. Konečný, H. B. McMahan, F. X. Yu, P. Richtarik, A. T. Suresh, and D. Bacon, "Federated learning: Strategies for improving communication efficiency," in NIPS Workshop on Private Multi-Party Machine Learning, 2016, https://arxiv.org/abs/1610.05492.
[4] B. McMahan, E. Moore, D. Ramage, S. Hampson, and B. A. y Arcas, "Communication-efficient learning of deep networks from decentralized data," in Artificial Intelligence and Statistics. PMLR, 2017, pp. 1273–1282.
[5] P. Kairouz, H. B. McMahan, B. Avent, A. Bellet, M. Bennis, A. N. Bhagoji, K. Bonawitz, Z. Charles, G. Cormode, R. Cummings et al., "Advances and open problems in federated learning," Foundations and Trends® in Machine Learning, vol. 14, no. 1–2, pp. 1–210, 2021.
[6] T. Li, A. K. Sahu, A. Talwalkar, and V. Smith, "Federated learning: Challenges, methods, and future directions," IEEE Signal Processing Magazine, vol. 37, no. 3, pp. 50–60, 2020.
[7] H. Eichner, T. Koren, B. McMahan, N. Srebro, and K. Talwar, "Semi-cyclic stochastic gradient descent," in International Conference on Machine Learning. PMLR, 2019, pp. 1764–1773.
[8] J. Wang, Z. Charles, Z. Xu, G. Joshi, H. B. McMahan, M. Al-Shedivat, G. Andrew, S. Avestimehr, K. Daly, D. Data et al., "A field guide to federated optimization," arXiv preprint arXiv:2107.06917, 2021.
[9] K. Bonawitz, H. Eichner, W. Grieskamp, D. Huba, A. Ingerman, V. Ivanov, C. Kiddon, J. Konečný, S. Mazzocchi, B. McMahan et al., "Towards federated learning at scale: System design," Proceedings of Machine Learning and Systems, vol. 1, pp. 374–388, 2019.
[10] Y. Ding, C. Niu, Y. Yan, Z. Zheng, F. Wu, G. Chen, S. Tang, and R. Jia, "Distributed optimization over block-cyclic data," arXiv preprint arXiv:2002.07454, 2020.
[11] C. Zhu, Z. Xu, M. Chen, J. Konečný, A. Hard, and T. Goldstein, "Diurnal or Nocturnal? Federated Learning from Periodically Shifting Distributions," in NeurIPS 2021 Workshop on Distribution Shifts: Connecting Methods and Applications, 2021.
[12] T. T. Doan, "Local stochastic approximation: A unified view of federated learning and distributed multi-task reinforcement learning algorithms," arXiv preprint arXiv:2006.13460, 2020.
[13] T. T. Doan, L. M. Nguyen, N. H. Pham, and J. Romberg, "Convergence rates of accelerated Markov gradient descent with applications in reinforcement learning," arXiv preprint arXiv:2002.02873, 2020.
[14] T. Sun, Y. Sun, and W. Yin, "On Markov chain gradient descent," Advances in Neural Information Processing Systems, vol. 31, 2018.
[15] M. McCloskey and N. J. Cohen, "Catastrophic Interference in Connectionist Networks: The Sequential Learning Problem," in Psychology of Learning and Motivation, G. H. Bower, Ed. Academic Press, 1989, vol. 24, pp. 109–165.
[16] J. Kirkpatrick, R. Pascanu, N. Rabinowitz, J. Veness, G. Desjardins, A. A. Rusu, K. Milan, J. Quan, T. Ramalho, A. Grabska-Barwinska et al., "Overcoming catastrophic forgetting in neural networks," Proceedings of the National Academy of Sciences, vol. 114, no. 13, pp. 3521–3526, 2017.
[17] M. Tang, X. Ning, Y. Wang, J. Sun, Y. Wang, H. Li, and Y. Chen, "FedCor: Correlation-Based Active Client Selection Strategy for Heterogeneous Federated Learning," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022.
[18] M. Ribero, H. Vikalo, and G. De Veciana, "Federated Learning Under Intermittent Client Availability and Time-Varying Communication Constraints," arXiv preprint arXiv:2205.06730, 2022.
[19] L. Tan, X. Zhang, Y. Zhou, X. Che, M. Hu, X. Chen, and D. Wu, "AdaFed: Optimizing Participation-Aware Federated Learning with Adaptive Aggregation Weights," IEEE Transactions on Network Science and Engineering, 2022.
[20] A. Nichol, J. Achiam, and J. Schulman, "On first-order meta-learning algorithms," arXiv preprint arXiv:1803.02999, 2018.
[21] S. J. Reddi, Z. Charles, M. Zaheer, Z. Garrett, K. Rush, J. Konečný, S. Kumar, and H. B. McMahan, "Adaptive Federated Optimization," in International Conference on Learning Representations, 2021.
[22] X. Li, K. Huang, W. Yang, S. Wang, and Z. Zhang, "On the Convergence of FedAvg on Non-IID Data," in International Conference on Learning Representations, 2019.
[23] T. Li, A. K. Sahu, M. Zaheer, M. Sanjabi, A. Talwalkar, and V. Smith, "Federated optimization in heterogeneous networks," Proceedings of Machine Learning and Systems, vol. 2, pp. 429–450, 2020.
[24] W. Chen, S. Horvath, and P. Richtarik, "Optimal client sampling for federated learning," arXiv preprint arXiv:2010.13723, 2020.
[25] Y. Fraboni, R. Vidal, L. Kameni, and M. Lorenzi, "Clustered sampling: Low-variance and improved representativity for clients selection in federated learning," in International Conference on Machine Learning. PMLR, 2021, pp. 3407–3416.
[26] Y. Jee Cho, J. Wang, and G. Joshi, "Towards Understanding Biased Client Selection in Federated Learning," in Proceedings of The 25th International Conference on Artificial Intelligence and Statistics. PMLR, 2022, pp. 10351–10375.
[27] T. T. Doan, L. M. Nguyen, N. H. Pham, and J. Romberg, "Finite-time analysis of stochastic gradient descent under Markov randomness," arXiv preprint arXiv:2003.10973, 2020.
[28] A. Meyers and H. Yang, "Markov Chains for Fault-Tolerance Modeling of Stochastic Networks," IEEE Transactions on Automation Science and Engineering, 2021.
[29] H. Olle, P. Yuval, and E. S. Jeffrey, "Dynamical percolation," in Annales de l'Institut Henri Poincaré (B) Probability and Statistics, vol. 33, no. 4. Elsevier, 1997, pp. 497–528.
[30] D. A. Levin and Y. Peres, Markov Chains and Mixing Times. American Mathematical Soc., 2017, vol. 107.
[31] L. Bottou, F. E. Curtis, and J. Nocedal, "Optimization methods for large-scale machine learning," SIAM Review, vol. 60, no. 2, pp. 223–311, 2018.
[32] J. Wang, Q. Liu, H. Liang, G. Joshi, and H. V. Poor, "Tackling the objective inconsistency problem in heterogeneous federated optimization," Advances in Neural Information Processing Systems, vol. 33, pp. 7611–7623, 2020.
[33] I. J. Goodfellow, M. Mirza, D. Xiao, A. Courville, and Y. Bengio, "An empirical investigation of catastrophic forgetting in gradient-based neural networks," in International Conference on Learning Representations, 2013, arXiv preprint arXiv:1312.6211.
[34] R. Kemker, M. McClure, A. Abitino, T. Hayes, and C. Kanan, "Measuring catastrophic forgetting in neural networks," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 32, no. 1, 2018.
[35] Y. LeCun and C. Cortes, "MNIST handwritten digit database," 2010.
[36] F. Sattler, K.-R. Müller, and W. Samek, "Clustered federated learning: Model-agnostic distributed multitask optimization under privacy constraints," IEEE Transactions on Neural Networks and Learning Systems, vol. 32, no. 8, pp. 3710–3722, 2020.
[37] H. Ludwig and N. Baracaldo, Federated Learning: A Comprehensive Overview of Methods and Applications. Springer Cham, 2022.
[38] S. Boyd and L. Vandenberghe, Convex Optimization. Cambridge University Press, 2004.
3dE3T4oBgHgl3EQfoQqU/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4dFKT4oBgHgl3EQf9S5d/content/tmp_files/2301.11953v1.pdf.txt ADDED
@@ -0,0 +1,681 @@
FANO 4-FOLDS WITH b₂ > 12 ARE PRODUCTS OF SURFACES

C. CASAGRANDE

Dedicated to Lorenzo, Sabrina, and Fabrizio

Abstract. Let X be a smooth, complex Fano 4-fold, and ρ_X its Picard number. We show that if ρ_X > 12, then X is a product of del Pezzo surfaces. The proof relies on a careful study of divisorial elementary contractions f: X → Y such that dim f(Exc(f)) = 2, together with the author's previous work on Fano 4-folds. In particular, given f: X → Y as above, under suitable assumptions we show that S := f(Exc(f)) is a smooth del Pezzo surface with −K_S = (−K_Y)|_S.

1. Introduction
Smooth, complex Fano varieties have been classically intensively studied, and have attracted a lot of attention also in the last decades, due to their role in the framework of the Minimal Model Program. The Fano condition is a natural positivity condition on the tangent bundle, and it ensures a rich geometry, from the points of view of both birational geometry and families of rational curves. It has been known since the 90's that Fano varieties form a bounded family in each dimension. Del Pezzo surfaces are known classically, and the classification of Fano 3-folds was achieved in the 80's; there are 105 families.

Starting from dimension 4, there are probably too many families to get a complete classification; still, we aim to better understand and describe the behavior and properties of these varieties. In this paper we focus on Fano 4-folds X with "large" Picard number ρ_X; let us recall that since X is Fano, ρ_X is equal to the second Betti number b₂(X). We show the following result.

Theorem 1.1. Let X be a smooth Fano 4-fold with ρ_X > 12. Then X ≅ S₁ × S₂, where the S_i are del Pezzo surfaces.

To the author's knowledge, all known examples of Fano 4-folds which are not products of surfaces have ρ ≤ 9, so that we do not know whether the condition ρ > 12 in Th. 1.1 is sharp. We refer the reader to [Cas22b, §6] for an overview of known Fano 4-folds with ρ ≥ 6; there are few examples, and it is an interesting problem to construct new ones.

As ρ_{S₁×S₂} = ρ_{S₁} + ρ_{S₂}, and del Pezzo surfaces have ρ ≤ 9, Th. 1.1 implies the following.

Corollary 1.2. Let X be a smooth Fano 4-fold. Then ρ_X ≤ 18.

2020 Mathematics Subject Classification. 14J45, 14J35, 14E30.

arXiv:2301.11953v1 [math.AG] 27 Jan 2023
38
+ 2
39
+ C. CASAGRANDE
40
+ Let us note that Th. 1.1 and Cor. 1.2 generalize to dimension 4 the analogous
41
+ result for Fano 3-folds, established by Mori and Mukai in the 80’s:
42
+ Theorem 1.3 ([MM86], Th. 1.2). Let X be a smooth Fano 3-fold with ρX > 5.
43
+ Then X ∼= S × P1 where S is a del Pezzo surface. In particular ρX ≤ 10.
44
+ The proof of Th. 1.1 relies on a careful study of elementary contractions of X of type (3, 2), together with the author's previous work on Fano 4-folds. To explain this, let us introduce some notation.
+ Let X be a Fano 4-fold. A contraction is a surjective morphism f : X → Y, with connected fibers, where Y is normal and projective; f is elementary if ρX − ρY = 1. As usual, an elementary contraction can be of fiber type, divisorial, or small.
+ We say that an elementary contraction f : X → Y is of type (3, 2) if it is divisorial with dim S = 2, where E := Exc(f) and S := f(E) ⊂ Y. Such an f can have at most finitely many 2-dimensional fibers; outside the images of these fibers, Y and S are smooth, and f is just the blow-up of the surface S. If y0 ∈ S is the image of a two-dimensional fiber, then either Y or S is singular at y0; these singularities have been described by Andreatta and Wiśniewski, see Th. 2.1. In any case, Y has at most isolated, locally factorial and terminal singularities, while S may fail to be normal.
+ We denote by N1(X) the real vector space of one-cycles with real coefficients, modulo numerical equivalence; we have dim N1(X) = ρX. For any closed subset Z ⊂ X, we set
+ N1(Z, X) := ι∗(N1(Z)) ⊂ N1(X),
+ where ι : Z ↪ X is the inclusion, so that N1(Z, X) is the subspace of N1(X) spanned by classes of curves in Z, and dim N1(Z, X) ≤ ρZ.
+ We study an elementary contraction f : X → Y of type (3, 2) under the hypothesis that dim N1(E, X) ≥ 4. In particular this implies that Y is Fano too (Lemma 2.2).
+ We would like to compare (−KY)|S to −KS, but since S may be singular, we consider the minimal resolution of singularities µ : S′ → S and set L := µ^*((−KY)|S), a nef and big divisor class on S′. We show that KS′ + L is semiample (Lemma 3.1). Then our strategy is to look for curves in S′ on which KS′ + L is trivial, using other elementary contractions of X of type (3, 2) whose exceptional divisor intersects E in a suitable way.
+ Hence let us assume that X has another elementary contraction g1 of type (3, 2) whose exceptional divisor E1 intersects E, and such that E · Γ1 = 0 for a curve Γ1 contracted by g1. Set D := f(E1) ⊂ Y. We show that an irreducible component C1 of D ∩ S is a (−1)-curve contained in the smooth locus Sreg, such that −KY · C1 = 1 (Lemma 3.2, see Fig. 3.1 on p. 7). If C′1 ⊂ S′ is the transform of C1, we have (KS′ + L) · C′1 = 0.
+ Finally let us assume that X has three elementary contractions g1, g2, g3, all of type (3, 2), satisfying the same assumptions as g1 above. We also assume that E1 · Γ2 > 0 and E1 · Γ3 > 0, where E1 = Exc(g1) and Γ2, Γ3 are curves contracted by g2, g3 respectively. Then we show that S is a smooth del Pezzo surface with −KS = (−KY)|S (Th. 3.6 and Prop. 3.10); let us give an overview of the proof.
+ The previous construction yields three distinct (−1)-curves C′1, C′2, C′3 ⊂ S′ such that (KS′ + L) · C′i = 0 and C′1 intersects both C′2 and C′3. This shows that the contraction of S′ given by KS′ + L cannot be birational, namely KS′ + L is not big. We also rule out the possibility of a contraction onto a curve, and conclude that KS′ + L ≡ 0. Finally we show that ωS ≅ OY(KY)|S, where ωS is the dualizing sheaf of S, and conclude that S is smooth and del Pezzo.
+ We believe that these results can be useful in the study of Fano 4-folds besides their use in the present work. It would be interesting to generalize this technique to higher dimensions.
+ Let us now explain how we use these results to prove Th. 1.1. We define the Lefschetz defect of X as:
+ δX := max{ codim N1(D, X) | D ⊂ X a prime divisor }.
+ This invariant, introduced in [Cas12], measures the difference between the Picard number of X and that of its prime divisors; we refer the reader to [Cas22b] for a survey on δX.
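+ For instance, for a product X = S1 × S2 of surfaces and D = C × S2 with C ⊂ S1 a curve, the curves in D only span the class of C × {pt} together with the curve classes of S2, so codim N1(D, X) = ρS1 − 1; this is the source of the value δX = maxi ρSi − 1 appearing in Th. 1.4 below.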
+ Fano 4-folds with δX ≥ 3 are classified, as follows.
+ Theorem 1.4 ([Cas12], Th. 3.3). Let X be a smooth Fano 4-fold. If δX ≥ 4, then X ≅ S1 × S2, where the Si are del Pezzo surfaces, and δX = maxi ρSi − 1.
+ Theorem 1.5 ([CRS22], Prop. 1.5). Smooth Fano 4-folds with δX = 3 are classified. They have 5 ≤ ρX ≤ 8, and if ρX ∈ {7, 8} then X is a product of surfaces.
+ Therefore in our study of Fano 4-folds we can assume that δX ≤ 2, that is, codim N1(D, X) ≤ 2 for every prime divisor D ⊂ X. To prove that ρX ≤ 12, we look for a prime divisor D ⊂ X with dim N1(D, X) ≤ 10.
+ To produce such a divisor, we look at contractions of X. If X has an elementary contraction of fiber type, or a divisorial elementary contraction f : X → Y with dim f(Exc(f)) ≤ 1, it is not difficult to find a prime divisor D ⊂ X such that dim N1(D, X) ≤ 3, hence ρX ≤ 5 (Lemmas 2.5 and 2.6).
+ The case where X has a small elementary contraction is much harder and is treated in [Cas22a], where the following result is proven.
+ Theorem 1.6 ([Cas22a], Th. 1.1). Let X be a smooth Fano 4-fold. If X has a small elementary contraction, then ρX ≤ 12.
+ We are left with the case where every elementary contraction f : X → Y is of type (3, 2). In this case we show (Th. 4.1) that, if ρX ≥ 8, we can apply our previous study of elementary contractions of type (3, 2), so that if E := Exc(f) and S := f(E) ⊂ Y, then S is a smooth del Pezzo surface. This implies that dim N1(S, Y) ≤ ρS ≤ 9, dim N1(E, X) = dim N1(S, Y) + 1 ≤ 10, and finally that ρX ≤ 12, proving Th. 1.1.
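+ Explicitly, since in this case every prime divisor satisfies codim N1(D, X) ≤ δX ≤ 2, applying this to D = E gives ρX ≤ dim N1(E, X) + 2 ≤ 10 + 2 = 12.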
+ The structure of the paper is as follows. In §2 we gather some preliminary results. Then in §3 we develop our study of elementary contractions of type (3, 2), while in §4 we prove Th. 1.1.
+ 1.1. Notation
+ We work over the field of complex numbers. Let X be a projective variety. We denote by N1(X) (respectively, N^1(X)) the real vector space of one-cycles (respectively, Cartier divisors) with real coefficients, modulo numerical equivalence; dim N1(X) = dim N^1(X) = ρX is the Picard number of X.
+ Let C be a one-cycle of X, and D a Cartier divisor. We denote by [C] (respectively, [D]) the numerical equivalence class in N1(X) (respectively, N^1(X)). We also denote by D⊥ ⊂ N1(X) the orthogonal hyperplane to the class [D].
+ The symbol ≡ stands for numerical equivalence (for both one-cycles and divisors), and ∼ stands for linear equivalence of divisors.
+ NE(X) ⊂ N1(X) is the convex cone generated by classes of effective curves, and NE(X) is its closure. An extremal ray R is a one-dimensional face of NE(X). If D is a Cartier divisor in X, we write D · R > 0, D · R = 0, and so on, if D · γ > 0, D · γ = 0, and so on, for a non-zero class γ ∈ R. We say that R is K-negative if KX · R < 0.
+ Suppose that X has terminal and locally factorial singularities, and is Fano. Then NE(X) is a convex polyhedral cone. Given a contraction f : X → Y, we denote by NE(f) the convex subcone of NE(X) generated by classes of curves contracted by f; we recall that there is a bijection between contractions of X and faces of NE(X), given by f ↦ NE(f). Moreover dim NE(f) = ρX − ρY; in particular f is elementary if and only if NE(f) is an extremal ray.
+ When dim X = 4, we say that an extremal ray R is of type (3, 2) if the associated elementary contraction f is of type (3, 2), namely if f is divisorial with dim f(Exc(f)) = 2. We also set ER := Exc(f) and denote by CR ⊂ ER a general fiber of f|ER; note that ER · CR = −1.
+ We will also consider the cones Eff(X) ⊂ N^1(X) of classes of effective divisors, and mov(X) ⊂ N1(X) of classes of curves moving in a family covering X. Since X is Fano, both cones are polyhedral; we have the duality relation Eff(X) = mov(X)∨.
+ 2. Preliminaries
+ In this section we gather some preliminary results that will be used in the sequel. Andreatta and Wiśniewski have classified the possible 2-dimensional fibers of an elementary contraction of type (3, 2) of a smooth Fano 4-fold. In doing this, they also describe precisely the singularities both of the target and of the image of the exceptional divisor, as follows.
+ Theorem 2.1 ([AW98], Theorem on p. 256). Let X be a smooth Fano 4-fold and f : X → Y an elementary contraction of type (3, 2). Set S := f(Exc(f)).
+ Then f can have at most finitely many 2-dimensional fibers. Outside the images of these fibers, Y and S are smooth, and f is the blow-up of S.
+ Let y0 ∈ S ⊂ Y be the image of a 2-dimensional fiber; then one of the following holds:
+ (i) S is smooth at y0, while Y has an ordinary double point at y0, locally factorial and terminal;
+ (ii) Y is smooth at y0, while S is singular at y0. More precisely, either S is not normal at y0, or it has a singularity of type 1/3(1, 1) at y0 (as the cone over a twisted cubic).
+ In particular the singularities of Y are at most isolated, locally factorial, and terminal.
+ Now we give some simple preliminary results on extremal rays of type (3, 2).
+ Lemma 2.2. Let X be a smooth Fano 4-fold and f : X → Y an elementary contraction of type (3, 2); set E := Exc(f). If dim N1(E, X) ≥ 4, then E · R ≥ 0 for every extremal ray R of X different from NE(f), and Y is Fano.
+ Proof. It follows from [Cas17, Lemma 2.16 and Rem. 2.17] that NE(f) is the unique extremal ray of X having negative intersection with E, that −KX + E = f^*(−KY) is nef, and that (−KX + E)⊥ ∩ NE(X) = NE(f), so that −KY is ample.
+ Lemma 2.3. Let X be a smooth Fano 4-fold and R1, R2 extremal rays of X of type (3, 2) such that dim N1(ER1, X) ≥ 4 and ER1 · R2 = 0. Then ER2 · R1 = 0 and R1 + R2 is a face of NE(X) whose associated contraction is birational, with exceptional locus ER1 ∪ ER2.
+ Proof. Let H be a nef divisor on X such that H⊥ ∩ NE(X) = R2, and set H′ := H + (H · CR1)ER1. Then H′ · CR1 = H′ · CR2 = 0, and if R3 is an extremal ray of NE(X) different from R1 and R2, we have ER1 · R3 ≥ 0 by Lemma 2.2, hence H′ · R3 > 0. Therefore H′ is nef and (H′)⊥ ∩ NE(X) = R1 + R2 is a face of NE(X).
+ If Γ ⊂ X is an irreducible curve with [Γ] ∈ R1 + R2, then H′ · Γ = 0, so that either ER1 · Γ < 0 and Γ ⊂ ER1, or H · Γ = 0, [Γ] ∈ R2 and Γ ⊂ ER2. This shows that the contraction of R1 + R2 is birational with exceptional locus ER1 ∪ ER2.
+ Finally we have ER2 · R1 = 0 by [Cas13b, Lemma 2.2(b) and its proof].
+ Lemma 2.4. Let X be a smooth Fano 4-fold and R1, R2 distinct extremal rays of X of type (3, 2) with dim N1(ERi, X) ≥ 4 for i = 1, 2. If there exists a birational contraction g : X → Z with R1, R2 ⊂ NE(g), then ER1 · R2 = ER2 · R1 = 0.
+ Proof. We note first of all that ERi · Rj ≥ 0 for i ≠ j by Lemma 2.2. Suppose that ER1 · R2 > 0. Then ER1 · (CR1 + CR2) = ER1 · CR2 − 1 ≥ 0. Moreover ER2 · R1 > 0 by Lemma 2.3, so that ER2 · (CR1 + CR2) ≥ 0. On the other hand, for every prime divisor D different from ER1, ER2 we have D · (CR1 + CR2) ≥ 0, therefore [CR1 + CR2] ∈ Eff(X)∨ = mov(X). Since [CR1 + CR2] ∈ NE(g), g should be of fiber type, a contradiction.
+ Lemma 2.5. Let X be a smooth Fano 4-fold with δX ≤ 2, and g : X → Z a contraction of fiber type. Then ρZ ≤ 4.
+ Proof. This follows from [Cas12]; for the reader's convenience we report the proof. If dim Z ≤ 1, then ρZ ≤ 1. If Z is a surface, take any prime divisor D ⊂ X such that g(D) ⊊ Z, so that N1(g(D), Z) = {0} if g(D) = {pt}, and N1(g(D), Z) = R[g(D)] if g(D) is a curve. Consider the pushforward of one-cycles g∗ : N1(X) → N1(Z), and note that dim ker g∗ = ρX − ρZ. We have g∗(N1(D, X)) = N1(g(D), Z) and dim N1(g(D), Z) ≤ 1, thus codim N1(D, X) ≥ ρZ − 1, and δX ≤ 2 yields ρZ ≤ 3.
+ If dim Z = 3, then as in [Cas12, proof of Cor. 1.6] one shows that there exists a prime divisor D ⊂ X such that dim N1(g(D), Z) ≤ 2, and reasoning as before we get ρZ ≤ 4.
+ Lemma 2.6 ([Cas17], Rem. 2.17(1)). Let X be a smooth Fano 4-fold. If X has a divisorial elementary contraction not of type (3, 2), then ρX ≤ 5.
+ 3. Showing that S is a del Pezzo surface
+ In this section we study elementary contractions of type (3, 2) of a Fano 4-fold. We focus on the surface S which is the image of the exceptional divisor; as explained in the Introduction, our goal is to show that, under suitable assumptions, S is a smooth del Pezzo surface. Recall that S has isolated singularities by Th. 2.1.
+ Lemma 3.1. Let X be a smooth Fano 4-fold and f : X → Y an elementary contraction of type (3, 2). Set E := Exc(f) and S := f(E), and assume that dim N1(E, X) ≥ 4. Let µ : S′ → S be the minimal resolution of singularities, and set L := µ^*((−KY)|S). Then KS′ + L is semiample.
+ Proof. Note that −KY is Cartier by Th. 2.1, and ample by Lemma 2.2, so that L is nef and big on S′, and for every irreducible curve Γ ⊂ S′ we have L · Γ = 0 if and only if Γ is µ-exceptional.
+ Consider the pushforward of one-cycles f∗ : N1(X) → N1(Y). Then f∗(N1(E, X)) = N1(S, Y), therefore ρS′ ≥ ρS ≥ dim N1(S, Y) ≥ 3.
+ Let R be a KS′-negative extremal ray of NE(S′). The contraction associated to R can be onto a point (if S′ ≅ P2), onto a curve (so that ρS′ = 2), or the blow-up of a smooth point (see for instance [Mat02, Th. 1-4-8]). Since ρS′ > 2, R is generated by the class of a (−1)-curve Γ, which cannot be µ-exceptional, because µ is minimal. Then L · Γ > 0 and (KS′ + L) · Γ = L · Γ − 1 ≥ 0.
+ Moreover, if γ ∈ NE(S′)KS′≥0, then (KS′ + L) · γ = KS′ · γ + L · γ ≥ 0.
+ By the Cone Theorem we conclude that KS′ + L is nef on S′, and also semiample by the Base-Point-Free Theorem.
+
271
+ Lemma 3.2. Let X be a smooth Fano 4-fold and f : X → Y an elementary
272
+ contraction of type (3, 2). Set E := Exc(f) and S := f(E), and assume that
273
+
274
+ FANO 4-FOLDS WITH b2 > 12 ARE PRODUCTS
275
+ 7
276
+ Figure 3.1. The varieties in Lemma 3.2.
277
+ g
278
+ E
279
+ X
280
+ ER1
281
+ T1
282
+ Y
283
+ S
284
+ C1
285
+ D = f(ER1)
286
+ f
287
+ Z
288
+ h(E)
289
+ h(ER1)
290
+ h
291
+ dim N1(E, X) ≥ 4. Let µ: S′ → S be the minimal resolution of singularities, and
292
+ set L := µ∗((−KY )|S).
293
+ Suppose that X has an extremal ray R1 of type (3, 2) such that:
294
+ E · R1 = 0
295
+ and
296
+ E ∩ ER1 ̸= ∅.
297
+ Set D := f(ER1) ⊂ Y .
298
+ Then D|S = C1 + · · · + Cr where Ci are pairwise disjoint (−1)-curves contained
299
+ in Sreg, ER1 = f ∗(D), and f∗(CR1) ≡Y Ci. Moreover if C′
300
+ i ⊂ S′ is the transform
301
+ of Ci, we have (KS′ + L) · C′
302
+ i = 0 for every i = 1, . . . , r.
303
+ Proof. By Lemma 2.3 we have ER1 · NE(f) = 0, and NE(f) + R1 is a face of NE(X) whose associated contraction h : X → Z is birational with Exc(h) = E ∪ ER1. We have a commutative diagram (see Fig. 3.1):
+ (3.4)   h = g ◦ f : X → Y → Z,
+ where g : Y → Z is an elementary, K-negative, divisorial contraction with Exc(g) = D (recall that Y is locally factorial by Th. 2.1, and Fano by Lemma 2.2).
+ Since ER1 · NE(f) = E · R1 = 0, both h(E) and h(ER1) are surfaces in Z, and the general fiber of h over these surfaces is one-dimensional. Moreover h(E) ∩ h(ER1) is finite, and the connected components of E ∩ ER1 are 2-dimensional fibers of h over these points.
+ Using the classification of the possible 2-dimensional fibers of h in [AW98], as in [Cas22a, Lemma 4.15] we see that every connected component Ti of E ∩ ER1 (which is non-empty by assumption) is isomorphic to P1 × P1 with normal bundle O(−1, 0) ⊕ O(0, −1), for i = 1, . . . , r. Set Ci := f(Ti), so that D ∩ S = f(E ∩ ER1) = f(∪iTi) = ∪iCi. Then Ci ≅ P1, Ci ∩ Cj = ∅ if i ≠ j, and f has fibers of dimension one over Ci, therefore Ci ⊂ Sreg and Ci ⊂ Yreg by Th. 2.1.
+ Moreover g(D) = h(ER1) is a surface, namely g is of type (3, 2), and Ci is a one-dimensional fiber of g contained in Yreg, hence KY · Ci = D · Ci = −1. We also have ER1 = f^*(D) and f∗(CR1) ≡Y Ci.
+ Since Ci ⊂ Sreg, it is a Cartier divisor in S, and we can write D|S = m1C1 + · · · + mrCr with mi ∈ Z>0 for every i = 1, . . . , r. In S we have Ci · Cj = 0 for i ≠ j, hence for i ∈ {1, . . . , r} we get
+ −1 = D · Ci = (m1C1 + · · · + mrCr) · Ci = mi Ci²,
+ and we conclude that mi = 1 and Ci² = −1, so that Ci is a (−1)-curve in S. Finally −KS · Ci = −KY · Ci = 1, hence if C′i ⊂ S′ is the transform of Ci, we have (KS′ + L) · C′i = 0.
+ Corollary 3.5. Let X be a smooth Fano 4-fold and f : X → Y an elementary contraction of type (3, 2). Set E := Exc(f), and assume that dim N1(E, X) ≥ 4. Suppose that X has an extremal ray R1 of type (3, 2) such that E · R1 = 0. Then R′1 := f∗(R1) is an extremal ray of Y of type (3, 2), and ER1 = f^*(ER′1).
+ Proof. If E ∩ ER1 ≠ ∅, we are in the setting of Lemma 3.2; consider the elementary contraction g : Y → Z as in (3.4). Then NE(g) = f∗(R1) = R′1 is an extremal ray of Y of type (3, 2), and f^*(ER′1) = ER1.
+ If E ∩ ER1 = ∅, then we still have a diagram as (3.4), where g is locally isomorphic to the contraction of R1 in X, and the statement is clear.
+ Theorem 3.6. Let X be a smooth Fano 4-fold and f : X → Y an elementary contraction of type (3, 2). Set E := Exc(f) and S := f(E), and assume that dim N1(E, X) ≥ 4.
+ Suppose that X has two extremal rays R1, R2 of type (3, 2) such that ER1 · R2 > 0 and, for i = 1, 2, E · Ri = 0 and E ∩ ERi ≠ ∅. Then one of the following holds:
+ (i) S is a smooth del Pezzo surface and −KS = (−KY)|S;
+ (ii) ER1 · CR2 = ER2 · CR1 = 1.
+ Proof. We apply Lemma 3.2 to f, R1 and to f, R2. Write f(ER1)|S = C1 + · · · + Cr, and let Γ2 be an irreducible component of f(ER2)|S, so that C1, . . . , Cr, Γ2 are (−1)-curves contained in Sreg, and Γ2 ≡ f∗(CR2). Then
+ (3.7)   0 < ER1 · CR2 = f^*(f(ER1)) · CR2 = f(ER1) · Γ2 = (C1 + · · · + Cr) · Γ2,
+ hence Ci · Γ2 > 0 for some i, say i = 1.
+ Let µ : S′ → S be the minimal resolution of singularities, and set L := µ^*((−KY)|S). Moreover, let Γ′2 and C′1 in S′ be the transforms of Γ2 and C1, respectively; then Γ′2 and C′1 are disjoint from the µ-exceptional locus, are (−1)-curves in S′, (KS′ + L) · C′1 = (KS′ + L) · Γ′2 = 0, and C′1 · Γ′2 > 0.
+ Recall that KS′ + L is semiample by Lemma 3.1. In particular, the face (KS′ + L)⊥ ∩ NE(S′) contains the classes of two distinct (−1)-curves which meet. This means that the associated contraction cannot be birational, and we have two possibilities: either KS′ + L ≡ 0, or the contraction associated to KS′ + L is onto a curve. We show that these two cases yield respectively (i) and (ii).
+ Suppose first that KS′ + L ≡ 0; in particular −KS′ is nef and big, namely S′ is a weak del Pezzo surface.
+ Set for simplicity F := OY(KY)|S, an invertible sheaf on S, and let ωS be the dualizing sheaf of S. We have KS′ ≡ µ^*(F), and since S′ is rational, we also have OS′(KS′) ≅ µ^*(F). By restricting to the open subset µ−1(Sreg), we conclude that (ωS)|Sreg ≅ F|Sreg. Now we use the following.
+ singularities, and ωS its dualizing sheaf. If there exists an invertible sheaf F on
401
+ S such that (ωS)|Sreg ∼= F|Sreg, then S is normal and ωS ∼= F.
402
+ This should be well-known to experts, we include a proof for lack of references.
403
+ We postpone the proof of Lemma 3.8 and carry on with the proof of Th. 3.6.
404
+ By Lemma 3.8 we have that S is normal and ωS ∼= F, in particular ωS is
405
+ locally free. If y0 is a singular point of S, then by Th. 2.1 y0 is a singularity of
406
+ type 1
407
+ 3(1, 1), but this contradicts the fact that ωS is locally free. We conclude
408
+ that S is smooth, and finally that −KS = (−KY )|S is ample, so that S is a del
409
+ Pezzo surface, and we have (i).
410
+ Assume now that KS′ +L yields a contraction g: S′ → B onto a smooth curve.
411
+ Let F ⊂ S′ be a general fiber F of g, so that −KS′ · F = L · F. Since F is
412
+ not µ-exceptional, we have L · F > 0 and hence −KS′ · F > 0. Thus there is
413
+ a non-empty open subset B0 ⊆ B such that (−KS′)|g−1(B0) is g-ample, therefore
414
+ g|g−1(B0) : g−1(B0) → B0 is a conic bundle, F ∼= P1, and −KS′ · F = 2.
415
+ The curves C′
416
+ 1 and Γ′
417
+ 2 are components of the same fiber F0 of g, and −KS′ ·F0 =
418
+ 2 = −KS′ · (C′
419
+ 1 + Γ′
420
+ 2). For any irreducible curve C0 contained in F0 we have
421
+ −KS′ · C0 = L · C0 ≥ 0, so that if C0 is different from C′
422
+ 1 and Γ′
423
+ 2, we must have
424
+ −KS′ · C0 = L · C0 = 0 and C0 is µ-exceptional. Thus C0 ∩ (C′
425
+ 1 ∪ Γ′
426
+ 2) = ∅, and
427
+ since F0 is connected, we conclude that F0 = C′
428
+ 1 + Γ′
429
+ 2 and F0 ⊂ g−1(B0), hence
430
+ F0 is isomorphic to a reducible conic.
431
+ This also shows that C′
432
+ i for i > 1 are contained in different fibers of g, so that
433
+ C1 · Γ2 = Γ2 · C1 = 1
434
+ and
435
+ Ci · Γ2 = 0
436
+ for every i = 2, . . . , r,
437
+ and finally using (3.7)
438
+ ER1 · CR2 = (C1 + · · · + Cr) · Γ2 = 1.
439
+ Similarly we conclude that ER2 · CR1 = 1.
440
+
441
+ Remark 3.9. In the setting of Th. 3.6(i), we cannot conclude that Y is smooth. A priori Y could have isolated singularities at some y0 ∈ S; by [AW98] in this case f−1(y0) ≅ P2.
+ Proof of Lemma 3.8. Recall that S has isolated singularities. The surface S is reduced, thus it satisfies condition (S1), namely
+ depth OS,y ≥ 1 for every y ∈ S.
+ Then by [Har07, Lemma 1.3] the dualizing sheaf ωS satisfies condition (S2):
+ depth ωS,y ≥ 2 for every y ∈ S,
+ where depth ωS,y is the depth of the stalk ωS,y as an OS,y-module.
+ Then, for every open subset U ⊂ S such that S ∖ U is finite, we have ωS = j∗((ωS)|U), where j : U ↪ S is the inclusion, see [Har07, Rem. 1.8]. This is analogous to the properties of reflexive sheaves on normal varieties, see [Har80, Propositions 1.3 and 1.6], and can be proved using local cohomology [Gro67].
+ Hence we have ωS = j∗((ωS)|Sreg), where j : Sreg ↪ S is the inclusion. Since F is locally free, we get
+ ωS = j∗((ωS)|Sreg) ≅ j∗(F|Sreg) = F,
+ in particular ωS is an invertible sheaf, and for every y ∈ S we have ωS,y ≅ OS,y as an OS,y-module, thus depth OS,y = 2. Therefore S has property (S2), and it is normal by Serre's criterion.
+ Proposition 3.10. Let X be a smooth Fano 4-fold and f : X → Y an elementary contraction of type (3, 2). Set E := Exc(f) and S := f(E), and assume that dim N1(E, X) ≥ 4.
+ Suppose that X has three distinct extremal rays R1, R2, R3 of type (3, 2) such that E · Ri = 0 and E ∩ ERi ≠ ∅ for i = 1, 2, 3, and ER1 · Rj > 0 for j = 2, 3. Then S is a smooth del Pezzo surface and −KS = (−KY)|S.
+ Proof. We apply Th. 3.6 to f, R1, R2 and to f, R1, R3. Let us keep the same notation as in the proof of Th. 3.6; moreover, we denote by Γ3 an irreducible component of f(ER3)|S and by Γ′3 ⊂ S′ its transform. We show that KS′ + L ≡ 0, which yields the statement by the proof of Th. 3.6.
+ Otherwise, KS′ + L yields a contraction g : S′ → B onto a curve, and F0 = C′1 + Γ′2 is a fiber of g. On the other hand, Γ′3 is also contained in a fiber of g, it is different from C′1 and Γ′2, and C′1 · Γ′3 > 0, which is impossible.
+
491
+ Corollary 3.11. Let X be a smooth Fano 4-fold with δX ≤ 2. Suppose that X
492
+ has four distinct extremal rays R0, R1, R2, R3 of type (3, 2) such that:
493
+ ER0 · Ri = 0 for i = 1, 2, 3, and ER1 · Rj > 0 for j = 2, 3.
494
+ Then one of the following holds:
495
+ (i) dim N1(ERi, X) ≤ 3 for some i ∈ {0, 1, 2, 3}, in particular ρX ≤ 5;
496
+
497
+ FANO 4-FOLDS WITH b2 > 12 ARE PRODUCTS
498
+ 11
499
+ (ii) dim N1(ER0, X) ≤ 10, in particular ρX ≤ 12.
500
+ Moreover if f : X → Y is the contraction of R0 and S := f(ER0), then S is
501
+ a smooth del Pezzo surface and −KS = (−KY )|S.
502
+ Proof. We assume that dim N1(ERi, X) ≥ 4 for every i = 0, 1, 2, 3, and prove (ii).
503
+ We show that ER0 ∩ ERi ̸= ∅ for every i = 1, 2, 3.
504
+ If ER0 ∩ ERi = ∅ for
505
+ some i ∈ {1, 2, 3}, then for every curve C ⊂ ER0 we have ERi · C = 0, so that
506
+ [C] ∈ (ERi)⊥, and N1(ER0, X) ⊂ (ERi)⊥.
507
+ Since the classes [ER1], [ER2], [ER3] ∈ N 1(X) generate distinct one dimensional
508
+ faces of Eff(X) (see [Cas13a, Rem. 2.19]), they are linearly independent, hence in
509
+ N1(X) we have
510
+ codim
511
+
512
+ (ER1)⊥ ∩ (ER2)⊥ ∩ (ER3)⊥�
513
+ = 3.
514
+ On the other hand codim N1(ER0, X) ≤ δX ≤ 2, thus N1(ER0, X) cannot be
515
+ contained in the above intersection. Then N1(ER0, X) ̸⊂ (ERh)⊥ for some h ∈
516
+ {1, 2, 3}, hence ER0 ∩ ERh ̸= ∅. In particular, since ER0 · Rh = 0, there exists an
517
+ irreducible curve C ⊂ ER0 with [C] ∈ Rh.
518
+ For j = 2, 3 we have ER1 · Rj > 0, and by Lemma 2.3 also ERj · R1 > 0. This
519
+ implies that ER0 ∩ ERi ̸= ∅ for every i = 1, 2, 3. For instance say h = 3: then
520
+ ER1 · R3 > 0 yields ER1 ∩ C ̸= ∅, hence ER0 ∩ ER1 ̸= ∅. Then there exists an
521
+ irreducible curve C′ ⊂ ER0 with [C′] ∈ R1, and ER2 ·R1 > 0 yields ER0 ∩ER2 ̸= ∅.
522
+ Finally we apply Prop. 3.10 to get that S is a smooth del Pezzo surface and
523
+ −KS = (−KY )|S.
524
+ Therefore dim N1(S, Y ) ≤ ρS ≤ 9 and dim N1(ER0, X) =
525
+ dim N1(S, X) + 1 ≤ 10, so we get (ii).
526
+
527
+ 4. Proof of Th. 1.1
+ In this section we show how to apply the results of §3 to bound ρX; the following is our main result.
+ Theorem 4.1. Let X be a smooth Fano 4-fold with δX ≤ 2 and ρX ≥ 8, and with no small elementary contraction. Then ρX ≤ δX + 10 ≤ 12. Moreover every elementary contraction f : X → Y is of type (3, 2), and S := f(Exc(f)) ⊂ Y is a smooth del Pezzo surface with −KS = (−KY)|S.
+ In the proof we will use the following terminology: if R1, R2 are distinct one-dimensional faces of a convex polyhedral cone C, we say that R1 and R2 are adjacent if R1 + R2 is a face of C. A facet of C is a face of codimension one, and RC is the linear span of C. We will also need the following elementary fact.
+ Lemma 4.2 ([Ewa96], Lemma II.2.6). Let C be a convex polyhedral cone not containing non-zero linear subspaces, and R0 a one-dimensional face of C. Let R1, . . . , Rm be the one-dimensional faces of C that are adjacent to R0. Then the linear span of R0, R1, . . . , Rm is RC.
+ Proof of Th. 4.1. Let f : X → Y be an elementary contraction; note that ρY = ρX − 1 ≥ 7. Then f is not of fiber type by Lemma 2.5, and not small by assumption, so that f is divisorial. Moreover f is of type (3, 2) by Lemma 2.6.
+ Set E := Exc(f) and S := f(E) ⊂ Y; we have dim N1(E, X) ≥ ρX − δX ≥ 6, and if R′ ≠ NE(f) is another extremal ray of X, we have E · R′ ≥ 0 by Lemma 2.2. Moreover, if R′ is adjacent to NE(f), then E · R′ = 0. Indeed the contraction g : X → Z of the face R′ + NE(f) cannot be of fiber type by Lemma 2.5, thus it is birational and we apply Lemma 2.4.
+ We are going to show that there exist three extremal rays R′1, R′2, R′3 adjacent to NE(f) such that ER′1 · R′j > 0 for j = 2, 3, and then apply Cor. 3.11.
+ Let us consider the cone NE(Y). It is a convex polyhedral cone whose extremal rays R are in bijection with the extremal rays R′ of X adjacent to NE(f), via R = f∗(R′); see [Cas08, §2.5].
+ By Cor. 3.5, R is still of type (3, 2), and f^*(ER) = ER′. Thus for every pair R1, R2 of distinct extremal rays of Y, with Ri = f∗(R′i) for i = 1, 2, we have ER1 · R2 = ER′1 · R′2 ≥ 0.
+ If R1 and R2 are adjacent, we show that ER1 · R2 = ER2 · R1 = 0. Indeed, consider the contraction Y → Z of the face R1 + R2 and the composition g : X → Z, which contracts R′1 and R′2. Again g cannot be of fiber type by Lemma 2.5, thus it is birational, and we apply Lemma 2.4 to get ER′1 · R′2 = ER′2 · R′1 = 0, thus ER1 · R2 = ER2 · R1 = 0.
+ Fix an extremal ray R1 of Y . We show that there exist two distinct extremal
583
+ rays R2, R3 of Y with ER1 · Rj > 0 for j = 2, 3.
584
+ Indeed since ER1 is an effective divisor, there exists some curve C ⊂ Y with
585
+ ER1 · C > 0, hence there exists some extremal ray R2 with ER1 · R2 > 0.
586
+ By contradiction, let us assume that ER1 · R = 0 for every extremal ray R of Y
587
+ different from R1, R2. This means that the cone NE(Y ) has the extremal ray R1
588
+ in the halfspace N1(Y )ER1<0, the extremal ray R2 in the halfspace N1(Y )ER1>0,
589
+ and all other extremal rays in the hyperplane (ER1)⊥.
590
+ Fix R ̸= R1, R2, and let τ be a facet of NE(Y ) containing R and not R1. Note
591
+ that Rτ ̸= (ER1)⊥, as ER1 and −ER1 are not nef. By Lemma 4.2 the rays adjacent
592
+ to R in τ cannot be all contained in (ER1)⊥. We conclude that R2 is adjacent to
593
+ R, therefore ER2 · R = 0, namely R ⊂ (ER2)⊥.
594
+ Summing up, we have shown that every extremal ray R ̸= R1, R2 of Y is
595
+ contained in both (ER1)⊥ and (ER2)⊥. On the other hand these rays include all
596
+ the rays adjacent to R1, so by Lemma 4.2 their linear span must be at least a
597
+ hyperplane. Therefore (ER1)⊥ = (ER2)⊥ and the classes [ER1], [ER2] ∈ N 1(Y ) are
598
+ proportional, which is impossible, because they generate distinct one dimensional
599
+ faces of the cone Eff(Y ) (see [Cas13a, Rem. 2.19]).
600
+ We conclude that there exist two distinct extremal rays R2, R3 of Y with ER1 ·
601
+ Rj > 0 for j = 2, 3.
602
+ For i = 1, 2, 3 we have Ri = f∗(R′i), where R′i is an extremal ray of X adjacent to NE(f), so that E · R′i = 0. Moreover for j = 2, 3 we have ER′1 · R′j = ER1 · Rj > 0.
+ We apply Cor. 3.11 to NE(f), R′1, R′2, R′3. We have already excluded (i), and (ii) yields the statement.
+ We can finally prove the following more detailed version of Th. 1.1.
+ Theorem 4.3. Let X be a smooth Fano 4-fold which is not a product of surfaces. Then ρX ≤ 12; moreover, if ρX = 12, then there exist a finite sequence of flips ϕ : X ⇢ X′, with X′ smooth, and a contraction g : X′ → Z with dim Z = 3.
+ Proof. Since X is not a product of surfaces, we have δX ≤ 3 by Th. 1.4. Moreover δX = 3 yields ρX ≤ 6 by Th. 1.5, while δX ≤ 2 yields ρX ≤ 12 by Theorems 1.6 and 4.1.
+ If ρX = 12, the statement follows from [Cas22a, Theorems 2.7 and 9.1].
+ References
+ [AW98] M. Andreatta and J.A. Wiśniewski, On contractions of smooth varieties, J. Algebraic Geom. 7 (1998), 253–312.
+ [Cas08] C. Casagrande, Quasi-elementary contractions of Fano manifolds, Compos. Math. 144 (2008), 1429–1460.
+ [Cas12] C. Casagrande, On the Picard number of divisors in Fano manifolds, Ann. Sci. Éc. Norm. Supér. 45 (2012), 363–403.
+ [Cas13a] C. Casagrande, On the birational geometry of Fano 4-folds, Math. Ann. 355 (2013), 585–628.
+ [Cas13b] C. Casagrande, Numerical invariants of Fano 4-folds, Math. Nachr. 286 (2013), 1107–1113.
+ [Cas17] C. Casagrande, Fano 4-folds, flips, and blow-ups of points, J. Algebra 483 (2017), 362–414.
+ [Cas22a] C. Casagrande, Fano 4-folds with a small contraction, Adv. Math. 405 (2022), 1–55, paper no. 108492.
+ [Cas22b] C. Casagrande, The Lefschetz defect of Fano varieties, Rend. Circ. Mat. Palermo (2), published online 19 December 2022.
+ [CRS22] C. Casagrande, E.A. Romano, and S.A. Secci, Fano manifolds with Lefschetz defect 3, J. Math. Pures Appl. 163 (2022), 625–653; Corrigendum: 168 (2022), 108–109.
+ [Ewa96] G. Ewald, Combinatorial convexity and algebraic geometry, Graduate Texts in Mathematics, vol. 168, Springer-Verlag, 1996.
+ [Gro67] A. Grothendieck, Local cohomology, Lecture Notes in Math., vol. 41, Springer-Verlag, 1967.
+ [Har80] R. Hartshorne, Stable reflexive sheaves, Math. Ann. 254 (1980), 121–176.
+ [Har07] R. Hartshorne, Generalized divisors and biliaison, Illinois J. Math. 51 (2007), 83–98.
+ [Mat02] K. Matsuki, Introduction to the Mori program, Universitext, Springer-Verlag, 2002.
+ [MM86] S. Mori and S. Mukai, Classification of Fano 3-folds with b2 ≥ 2, I, Algebraic and Topological Theories – to the memory of Dr. Takehiko Miyata (Kinosaki, 1984), Kinokuniya, Tokyo, 1986, pp. 496–545.
+ Università di Torino, Dipartimento di Matematica, via Carlo Alberto 10, 10123 Torino - Italy
+ Email address: [email protected]
4dFKT4oBgHgl3EQf9S5d/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4tAzT4oBgHgl3EQfEPpQ/content/tmp_files/2301.00989v1.pdf.txt ADDED
@@ -0,0 +1,763 @@
+ arXiv:2301.00989v1 [cs.CV] 3 Jan 2023
+ A New Perspective to Boost Vision Transformer for Medical Image Classification
+ Yuexiang Li, Yawen Huang, Nanjun He, Kai Ma, Yefeng Zheng
+ Tencent Jarvis Lab, Shenzhen, China
+ Abstract
+ Transformer has achieved impressive success on various computer vision tasks. However, most existing studies require pretraining the Transformer backbone on a large-scale labeled dataset (e.g., ImageNet) to achieve satisfactory performance, and such a dataset is usually unavailable for medical images. Additionally, due to the gap between medical and natural images, the improvement generated by ImageNet pretrained weights significantly degrades when the weights are transferred to medical image processing tasks. In this paper, we propose Bootstrap Own Latent of Transformer (BOLT), a self-supervised learning approach designed specifically for medical image classification with a Transformer backbone. Our BOLT consists of two networks, namely online and target branches, for self-supervised representation learning. Concretely, the online network is trained to predict the target network representation of the same patch embedding tokens with a different perturbation. To maximally excavate the impact of the Transformer from limited medical data, we propose an auxiliary difficulty ranking task. The Transformer is enforced to identify which branch (i.e., online/target) is processing the more difficult perturbed tokens. Overall, the Transformer endeavours to distill transformation-invariant features from the perturbed tokens, so as to simultaneously achieve difficulty measurement and maintain the consistency of self-supervised representations. The proposed BOLT is evaluated on three medical image processing tasks, i.e., skin lesion classification, knee fatigue fracture grading and diabetic retinopathy grading. The experimental results validate the superiority of our BOLT for medical image classification, compared to ImageNet pretrained weights and state-of-the-art self-supervised learning approaches.
+ 1 Introduction
+ Recently, the vision Transformer (ViT) [10] and its variants [23, 32, 36] have been introduced for various computer vision tasks (e.g., image classification [10, 18], object detection [9, 41], semantic segmentation [34, 39] and medical image processing [11, 15, 16, 31, 38]) and have gained increasing attention from the community. The common ViT usually requires pretraining on large-scale natural image datasets, e.g., ImageNet, to achieve satisfactory performance. For natural images, the labels for the pretraining dataset can be efficiently obtained by crowdsourcing, as even ordinary people possess the ability to effectively identify and label objects in natural images. However, the same strategy cannot be adopted for medical images, as professional expertise is mandatory for high-quality medical image annotations. Hence, the limited amount of annotated medical data is the major obstacle to improving diagnosis accuracy, even with the powerful vision Transformer.
+ Self-supervised learning (SSL) is a potential solution to tackle the challenge of insufficient annotated data. A typical self-supervised learning method formulates a proxy task to extract representative features from unlabeled data, which can boost the accuracy of the subsequent target task. Existing studies have proposed various proxy tasks, including grayscale image colorization [19], patch re-ordering [25], and context restoration [27]. SSL was first brought to medical image processing by Zhang et al. [37]. Concretely, a neural network was pretrained with a proxy task that sorted the 2D slices from conventional 3D medical volumes for the subsequent fine-grained body part recognition. Zhu et al. [40] enforced 3D networks to play a Rubik's cube game for pretraining, which can be seen as an extension of 2D Jigsaw puzzles [24]. Contrastive learning [13] has recently been popularized for self-supervised representation learning. These approaches enforce neural networks to spontaneously exploit useful information from pairs of positive and negative samples, instead of permuting the contextual information of images to formulate the self-supervised signal. He et al. [14] first introduced the idea of contrastive learning into the area of self-supervised learning. They proposed an approach, namely MoCo, which addressed the problem of the large number of negative samples required for contrastive learning by maintaining a memory bank of negative samples. Following this direction, various contrastive-learning-based self-supervised approaches have been proposed [4, 6, 7, 12, 26, 33, 35]. Inspired by the success of self-supervised learning for CNNs, researchers began to bring it to ViT. Atito et al. [1] directly utilized existing SSL approaches, including rotation prediction, contrastive learning and image restoration, to pretrain vision Transformers. Several studies [2, 3] have been proposed along this direction. However, taking the architecture difference between CNN and ViT into account, i.e., a CNN takes the whole image as input, while the input of a ViT is the embedding tokens of image tiles, a self-supervised learning approach designed specifically for ViT is worthwhile to develop.
+ Figure 1: The architecture of our BOLT framework. Compared to the original BYOL, our BOLT contains two main revisions: 1) the proposed BOLT generates two views of embedding tokens for self-supervised learning; 2) a novel difficulty-awareness loss is proposed to encourage the ViT to deeply exploit useful information from raw data. sg(.) means stop-gradient.
+ In a recent study, Chen et al. [7] proposed MoCo V3, a token-based contrastive learning approach designed specifically for ViT to extract self-supervised features from raw data. The network pretrained with MoCo V3 outperformed the ImageNet-pretrained one, which demonstrated the effectiveness of token-based self-supervised learning. In this paper, we follow this direction and propose a token-wise perturbation based self-supervised learning framework designed specifically for medical image classification with a vision Transformer, namely Bootstrap Own Latent of Transformer (BOLT). Similar to the existing Bootstrap Your Own Latent (BYOL) [12], our BOLT consists of two networks, namely online and target branches, for self-supervised representation learning. Instead of the image-wise transformation adopted by BYOL, the online network of our BOLT is trained to predict the target network representation of the same patch embedding tokens with a different perturbation. Moreover, to encourage the vision Transformer to deeply exploit useful information from limited medical data, we propose an auxiliary difficulty ranking task. The difference between the original patch embedding tokens and the perturbed ones is measured as the difficulty (i.e., a larger difference means the tokens are more difficult for the vision Transformer to process), which is then adopted as the supervision signal. In other words, the vision Transformer is required to identify which branch (online/target) is processing the more difficult perturbed tokens. Under the co-supervision of the two tasks, the vision Transformer is encouraged to distill transformation-invariant features from the perturbed tokens, which should be capable of simultaneous difficulty measurement and maintaining the consistency of self-supervised representations. In summary, the main contributions of our work are four-fold:
+ • A token perturbation based self-supervised learning approach, namely BOLT, designed specifically for vision Transformers, is proposed. A token perturbation module is integrated into the existing BYOL framework for more effective ViT pretraining.
+ • An auxiliary self-supervised task, i.e., difficulty ranking, is proposed to encourage ViTs to deeply exploit useful information from limited medical data. The self-supervised signal of this auxiliary task also derives from the perturbed tokens generated by our perturbation module. To the best of our knowledge, this is the first SSL framework based on the difficulty-awareness paradigm.
+ • The proposed BOLT is evaluated on three medical image processing tasks, i.e., skin lesion classification, knee fatigue fracture grading and diabetic retinopathy grading. The experimental results demonstrate the superiority of our BOLT compared to the widely-used ImageNet pretrained weights.
+ • Last but not least, we pretrain the ViT using different self-supervised learning approaches on a large-scale private fundus image dataset captured from a collaborating hospital for the diabetic retinopathy grading task. The dataset consists of 350,000 fundus images of normal cohorts and patients with various diseases, which may be the largest fundus image dataset in the world. Pretraining on our private large-scale dataset is verified to benefit the related downstream target task. To advance the development of automated fundus image processing, we will release the pretrained ViT models to the community.
+ Figure 2: The architecture of the proposed token perturbation module. The module consists of three operations (i.e., permutation, linear projection and split) to perturb the order and content of embedded tokens. Note that nine embedding tokens are taken as an example in this figure. The exact number (N) of embedding tokens is given by N = HW/P², where H and W are the height and width of the original image, respectively, and (P, P) is the size of each image patch.
+ 2 Method
+ In this section, we introduce the proposed BOLT framework in detail. The pipeline of our Bootstrap Own Latent of Transformer (BOLT) is illustrated in Fig. 1. Similar to BYOL, the proposed BOLT adopts two branches to extract useful information from raw data, i.e., the online and target branches. The online branch consists of a set of weights θ, including a vision Transformer fθ, a projector gθ and a predictor qθ. The target branch is of the same architecture with a different set of weights ξ. The target branch generates the regression targets for the online branch to learn, and its parameters ξ are an exponential moving average of the online branch parameters θ, which can be defined as:
+ ξ ← τξ + (1 − τ)θ    (1)
+ where τ ∈ [0, 1] is the decay rate.
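+ For concreteness, the momentum update of Eq. (1) can be sketched in a few lines of PyTorch (a minimal sketch under our notation; the function name and the default value of τ are illustrative, not taken from any released code):
+     import torch
+
+     @torch.no_grad()
+     def ema_update(target_params, online_params, tau=0.99):
+         # Eq. (1): xi <- tau * xi + (1 - tau) * theta, applied parameter-wise
+         for xi, theta in zip(target_params, online_params):
+             xi.mul_(tau).add_(theta, alpha=1.0 - tau)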
+ Compared to the existing BYOL [12], the proposed BOLT has two differences. First, instead of image-based perturbation, we implement a token-based perturbation module for the contrastive learning. The underlying reason for the token-based perturbation is that the vision Transformer is insensitive to the order of the input embedded tokens due to the mechanism of self-attention, which neutralizes the effectiveness of typical image-based transformations (e.g., Jigsaw puzzle permutation [24]) for the self-supervised learning of ViT. Inspired by recent studies [8, 36], our token perturbation module involves permutation, fusion and split operations to simultaneously disarrange the order and content of tokens. Second, since a recent study [29] demonstrated that difficulty-awareness can boost the performance of CNNs, a difficulty-awareness auxiliary task, i.e., requiring the ViT to identify which branch (online/target) is processing the more difficult perturbed tokens, is integrated into the existing BYOL framework.
+ 2.1 Token Perturbation Module
+ Instead of permuting the image content, we propose a token perturbation module to perturb the order and content of embedded tokens for the self-supervised learning of a vision Transformer. The architecture of our token perturbation module is presented in Fig. 2; it involves three operations, i.e., permutation, linear projection and split.
+ Permutation. Similar to the typical vision Transformer, the input image x ∈ R^(H×W×C) is cropped into a sequence of flattened 2D patches x_p ∈ R^(N×(P²C)), where H and W are the height and width of the original image, respectively, C is the number of channels, (P, P) is the size of each image patch, and N = HW/P² is the resulting number of patches. Therefore, the embedded tokens z_o can be written as:
+ z_o = [x_p^1 E; x_p^2 E; · · · ; x_p^N E],    (2)
+ where E ∈ R^((P²C)×D) is a trainable linear projection (D is the latent vector size of the vision Transformer). Then, the permuted tokens z_p are obtained using a permutation operation (Perm(·)), which randomly disarranges the order of z_o: z_p = Perm(z_o). Fig. 2 shows an example: the order of z_o is disarranged to [z_o^6; z_o^1; z_o^5; z_o^7; z_o^3; z_o^9; z_o^8; z_o^2; z_o^4].
+ Linear Projection. After the permutation, we concatenate M adjacent tokens using a sliding window with a stride S = W/P, which results in K = N/S long tokens (z′_p) of length M × D. The obtained tokens are then fed to a linear projection layer (E_fuse ∈ R^(MD×SD)) for information fusion, which yields K content-perturbed long tokens (z_l):
+ z_l = z′_p E_fuse.    (3)
+ Split. As previously mentioned, the typical vision Transformer uses a constant latent vector size D through all of its layers; hence, the fused tokens of length S × D need to be reshaped back to length D to fulfill the input requirement of the ViT. To achieve this, the proposed token perturbation module adopts a split operation to separate each long token into S tokens of length D. The split tokens (z_s) are then fed to the ViT for self-supervised learning.
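+ To make the data flow concrete, the whole module can be sketched as follows (a minimal PyTorch sketch of our reading of Eqs. (2)-(3); it assumes non-overlapping windows, i.e., M = S, so that the K = N/S long tokens split back into exactly N tokens, and all names are illustrative):
+     import torch
+     import torch.nn as nn
+
+     class TokenPerturbation(nn.Module):
+         def __init__(self, dim_d, window_m, stride_s):
+             super().__init__()
+             # E_fuse in R^{(M*D) x (S*D)}, Eq. (3)
+             self.fuse = nn.Linear(window_m * dim_d, stride_s * dim_d, bias=False)
+             self.m, self.s, self.d = window_m, stride_s, dim_d
+
+         def forward(self, z_o):                    # z_o: (B, N, D) embedded tokens
+             b, n, d = z_o.shape
+             perm = torch.randperm(n, device=z_o.device)
+             z_p = z_o[:, perm, :]                  # permutation
+             # concatenate M adjacent tokens with stride S -> K = N/S long tokens
+             z_long = z_p.unfold(1, self.m, self.s).permute(0, 1, 3, 2)
+             z_long = z_long.reshape(b, -1, self.m * d)
+             z_l = self.fuse(z_long)                # content perturbation, Eq. (3)
+             z_s = z_l.reshape(b, -1, d)            # split back to (B, N, D)
+             return z_s, perm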
+ 2.2 Loss Function
+ As shown in Fig. 1, our BOLT is jointly supervised by two loss functions, i.e., a similarity loss and a difficulty-awareness loss. The similarity loss is consistent with the existing BYOL framework. Concretely, for a set of embedded tokens z_o, our BOLT produces two augmented sets of perturbed tokens, z_t and z′_t, for the online and target branches, respectively. The perturbed tokens z_t are fed to a ViT fθ, which yields a representation yθ = fθ(z_t) and a projection zθ = gθ(yθ). For the perturbed tokens of the target branch, a representation yξ = fξ(z′_t) and a projection zξ = gξ(yξ) are accordingly generated. Consistent with BYOL, a prediction network qθ(.) is adopted to yield the prediction of zξ, and the l2-norm is calculated for network training:
+ Lθ = ||qθ(zθ) − zξ||_2^2    (4)
+ where θ denotes the network weights of the online branch, including fθ, gθ and qθ. The loss L^BOLT_θ = Lθ + L̃θ only optimizes the weights of the online branch θ, where L̃θ is the symmetric loss of Lθ obtained by feeding z′_t and z_t to the online and target branches, respectively.
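+ In code, Eq. (4) and its symmetrized sum read roughly as follows (a sketch; the stop-gradient of Fig. 1 is realized by detaching the target projection, and the helper names are ours):
+     import torch
+
+     def similarity_loss(q_online, z_target):
+         # Eq. (4): || q_theta(z_theta) - z_xi ||_2^2 with stop-gradient on the target
+         return (q_online - z_target.detach()).pow(2).sum(dim=-1).mean()
+
+     def l_bolt(q_view1, z_view2, q_view2, z_view1):
+         # L_BOLT = L_theta + its symmetric term (the two perturbed views are swapped)
+         return similarity_loss(q_view1, z_view2) + similarity_loss(q_view2, z_view1)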
+ Difficulty-awareness Loss. Apart from the similarity loss, inspired by curriculum learning [17], we propose an auxiliary task: identifying which branch is processing the tokens with the larger level of perturbation. Such an auxiliary task can drive ViTs to self-adaptively pay more attention to hard cases and accordingly better exploit the semantic information from the embedded tokens, since they are required to understand the content of the tokens for accurate difficulty ranking.
+ To formulate the auxiliary task, the self-supervised signal needs to be generated first. Denoting the perturbed tokens fed to the online and target branches by z_t and z′_t, respectively, the self-supervised signal y_self can be defined as:
+ y_self = 0 if MSE(Perm⁻¹_{z_t}(z_t) − z_o) < MSE(Perm⁻¹_{z′_t}(z′_t) − z_o), and y_self = 1 otherwise,    (5)
+ where MSE(.) is the mean squared error function and Perm⁻¹(.) is the inverse permutation operation rearranging the perturbed tokens back to the original order.
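+ Eq. (5) can be evaluated directly from the permutations drawn by the perturbation module (a sketch assuming the module returns its permutation indices, as in the sketch above; torch.argsort inverts a permutation):
+     import torch
+
+     def difficulty_label(z_t, perm_t, z_t2, perm_t2, z_o):
+         # Eq. (5): the branch whose restored tokens are farther from z_o is the harder one
+         mse_online = (z_t[:, torch.argsort(perm_t), :] - z_o).pow(2).mean()
+         mse_target = (z_t2[:, torch.argsort(perm_t2), :] - z_o).pow(2).mean()
+         # y_self = 0 if the online tokens z_t are the easier view, 1 otherwise
+         return (mse_online >= mse_target).float()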
+ After the self-supervision is obtained, the features extracted by the online and target ViTs (i.e., yθ and yξ) are concatenated (Cat(.)) and sent to a fully-connected layer (FC(.)) for difficulty classification. Specifically, the process can be written as:
+ L^Diff_θ = −y_self · log(p) − (1 − y_self) · log(1 − p)    (6)
+ where p = FC(Cat(yθ, yξ)) is the predicted probability of y_self = 1. Similar to L^BOLT_θ, the difficulty-awareness loss only optimizes the online branch (fθ).
+ We notice that a recent study [29] has already proposed a difficulty-awareness loss for scleral spur localization. Hence, it is worthwhile to emphasize the difference between that loss and ours. Concretely, Tao et al. [29] explicitly enforced networks to predict the Dice score of input images, using segmentation ground truth, to achieve difficulty-awareness. Due to the lack of manual annotations, few studies introduce the idea of difficulty-awareness into self-supervised learning (SSL). In this study, we obtain the difficulty-related information in a self-supervised manner using the token perturbation module, and implicitly formulate the difficulty-ranking proxy task. To the best of our knowledge, this is the first SSL framework based on the difficulty-awareness paradigm.
+ Overall Objective. Combining the aforementioned loss functions (L^BOLT and L^Diff), the full objective L for the optimization of the online branch can be written as:
+ L = L^BOLT_θ + α L^Diff_θ    (7)
+ where α = 0.1 is the loss weight of L^Diff_θ. According to Eq. (1), the weights ξ of the target branch are updated via the exponential moving average.
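+ Putting the pieces together, the objective of one optimization step of the online branch could look as follows (a sketch reusing similarity_loss from above; the binary cross-entropy term implements Eq. (6), with α = 0.1 as in Eq. (7)):
+     import torch.nn.functional as F
+
+     def bolt_objective(q1, z2, q2, z1, p, y_self, alpha=0.1):
+         # Eq. (7): L = L_BOLT + alpha * L_Diff; only the online weights receive gradients
+         l_sim = similarity_loss(q1, z2) + similarity_loss(q2, z1)
+         l_diff = F.binary_cross_entropy(p, y_self)
+         return l_sim + alpha * l_diff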
+ 3 Experiments
+ We evaluate the proposed BOLT on three target tasks, i.e., skin lesion classification, knee fatigue grading and diabetic retinopathy grading, using publicly available and private datasets. Conventional self-supervised learning approaches often pretrain the models on a large-scale unlabeled dataset (i.e., the proxy set), and then finetune them on a relatively smaller target set. In this paper, three different medical image processing tasks are involved for performance evaluation, and the corresponding proxy and target datasets for each task (example images are shown in the Supplementary Material) are introduced in the following:
+ Skin Lesion Classification. The publicly available ISIC 2019 dataset1 is used to validate
410
+ the effectiveness of the proposed BOLT. Specifically, the dataset [30] is provided by the
411
+ ISIC 2019 challenge, which encourages researchers to develop the automated systems pre-
412
+ dicting eight skin disease categories with dermoscopic images, i.e., squamous cell carci-
413
+ noma, melanocytic nevus, benign keratosis, actinic keratosis, dermatofibroma, basal cell
414
+ carcinoma, vascular lesion, and melanoma. The whole ISIC 2019 dataset, consisting of over
415
+ 20,000 dermoscopic images, is adopted as the proxy set. Due to the class imbalance prob-
416
+ lem of the original ISIC dataset, consistent with [21], 628 images are randomly sampled from each
417
+ class to establish a balanced target set. It is worthwhile to mention that the images from the
418
+ two classes consisting of fewer than 628 images are all taken into the target set. After that,
419
+ the balanced target set with 4,260 images is randomly separated into training, validation and
420
+ test sets based on the ratio of 70:10:20. Note that the ViT is first pretrained on the proxy set
421
+ and finetuned on the training and validation sets, and then evaluated on the test set.
422
+ Knee Fatigue Grading. The publicly available MURA dataset2 (musculoskeletal radio-
423
+ graphs) [28], which is a large dataset of bone X-rays (over 40,000 images), is adopted as the
424
+ proxy set to pretrain ViTs for the subsequent target task (i.e., knee fatigue grading). For the
425
+ knee fatigue grading, 2,725 X-ray images are collected from a collaborating hospital as the
426
+ target set [20]. The positions of fatigue fracture are different, i.e., navicular bone, tibia and
427
+ fibula. Each X-ray image is labeled by three physicians, and the final grade is decided via
428
+ majority-voting. In particular, the target set has 1,785 normal, 190 grade-1, 452 grade-2, 196
429
+ grade-3 and 102 grade-4 cases, respectively. For the evaluation on our private knee fatigue
430
+ grading dataset, the target set is divided into training, validation and test sets according to the
431
+ ratio of 70:10:20. Similar to [20], due to the imbalance problem (normal vs. fatigue fracture
432
+ and grade-2 vs. other fracture grades), an equal number (20) of test images from each cate-
433
+ gory are randomly sampled to form a uniform-distribution set for performance evaluation,
434
+ instead of using the whole test set.
435
+ Diabetic Retinopathy Grading. For the diabetic retinopathy grading task, we pretrain the
436
+ ViT on a large-scale private dataset captured from a collaborating hospital (proxy set), with
437
+ approval obtained from the institutional review board of the hospital. The dataset consists of
438
+ 350,000 fundus images of normal cohort and patients with various diseases. Then, the pre-
439
+ trained ViT is finetuned on the publicly available APTOS 2019 blindness detection dataset
440
+ (target set) for performance evaluation.3 In particular, there are 3,662 fundus images con-
441
+ tained in the target set and the severity of diabetic retinopathy (DR) can be classified into
+ five grades, i.e., normal (1,805), mild DR (370), moderate DR (999), severe DR (193) and
443
+ proliferative DR (295). Consistent with [22], five-fold cross-validation is conducted on this
444
+ dataset.4
445
+ Baselines & Evaluation Criterion. To demonstrate the effectiveness of our BOLT pretrain-
446
+ ing, we finetune ViTs with ImageNet pretrained weights on the target tasks and evaluate
447
+ 1https://challenge2019.isic-archive.com/
448
+ 2https://stanfordmlgroup.github.io/competitions/mura/
449
+ 3https://www.kaggle.com/c/aptos2019-blindness-detection
450
+ 4The ViT pretrained on our private large-scale dataset may benefit the related downstream target tasks. To
451
+ advance the development of automated fundus image processing, we will release the ViT pretrained models to the
452
+ community soon.
456
+ Table 1: The classification accuracy (ACC) presented in percentage (%) of ViTs using dif-
457
+ ferent training strategies with different amounts of training data on the ISIC 2019 test set.
458
+ Method                           100%   50%    10%
+ Train-from-scratch               39.4   35.2   31.3
+ ImageNet Pretrained              80.5   76.1   62.1
+ SimSam [5]                       79.9   75.9   61.2
+ BYOL [12]                        80.1   75.4   61.3
+ MoCo V3 [7]                      80.3   75.2   61.2
+ BOLT w/o L_Diff                  80.8   75.8   62.1
+ BOLT (ours)                      81.5   76.6   62.4
+ ImageNet Pretrained ResNet-50    75.7   72.5   61.2
493
+ their performance on the test set. Consistent with MoCo V3 [7], the basic ViT-B/16 is adopted
+ as the backbone. The original BYOL [12], the state-of-the-art self-supervised learning approach
+ SimSam [5], and the token-based self-supervised learning approach MoCo V3 [7] are assessed
+ for comparison. It is worthwhile to mention that the backbones of the representation networks
+ of BYOL and SimSam implemented in this study are ViT-B/16. The average classification
+ accuracy (ACC) is adopted as the metric for performance evaluation.
499
+ 3.1 Performance Evaluation
501
+ In this section, we evaluate the effectiveness of different training strategies on different
502
+ datasets and present the experimental results. The widely-used ImageNet pretrained ResNet-
503
+ 50 is also adopted as a baseline for comparison. Some detailed discussions are presented in
504
+ Supplementary Material.
505
+ Skin Lesion Classification. First, the different training strategies are evaluated on the pub-
506
+ licly available ISIC 2019 dataset. The evaluation results of models finetuned with all train-
507
+ ing data (100%) on the test set are listed in Table 1. The ImageNet pretrained ViT is ob-
508
+ served to surpass the ImageNet pretrained ResNet-50 by a large margin (i.e., +4.8%), which
509
+ demonstrates the superiority of ViT for medical image classification. Compared to the state-
510
+ of-the-art self-supervised learning approaches (i.e., SimSam, BYOL and MoCo V3), our
511
+ token-based BOLT achieves a higher ACC (80.8%). By using the difficulty-awareness loss
512
+ (LDif f ), the ACC of BOLT can be further improved to 81.5%, which outperforms the runner-
513
+ up (MoCo V3) by a margin of +1.2%.
514
+ The primary goal of self-supervised learning is to deal with insufficient
+ training data. Hence, to better verify the superiority of our BOLT approach, we conduct
516
+ an experiment to assess the performance of BOLT pretrained ViTs with different numbers
517
+ of labeled samples used for finetuning (i.e., 10% and 50% in Table 1). It can be observed
518
+ that our BOLT can effectively tackle the situation with few labeled training samples—the
519
+ proposed BOLT with difficulty-awareness loss achieves the best ACC under both 50% and
520
+ 10% settings.
521
+ Knee Fatigue Grading. Consistent with the previous study [20], apart from classification ac-
522
+ curacy, the F1 score is also adopted for performance evaluation. The experimental results on
523
+ the uniform test set are listed in Table 2. As shown, the ViT pretrained with the proposed
524
+ BOLT outperforms the ones using existing self-supervised learning approaches and the Ima-
525
+ geNet pretrained weights, i.e., an ACC of 54.0% is achieved (+2.0% higher than the runner-
529
+ Table 2: The accuracy (ACC and F1 score) presented in percentage (%) of different training
530
+ strategies on knee fatigue grading and diabetic retinopathy grading tasks.
531
+                                  Knee Fatigue Grading   Diabetic Retinopathy Grading
+ Method                           ACC     F1             ACC     F1
+ Train-from-scratch               30.0    23.1           71.0    65.3
+ ImageNet Pretrained              51.0    49.4           83.6    83.2
+ SimSam [5]                       52.0    51.1           84.5    84.3
+ BYOL [12]                        51.0    50.2           84.8    84.7
+ MoCo V3 [7]                      52.0    51.2           84.7    84.3
+ BOLT w/o L_Diff                  52.0    51.2           85.4    85.3
+ BOLT (ours)                      54.0    53.6           85.9    85.8
+ ImageNet Pretrained ResNet-50    36.0    31.7           81.7    82.0
577
+ up). A similar trend to ISIC 2019 is observed: the ACC of the ImageNet pretrained ViT (51%)
578
+ is significantly higher than that of ImageNet pretrained ResNet-50 (36%), demonstrating
579
+ the effectiveness of the ViT backbone. We notice that the improvements over train-from-scratch
580
+ yielded by pretraining are more obvious on our knee fatigue grading dataset (over +20%),
581
+ compared to the skin lesion classification task. The reason may be that the target set of knee
582
+ fatigue grading contains fewer training samples (around 1,000 X-ray images); thus, it is more
+ difficult to train the model well from scratch, compared to the skin lesion classification task
584
+ with a target set of 4,260 images.
585
+ Diabetic Retinopathy Grading. Consistent with [22], we split the APTOS 2019 dataset into
586
+ five folds for cross-validation and adopt the F1 score for performance evaluation. The grad-
587
+ ing accuracy of models using different training strategies is shown in Table 2. The proposed
588
+ BOLT pretrained ViT achieves the best ACC (85.9%) and F1 score (85.8%) among the listed
589
+ approaches, which are +1.1% and +1.1% higher than the original BYOL, respectively.
590
+ 4 Conclusion
592
+ In this paper, a self-supervised learning approach, termed Bootstrap Own Latent of Trans-
593
+ former (BOLT), was proposed specifically for medical image classification with the vision
594
+ Transformer backbone. The proposed BOLT involved online and target branches, which ex-
595
+ tracted the self-supervised representation from raw data via contrastive learning. Concretely,
596
+ the online network was trained to predict the target network representation of the same patch
597
+ embedding tokens with a different perturbation. Furthermore, we proposed an auxiliary dif-
598
+ ficulty ranking task to enable the vision Transformer to exploit diverse information from the
599
+ limited medical data. The difference between the original patch embedding tokens and the
600
+ perturbed ones was calculated as the difficulty measurement (i.e., the larger difference means
601
+ more difficult for the vision Transformer to process), which was then adopted as the supervi-
602
+ sion signal for self-supervised learning. The vision Transformer was trained to identify the
603
+ branch (online/target) processing for the more difficult perturbed tokens, which enabled it
604
+ to distill the transformation-invariant features from the perturbed tokens. The experimental
605
+ results on three medical image classification tasks (i.e., skin lesion classification, knee fa-
606
+ tigue fracture grading and diabetic retinopathy grading) demonstrated the effectiveness of the
607
+ proposed BOLT. We notice several limitations of this study and plan to address them in the
608
+ future work:
609
+ Extension to Medical Image Segmentation Task. The proposed BOLT can be easily ex-
613
+ tended to medical image segmentation in a similar way to [40], i.e., pretraining the encoder
614
+ and using a random initialization for the decoder. Yet, the randomly initialized decoder may
615
+ neutralize the performance improvement. Therefore, we plan to explore a more effective
616
+ way of extending our pretrained ViTs to the medical image segmentation task in the future.
617
+ Pretrained Weights for ViT Variants. Recently, many powerful ViT-based backbones, such
618
+ as Swin Transformer [23], have been proposed. The weights of these ViT variants pretrained
619
+ on our large-scale fundus image dataset will be continuously provided in the future.
620
+ References
621
+ [1] Sara Atito, Muhammad Awais, and Josef Kittler. SiT: Self-supervised vision Trans-
622
+ former. arXiv preprint arXiv:2104.03602, 2021.
623
+ [2] H. Bao, L. Dong, and F. Wei. BEiT: BERT pre-training of image Transformers. arXiv
624
+ preprint arXiv:2106.08254, 2021.
625
+ [3] M. Caron, H. Touvron, I. Misra, H. Jegou, J. Mairal, P. Bojanowski, and
626
+ A. Joulin. Emerging properties in self-supervised vision Transformers. arXiv preprint
627
+ arXiv:2104.14294, 2021.
628
+ [4] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple
629
+ framework for contrastive learning of visual representations. In International Confer-
630
+ ence on Machine Learning, 2020.
631
+ [5] Xinlei Chen and Kaiming He. Exploring simple Siamese representation learning. In
632
+ IEEE Conference on Computer Vision and Pattern Recognition, June 2021.
633
+ [6] Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with
634
+ momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020.
635
+ [7] Xinlei Chen, Saining Xie, and Kaiming He.
636
+ An empirical study of training self-
637
+ supervised vision Transformers. arXiv preprint arXiv:2104.02057, 2021.
638
+ [8] Xiangxiang Chu, Zhi Tian, Bo Zhang, Xinlong Wang, Xiaolin Wei, Huaxia Xia, and
639
+ Chunhua Shen.
640
+ Conditional positional encodings for vision Transformers.
641
+ arXiv
642
+ preprint arXiv:2102.10882, 2021.
643
+ [9] Zhigang Dai, Bolun Cai, Yugeng Lin, and Junying Chen. UP-DETR: Unsupervised
644
+ pre-training for object detection with Transformers. In IEEE Conference on Computer
645
+ Vision and Pattern Recognition, 2021.
646
+ [10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiao-
647
+ hua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold,
648
+ Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words:
649
+ Transformers for image recognition at scale. In International Conference on Learning
650
+ Representations, 2021.
651
+ [11] Yunhe Gao, Mu Zhou, and Dimitris Metaxas. UTNet: A hybrid Transformer architec-
652
+ ture for medical image segmentation. arXiv preprint arXiv:2107.00781, 2021.
656
+ [12] Jean-Bastien Grill, Florian Strub, Florent Altche, Corentin Tallec,
657
+ Pierre H.
658
+ Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel
659
+ Guo, Mohammad Gheshlaghi Azar, Bilal Piot, Koray Kavukcuoglu, Remi Munos, and
660
+ Michal Valko. Bootstrap your own latent: A new approach to self-supervised learning.
661
+ In Advances in Neural Information Processing Systems, 2020.
662
+ [13] R. Hadsell, S. Chopra, and Y. LeCun. Dimensionality reduction by learning an invariant
663
+ mapping. In IEEE Conference on Computer Vision and Pattern Recognition, 2006.
664
+ [14] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum con-
665
+ trast for unsupervised visual representation learning. In IEEE Conference on Computer
666
+ Vision and Pattern Recognition, 2020.
667
+ [15] Ge-Peng Ji, Yu-Cheng Chou, Deng-Ping Fan, Geng Chen, Debesh Jha, Huazhu Fu, and
668
+ Ling Shao. Progressively normalized self-attention network for video polyp segmenta-
669
+ tion. arXiv preprint arXiv:2105.08468, 2021.
670
+ [16] Yuanfeng Ji, Ruimao Zhang, Huijie Wang, Zhen Li, Lingyun Wu, Shaoting Zhang, and
671
+ Ping Luo. Multi-compound Transformer for accurate biomedical image segmentation.
672
+ arXiv preprint arXiv:2106.14385, 2021.
673
+ [17] Hoel Kervadec, Jose Dolz, Éric Granger, and Ismail Ben Ayed.
674
+ Curriculum semi-
675
+ supervised segmentation. In International Conference on Medical Image Computing
676
+ and Computer Assisted Intervention, 2019.
677
+ [18] Jack Lanchantin, Tianlu Wang, Vicente Ordonez, and Yanjun Qi. General multi-label
678
+ image classification with Transformers. In IEEE Conference on Computer Vision and
679
+ Pattern Recognition, 2021.
680
+ [19] G. Larsson, M. Maire, and G. Shakhnarovich. Colorization as a proxy task for vi-
681
+ sual understanding. In IEEE Conference on Computer Vision and Pattern Recognition,
682
+ 2017.
683
+ [20] Yuexiang Li, Yanping Wang, Guang Lin, Yi Lin, Dong Wei, Qirui Zhang, Kai Ma,
684
+ Zhiqiang Zhang, and Yefeng Zheng.
685
+ Triplet-branch network with prior-knowledge
686
+ embedding for fatigue fracture grading. In International Conference on Medical Image
687
+ Computing and Computer Assisted Intervention, 2021.
688
+ [21] Zhuoyun Li, Changhong Zhong, Ruixuan Wang, and Wei-Shi Zheng. Continual learn-
689
+ ing of new diseases with dual distillation and ensemble strategy. In International Con-
690
+ ference on Medical Image Computing and Computer Assisted Intervention, 2020.
691
+ [22] Shaoteng Liu, Lijun Gong, Kai Ma, and Yefeng Zheng. GREEN: a graph residual
692
+ re-ranking network for grading diabetic retinopathy. In International Conference on
693
+ Medical Image Computing and Computer Assisted Intervention, 2020.
694
+ [23] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and
695
+ Baining Guo. Swin Transformer: Hierarchical vision Transformer using shifted win-
696
+ dows. arXiv preprint arXiv:2103.14030, 2021.
697
+ [24] M. Noroozi and P. Favaro. Unsupervised learning of visual representations by solving
698
+ Jigsaw puzzles. In European Conference on Computer Vision, 2016.
702
+ [25] M. Noroozi, A. Vinjimoor, P. Favaro, and H. Pirsiavash.
703
+ Boosting self-supervised
704
+ learning via knowledge transfer. In IEEE Conference on Computer Vision and Pattern
705
+ Recognition, 2018.
706
+ [26] Tian Pan, Yibing Song, Tianyu Yang, Wenhao Jiang, and Wei Liu. VideoMoCo: Con-
707
+ trastive video representation learning with temporally adversarial examples. In IEEE
708
+ Conference on Computer Vision and Pattern Recognition, 2021.
709
+ [27] D. Pathak, P. Krähenbühl, J. Donahue, T. Darrell, and A. A. Efros. Context encoders:
710
+ Feature learning by inpainting. In IEEE Conference on Computer Vision and Pattern
711
+ Recognition, 2016.
712
+ [28] Pranav Rajpurkar, Jeremy Irvin, Aarti Bagul, Daisy Ding, Tony Duan, Hershel Mehta,
713
+ Brandon Yang, Kaylie Zhu, Dillon Laird, Robyn L. Ball, Curtis Langlotz, Katie Shpan-
714
+ skaya, Matthew P. Lungren, and Andrew Y. Ng. MURA: Large dataset for abnormal-
715
+ ity detection in musculoskeletal radiographs. In International Conference on Medical
716
+ Imaging with Deep Learning, 2018.
717
+ [29] Xing Tao, Chenglang Yuan, Cheng Bian, Yuexiang Li, Kai Ma, Dong Ni, and Yefeng
718
+ Zheng. The winner of age challenge: Going one step further from keypoint detection
719
+ to scleral spur localization. In IEEE International Symposium on Biomedical Imaging,
720
+ 2021.
721
+ [30] Philipp Tschandl, Cliff Rosendahl, and Harald Kittler. The HAM10000 dataset, a large
722
+ collection of multi-source dermatoscopic images of common pigmented skin lesions.
723
+ Scientific Data, 5(1):1–9, 2018.
724
+ [31] Jeya Maria Jose Valanarasu, Poojan Oza, Ilker Hacihaliloglu, and Vishal M. Patel.
725
+ Medical Transformer: Gated axial-attention for medical image segmentation. arXiv
726
+ preprint arXiv:2102.10662, 2021.
727
+ [32] Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong
728
+ Lu, Ping Luo, and Ling Shao. Pyramid vision Transformer: A versatile backbone for
729
+ dense prediction without convolutions. arXiv preprint arXiv:2102.12122, 2021.
730
+ [33] Xinlong Wang, Rufeng Zhang, Chunhua Shen, Tao Kong, and Lei Li. Dense contrastive
731
+ learning for self-supervised visual pre-training. In IEEE Conference on Computer Vi-
732
+ sion and Pattern Recognition, 2021.
733
+ [34] Yuqing Wang, Zhaoliang Xu, Xinlong Wang, Chunhua Shen, Baoshan Cheng, Hao
734
+ Shen, and Huaxia Xia. End-to-end video instance segmentation with Transformers. In
735
+ IEEE Conference on Computer Vision and Pattern Recognition, 2021.
736
+ [35] Zhenda Xie, Yutong Lin, Zhuliang Yao, Zheng Zhang, Qi Dai, Yue Cao, and Han Hu.
737
+ Self-supervised learning with Swin Transformers. arXiv preprint arXiv:2105.04553,
738
+ 2021.
739
+ [36] Li Yuan, Yunpeng Chen, Tao Wang, Weihao Yu, Yujun Shi, Zihang Jiang, Francis EH
740
+ Tay, Jiashi Feng, and Shuicheng Yan. Tokens-to-Token ViT: Training vision Trans-
741
+ formers from scratch on ImageNet. arXiv preprint arXiv:2101.11986, 2021.
745
+ [37] P. Zhang, F. Wang, and Y. Zheng. Self supervised deep representation learning for fine-
746
+ grained body part recognition. In International Symposium on Biomedical Imaging,
747
+ 2017.
748
+ [38] Yinglin Zhang, Risa Higashita, Huazhu Fu, Yanwu Xu, Yang Zhang, Haofeng Liu,
749
+ Jian Zhang, and Jiang Liu. A multi-branch hybrid Transformer network for corneal
750
+ endothelial cell segmentation. arXiv preprint arXiv:2106.07557, 2021.
751
+ [39] Sixiao Zheng, Jiachen Lu, Hengshuang Zhao, Xiatian Zhu, Zekun Luo, Yabiao Wang,
752
+ Yanwei Fu, Jianfeng Feng, Tao Xiang, Philip H.S. Torr, and Li Zhang. Rethinking
753
+ semantic segmentation from a sequence-to-sequence perspective with Transformers.
754
+ In IEEE Conference on Computer Vision and Pattern Recognition, 2021.
755
+ [40] Jiuwen Zhu, Yuexiang Li, Yifan Hu, Kai Ma, S. Kevin Zhou, and Yefeng Zheng. Ru-
756
+ bik’s cube+: A self-supervised feature learning framework for 3D medical image anal-
757
+ ysis. Medical Image Analysis, 64:101746, 2020.
758
+ [41] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai.
759
+ De-
760
+ formable DETR: Deformable Transformers for end-to-end object detection.
761
+ arXiv
762
+ preprint arXiv:2010.04159, 2020.
763
+
4tAzT4oBgHgl3EQfEPpQ/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
5dE2T4oBgHgl3EQfOgbi/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:137ae83a69bef31f7b7014f4f2ba821e3912bea0c7b1b6f2acc64cf021d791ea
3
+ size 420825
69AzT4oBgHgl3EQfgPxu/content/2301.01465v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d571d5fd2ce35e03ed65fbd689762be09fa868ccc281f960d18bd8703db46d6b
3
+ size 2853173
69AzT4oBgHgl3EQfgPxu/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:977ae6d2610066b0f8a012f46b0c1fab321a869348bd9b0c36633141526753d4
3
+ size 140904
8tE4T4oBgHgl3EQfdQys/content/tmp_files/2301.05090v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
8tE4T4oBgHgl3EQfdQys/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
9NAzT4oBgHgl3EQfSft5/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:699d659916475d5a176f80d2344c17620a4125e30152ca8c255e934e34f43ad0
3
+ size 3407917
9tAzT4oBgHgl3EQfSvsB/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eec4cbac565c93ae65e064fd979af8365964718ccffdb6d0508dc3cf2285b3ba
3
+ size 4128813
AtE0T4oBgHgl3EQfxwIb/content/2301.02649v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12cd729b1c7b3a87aaae3b27f2439a732fea07a18039f6cf2d53292c0c3023c6
3
+ size 1822541
B9E0T4oBgHgl3EQfyAKb/content/2301.02654v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4150316d83b5f854ab90b5d8cd155afed56eccf57a74ffd12feb5ee0d7c6fe54
3
+ size 571805
B9E0T4oBgHgl3EQfyAKb/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54213c61f603e6f9842d652236a72bade96f5524a63f0efe721d5820caa0e016
3
+ size 236921
B9FRT4oBgHgl3EQfvjiF/content/2301.13635v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e70db94bfeeb716d567ad6fa1de160ac0b36edd8f79608a35e9aff5da5d1c9e5
3
+ size 4727096
B9FRT4oBgHgl3EQfvjiF/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edab6ba1dbf3d7503c80e68c8b81283accaddf3938de40f3fc67f5a5a3886cac
3
+ size 4259885
BdFAT4oBgHgl3EQfsR4z/content/tmp_files/2301.08657v1.pdf.txt ADDED
@@ -0,0 +1,2261 @@
 
1
+ arXiv:2301.08657v1 [cs.FL] 20 Jan 2023
2
+ Certificates for Probabilistic Pushdown Automata
3
+ via Optimistic Value Iteration
4
+ Tobias Winkler and Joost-Pieter Katoen
5
+ RWTH Aachen University, Germany
6
+ Abstract. Probabilistic pushdown automata (pPDA) are a standard
7
+ model for discrete probabilistic programs with procedures and recur-
8
+ sion. In pPDA, many quantitative properties are characterized as least
9
+ fixpoints of polynomial equation systems. In this paper, we study the
10
+ problem of certifying that these quantities lie within certain bounds.
11
+ To this end, we first characterize the polynomial systems that admit
12
+ easy-to-check certificates for validating bounds on their least fixpoint.
13
+ Second, we present a sound and complete Optimistic Value Iteration al-
14
+ gorithm for computing such certificates. Third, we show how certificates
15
+ for polynomial systems can be transferred to certificates for various quan-
16
+ titative pPDA properties. Experiments demonstrate that our algorithm
17
+ computes succinct certificates for several intricate example programs as
18
+ well as stochastic context-free grammars with > 104 production rules.
19
+ Keywords: Probabilistic Pushdown Automata · Probabilistic Model
20
+ Checking · Certified Algorithms · Probabilistic Recursive Programs.
21
+ 1
22
+ Introduction
23
+ Complex software is likely to contain bugs. This applies in particular to model
24
+ checking tools. This is a serious problem, as the possibility of such bugs com-
25
+ promises the trust one can put in the verification results, rendering the process
26
+ of formal modeling and analysis less useful. Ideally, the implementation of a
27
+ model checker should be formally verified itself [15]. However, due to the great
28
+ complexity of these tools, this is often out of reach in practice. Certifying algo-
29
+ rithms [31] mitigate this problem by providing an easy-to-check certificate along
30
+ with their regular output. This means that there exists a verifier that, given the
31
+ input problem, the output, and the certificate, constructs a formal proof that the
32
+ output is indeed correct. The idea is that the verifier is much simpler than the
33
+ algorithm, and thus likely to be bug-free or even amenable to formal verification.
34
+ This paper extends the recent line of research on probabilistic certifica-
35
+ tion [19,23,24,40] to probabilistic pushdown automata [13,30] (pPDA). pPDA and
36
+ related models have applications in, amongst others, pattern recognition [38],
37
+ computational biology [28], and speech recognition [25]. They are moreover a
38
+ natural operational model for programs with procedures, recursion, and (dis-
39
+ crete) probabilistic constructs such as the ability to flip coins. With the advent
43
+ X → a | XYY          x = (1/2)(1 + xy^2)
+ Y → b | X | YY       y = (1/3)(1 + x + y^2)
+ [plot over x, y ∈ [0.4, 1]: the curves of the two equations, the lfp ≈ (0.66, 0.7), and the fixpoint (1, 1)]
62
+ Fig. 1: Left: A stochastic context-free grammar (SCFG) and the associated pos-
63
+ itive polynomial system (PPS) which encodes the termination probabilities of
64
+ each non-terminal, assuming production rules are taken uniformly at random.
65
+ Right: The curves defined by the two equations. The least fixpoint (lfp) is
66
+ ≈ (0.66, 0.70). The thin colored area to the top right of the lfp is the set of
67
+ inductive, i.e., self-certifying upper bounds on the lfp.
68
+ of probabilistic programming [32] as a paradigm for model-based machine learn-
69
+ ing [6], such programs have received lots of attention recently. Moreover, several
70
+ efficient algorithms such as Hoare’s quicksort with randomized pivot selection
71
+ (e.g. [26]) are readily encoded as probabilistic recursive programs.
72
+ A pPDA can be seen as a purely probabilistic variant of a standard pushdown
73
+ automaton: Instead of reading an input word, it takes its transitions randomly
74
+ based on fixed probability distributions over successor states. Quantities of inter-
75
+ est in pPDA include reachability probabilities [13], expected runtimes [8], vari-
76
+ ances [14], satisfaction probabilities of temporal logic formulas [44,41], and others
77
+ (see [7] for an overview). pPDA are equivalent to recursive Markov chains [17].
78
+ One of the difficulties of pPDA is that they induce infinite Markov chains.
79
+ Despite this fact, many interesting quantitative properties are decidable, albeit
80
+ with rather high complexity. Therefore, in the past two decades there have been
81
+ significant research efforts on efficient approximative algorithms for pPDA, espe-
82
+ cially a decomposed variant of Newton iteration [16,27,11,17,12,10,39] which pro-
83
+ vides guaranteed lower, and occasionally upper [10,12] bounds on key quantities.
84
+ However, even though implementations might be complex [43], these algorithms
85
+ do not produce certificates for their results.
86
+ Our technique for certificate generation is an adaption of Optimistic Value
87
+ Iteration [22] (OVI) to the pPDA setting. In a nutshell, OVI computes some
88
+ lower bound ⃗l on the solution—which can be done using an approximative iter-
89
+ ative algorithm—and then optimistically guesses an upper bound ⃗u = ⃗l + ⃗ε and
90
+ verifies that the guess was correct. Originally, OVI was formulated for Markov
91
+ Decision Processes (MDP) where it is used to compute lower and upper bounds
92
+ on minimal or maximal reachability probabilities and expected rewards. The up-
93
+ per bounds computed by OVI have a special property: They are self-certifying
94
+ (also called inductive in this paper). This means that, given the MDP and the
95
+ upper bounds, one can check that the bounds are correct without the need for
96
+ an additional certificate; and this check is conceptually and practically easier
97
+ than finding the bounds in the first place.
101
+ The analysis of pPDA, however, is more involved than that of MDP. In
102
+ MDP, many quantitative properties are characterized as least fixpoints (lfp) of
103
+ piece-wise linear equation systems and can be computed in PTIME via, e.g.,
104
+ LP solving. In pPDA, on the other hand, the equation systems for the same
105
+ properties may contain non-linear polynomials, and the best known complexity
106
+ bounds are usually as high as PSPACE. An example of such a non-linear system
107
+ is illustrated in Figure 1 which shows the translation of a stochastic context-free
108
+ grammar (SCFG; special case of pPDA with a single state) to a polynomial
109
+ equation system encoding termination probabilities. An important observation
110
+ is that the polynomials arising in this context only have positive coefficients.
111
+ Such systems are called positive polynomial systems (PPS) in this paper.
112
+ Applications of PPS beyond the analysis of pPDA include the recent factor
113
+ graph grammars [9] as well as obtaining approximate counting formulas for many
114
+ classes of trees in the framework of analytic combinatorics [18].
115
+ Contributions. In summary, this paper makes the following contributions:
116
+ – We present an optimistic algorithm for computing inductive, self-certifying
117
+ upper bounds of any desired precision ε > 0 on the lfp of a positive poly-
118
+ nomial system. Compared to OVI from [22], the key innovation of our algo-
119
+ rithm is to compute a certain direction ⃗v in which to guess, i.e., the guess is
120
+ ⃗u = ⃗l + ε⃗v rather than ⃗u = ⃗l + ⃗ε. This is to ensure that we eventually hit an
121
+ inductive bound, even if such bounds lie in a very “thin strip” as in Figure 1.
122
+ – We prove that our algorithm is sound and complete in the sense that if a
123
+ (non-trivial) inductive upper bound exists, then such a bound will be found.
124
+ – We show how inductive bounds on the lfp of PPS can be used to certify
125
+ various quantities of interest in pPDA and SCFG, such as non-termination
126
+ or bounds on expected rewards/costs.
127
+ – We implement our algorithm in the software tool pray and compare the new
128
+ technique to an out-of-the-box approach based on SMT solving.
129
+ Related Work. Certification of pPDA has not been addressed explicitly in the
130
+ literature, but some existing technical results go in this direction. We mention
131
+ [17, Prop. 8.7] which yields certificates for non almost-sure termination of SCFG.
132
+ However, checking such certificates is not straightforward as it requires an SCC
133
+ decomposition. The tool PReMo [43] implements iterative algorithms for lower
134
+ bounds, but it supports neither certificates nor upper bounds.
135
+ Beyond pPDA, OVI was recently generalized to stochastic games [1]. Farkas
136
+ certificates for MDP [19] are verified by checking a set of linear constraints, which
137
+ is in spirit similar to our certificates, which require checking a set of polynomial
138
+ constraints. A deductive approach for verifying probabilistic recursive programs
139
+ on the syntax level was studied in [35]. The same paper also includes inductive
140
+ proof rules for verifying upper bounds just like we do. Recently, a higher-order
141
+ generalization of pPDA called PHORS was introduced in [29], and an algorithm
142
+ for finding upper bounds inspired by the Finite Elements method was proposed.
146
+ Paper Outline. We review the relevant background information on PPS in Sec-
147
+ tion 2. Section 3 presents our theoretical results on inductive upper bounds in
148
+ PPS as well as the new Optimistic Value Iteration algorithm. In Section 4 we
149
+ explain how inductive bounds in PPS are used to certify quantitative properties
150
+ of pPDA. The experimental evaluation is in Section 5. We conclude in Section 6.
151
+ 2 Preliminaries
153
+ Notation for Vectors. All vectors in this paper are column vectors and are written
154
+ in boldface, e.g., ⃗u = (u1, . . . , un)T . For vectors ⃗u, ⃗u′, we write ⃗u ≤ ⃗u′ if ⃗u is
155
+ component-wise less than or equal to ⃗u′. Moreover, we write ⃗u < ⃗u′ if ⃗u ≤ ⃗u′
156
+ and ⃗u ̸= ⃗u′, and ⃗u ≺ ⃗u′ if ⃗u is component-wise strictly smaller than ⃗u′. The zero
157
+ vector is denoted ⃗0. The max norm of a vector ⃗u is ||⃗u||∞ = max1≤i≤n |ui|. We
158
+ say that ⃗u is normalized if ||⃗u||∞ = 1.
159
+ Positive Polynomial Systems (PPS). Let n ≥ 1 and ⃗x = (x1, . . . , xn)T be a
160
+ vector of variables. An n-dimensional PPS is an equation system of the form
161
+ x1 = f1(x1, . . . , xn)
162
+ . . .
163
+ xn = fn(x1, . . . , xn)
164
+ where for all 1 ≤ i ≤ n, the function fi is a polynomial with non-negative real
165
+ coefficients. An example PPS is the system x = (1/2)(1 + xy^2), y = (1/3)(1 + x + y^2) from
168
+ Figure 1. We also use vector notation for PPS: ⃗x = ⃗f(⃗x) = (f1(⃗x), . . . , fn(⃗x))T .
169
+ We write R̄_{≥0} = R_{≥0} ∪ {∞} for the extended non-negative reals. By conven-
+ tion, for all a ∈ R̄_{≥0}, a ≤ ∞, a + ∞ = ∞ + a = ∞, and a · ∞ = ∞ · a equals 0 if
+ a = 0 and ∞ otherwise. For n ≥ 1, the partial order (R̄^n_{≥0}, ≤) is a complete lat-
+ tice, i.e., all subsets of R̄^n_{≥0} have an infimum and a supremum. In particular, there
+ exists a least element ⃗0 and a greatest element ⃗∞ = (∞, . . . , ∞)^T. Every PPS
+ induces a monotone function ⃗f : R̄^n_{≥0} → R̄^n_{≥0}, i.e., ⃗u ≤ ⃗v =⇒ ⃗f(⃗u) ≤ ⃗f(⃗v). By
183
+ the Knaster-Tarski fixpoint theorem, the set of fixpoints of ⃗f is also a complete
184
+ lattice, and thus there exists a least fixpoint (lfp) denoted by µ⃗f.
185
+ In general, the lfp µ⃗f is a vector which may contain ∞ as an entry. For
186
+ instance, this happens in the PPS x = x+1. A PPS ⃗f is called feasible if µ⃗f ≺ ⃗∞
187
+ (or equivalently, µ⃗f ∈ R^n_{≥0}), i.e., the lfp is a vector of real numbers. Besides
189
+ existence of the lfp, the Knaster-Tarski theorem also implies the following:
190
+ Lemma 1 (Inductive upper bounds). For all ⃗u ∈ R̄^n_{≥0} it holds that
+ ⃗f(⃗u) ≤ ⃗u   implies   µ⃗f ≤ ⃗u .
+ Such a vector ⃗u with ⃗u ≺ ⃗∞ is called an inductive upper bound.
197
+ Given a feasible PPS ⃗f, find an inductive upper bound ⃗u ≥ µ⃗f.
198
+ Problem statement of this paper
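+ For intuition, here is a minimal Python sketch (ours, not part of the paper) that computes
+ Kleene lower bounds for the PPS of Figure 1 and checks a candidate ⃗u for inductiveness
+ according to Lemma 1:
+ def f(v):
+     x, y = v
+     return (0.5 * (1 + x * y**2), (1 + x + y**2) / 3)
+
+ def kleene(steps=1000):
+     v = (0.0, 0.0)                      # start at the least element
+     for _ in range(steps):
+         v = f(v)                        # monotone; converges to the lfp from below
+     return v
+
+ def is_inductive(u):                    # Lemma 1: f(u) <= u implies lfp <= u
+     return all(a <= b for a, b in zip(f(u), u))
+
+ print(kleene())                         # ≈ (0.66, 0.70)
+ print(is_inductive((0.72, 0.78)))       # True: lies in the thin inductive strip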
202
+ If ⃗f is feasible, then µ⃗f is obviously an inductive upper bound. In Section 3
203
+ we show under which conditions there exist more useful inductive upper bounds.
204
+ A PPS is called clean if µ⃗f ≻ ⃗0. Every PPS can be cleaned in linear time by
205
+ identifying and removing the variables that are assigned 0 in the lfp [17,12].
206
+ Given a PPS ⃗f and a point ⃗u ∈ R^n_{≥0}, we define the Jacobi matrix of ⃗f at ⃗u
+ as the n×n-matrix ∂⃗f(⃗u) with coefficients ∂⃗f(⃗u)_{i,j} = ∂_{x_j} f_i(⃗u), 1 ≤ i, j ≤ n.
211
+ Example 1. Consider the example PPS ⃗fex with variables ⃗x = (x, y)T :
212
+ x = f1(x, y) = y + 0.1
213
+ y = f2(x, y) = 0.2x^2 + 0.8xy + 0.1 .
214
+ The line and the hyperbola defined by these equations are depicted in Figure 2
215
+ on Page 7. The fixpoints of ⃗fex are the intersections of these geometric objects;
216
+ in this case there are two. In particular, ⃗fex is feasible and its lfp is
217
+ µ⃗fex = ( (27 − √229)/50 , (22 − √229)/50 )^T ≈ (0.237, 0.137)^T .
225
+ Therefore, ⃗fex is clean as µ⃗fex ≻ ⃗0. The Jacobi matrix of ⃗fex is
226
+ \[ ∂⃗fex(x, y) = \begin{pmatrix} ∂_x f_1 & ∂_y f_1 \\ ∂_x f_2 & ∂_y f_2 \end{pmatrix} = \begin{pmatrix} 0 & 1 \\ 0.4x + 0.8y & 0.8x \end{pmatrix} . \]
244
+ Note that the lfp µ⃗fex contains irrational numbers. However, we can still give ex-
245
+ act expressions for these numbers (involving square roots) because the fixpoints
246
+ of ⃗fex are the zeros of a quadratic polynomial. In general, however, there are PPS whose lfp
247
+ cannot be expressed using radicals, i.e., square roots, cubic roots, etc. [16]. This
248
+ means that in general, there is no easy way to compute least fixpoints exactly.
249
+ It is thus desirable to provide bounds, which we do in this paper.
250
+
251
+ Matrices and Eigenvectors. Let M be a real n×n-matrix. We say that M is non-
252
+ negative (in symbols: M ≥ 0) if it has no negative entries. M is called irreducible
253
+ if for all 1 ≤ i, j ≤ n there exists 0 ≤ k < n such that (M^k)_{i,j} ≠ 0. It is easy
254
+ to show that M is irreducible iff the directed graph GM = ({1, . . . , n}, E) with
255
+ (i, j) ∈ E iff Mi,j ̸= 0 is strongly connected. A maximal irreducible submatrix
256
+ of M is a square submatrix induced by a strongly connected component of GM.
257
+ The period of a strongly connected M is the length of the shortest cycle in GM.
258
+ It is instructive to note that PPS ⃗x = ⃗f(⃗x) are generalizations of linear equation
259
+ systems of the form ⃗x = M⃗x + ⃗c, with M ≥ 0 and ⃗c ≥ ⃗0. Moreover, note that
260
+ for any PPS ⃗f it holds that ∂ ⃗f(⃗u) ≥ 0 for all ⃗u ≻ ⃗0.
261
+ An eigenvector of an n×n-matrix M with eigenvalue λ ∈ C is a (complex)
262
+ vector ⃗v ̸= ⃗0 satisfying M⃗v = λ⃗v. There are at most n different eigenvalues. The
263
+ spectral radius ρ(M) ∈ R≥0 is the largest absolute value of the eigenvalues of
264
+ M. The following is a fundamental theorem about non-negative matrices:
265
+ Theorem 1 (Perron-Frobenius). Let M ≥ 0 be irreducible.
269
+ (1) M has a strictly positive eigenvector ⃗v ≻ ⃗0 with eigenvalue ρ(M), the spectral
270
+ radius of M, and all other eigenvectors ⃗v′ ≻ ⃗0 are scalar multiples of ⃗v.
271
+ (2) The eigenvalues of M with absolute value ρ(M) are exactly the h numbers
272
+ ρ(M), ξρ(M), . . . , ξ^{h−1}ρ(M), where h is the period of M and ξ is a primitive h-th root of unity.
273
+ The unique eigenvector ⃗v ≻ ⃗0 with ||⃗v||∞ = 1 of an irreducible non-negative
274
+ matrix M is called the Perron-Frobenius eigenvector of M.
275
+ Strongly Connected Components. To each PPS ⃗f we associate a finite directed
276
+ graph G ⃗f = ({x1, . . . , xn}, E), which, intuitively speaking, captures the depen-
277
+ dency structure among the variables. Formally, (xi, xj) ∈ E if the polynomial fi
278
+ depends on xj, i.e., xj appears in at least one term of fi with a non-zero coef-
279
+ ficient. This is equivalent to saying that the partial derivative ∂_{x_j} f_i is not the
282
+ zero polynomial. We say that ⃗f is strongly connected if G ⃗f is strongly connected,
283
+ i.e., for each pair (xi, xj) of variables, there exists a path from xi to xj in G ⃗f.
284
+ For instance, ⃗fex from Example 1 is strongly connected because the dependency
285
+ graph has the edges E = {(x, y), (y, x), (y, y)}. Strong connectivity of PPS is a
286
+ generalization of irreducibility of matrices; indeed, a matrix M is irreducible iff
287
+ the PPS ⃗x = M⃗x is strongly connected. We often use the fact that ∂ ⃗f(⃗u) for
288
+ ⃗u ≻ ⃗0 is irreducible iff ⃗f is strongly connected.
289
+ PPS are usually analyzed in a decomposed fashion by considering the sub-
290
+ systems induced by the strongly connected components (SCCs) of G ⃗f in bottom-
291
+ up order [16]. Here we also follow this approach and therefore focus on strongly
292
+ connected PPS. The following was proved in [17, Lem. 6.5] and later generalized
293
+ in [12, Thm. 4.1] (also see remark below [12, Prop. 5.4] and [17, Lem. 8.2]):
294
+ Theorem 2 ([17,12]). If ⃗f is feasible, strongly connected and clean, then for
295
+ all ⃗u < µ⃗f, we have ρ(∂ ⃗f(⃗u)) < 1. As a consequence, ρ(∂ ⃗f(µ⃗f)) ≤ 1.
296
+ Theorem 2 partitions all PPS ⃗f which satisfy its precondition into two classes:
297
+ Either (1) ρ(∂ ⃗f(µ⃗f)) < 1, or (2) ρ(∂ ⃗f(µ⃗f)) = 1. In the next section we show
298
+ that ⃗f admits non-trivial inductive upper bounds iff it is in class (1).
299
+ Example 2. Reconsider the PPS ⃗fex from Example 1. It can be shown that
300
+ ⃗v = (1, λ1)T where λ1 ≈ 0.557 is an eigenvector of ∂ ⃗fex(µ⃗fex) with eigenvalue λ1.
301
+ Thus by the Perron-Frobenius Theorem, ρ(∂ ⃗fex(µ⃗fex)) = λ1 < 1. As promised,
302
+ there exist inductive upper bounds as can be seen in Figure 2.
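+ The numbers in Example 2 can be reproduced numerically; the following NumPy lines are
+ our own sanity check, not part of the paper.
+ import numpy as np
+
+ mu = np.array([(27 - np.sqrt(229)) / 50, (22 - np.sqrt(229)) / 50])  # lfp of f_ex
+ x, y = mu
+ J = np.array([[0.0, 1.0],
+               [0.4 * x + 0.8 * y, 0.8 * x]])   # Jacobi matrix at the lfp
+ print(max(abs(np.linalg.eigvals(J))))          # spectral radius ≈ 0.557 < 1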
303
+
304
+ 3 Finding Inductive Upper Bounds in PPS
306
+ In this section, we are concerned with the following problem: Given a feasible,
307
+ clean, and strongly connected PPS ⃗f, find a vector ⃗0 ≺ ⃗u ≺ ⃗∞ such that
308
+ ⃗f(⃗u) ≤ ⃗u, i.e., an inductive upper bound on the lfp of ⃗f (see Lemma 1).
+ [plot over x, y ∈ [0.2, 0.8]: the line x = y + 0.1 and the curve y = 0.2x^2 + 0.8xy + 0.1
+ (the PPS ⃗fex), the dashed curve y = 0.2x^2 + 0.8xy + 0.1916 (the PPS ⃗˜fex), the lfps µ⃗fex
+ and µ⃗˜fex, and the cone in direction ⃗v with radius ε]
329
+ Fig. 2: The PPS ⃗fex corresponds to the solid red line and the solid blue curve. Its
330
+ inductive upper bounds form the shaded area above the lfp µ⃗fex. Lemma 2(4)
331
+ ensures that one can fit the gray “cone” pointing in direction of the Perron-
332
+ Frobenius eigenvector ⃗v inside the inductive region. The PPS ⃗˜fex which com-
333
+ prises the dashed curve and the solid line does not have any non-trivial inductive
334
+ upper bounds. Note that the tangent lines at µ⃗˜fex are parallel to each other.
335
+ 3.1 Existence of Inductive Upper Bounds
337
+ An important first observation is that inductive upper bounds other than the
338
+ exact lfp do not necessarily exist. As a simple counter-example consider the 1-
339
+ dimensional PPS x = (1/2)x^2 + 1/2. If u is an inductive upper bound, then
+ (1/2)u^2 + 1/2 ≤ u   =⇒   u^2 − 2u + 1 ≤ 0   =⇒   (u − 1)^2 ≤ 0   =⇒   u = 1 ,
351
+ and thus the only inductive upper bound is the exact lfp u = 1. Another example
352
+ is the PPS ⃗˜fex from Figure 2. What these examples have in common is the
353
+ following property: Their derivative evaluated at the lfp is not invertible. Indeed,
354
+ we have ∂_x((1/2)x^2 + 1/2 − x) = x − 1, and inserting the lfp x = 1 yields zero. The
359
+ higher dimensional generalization of this property to arbitrary PPS ⃗f is that the
360
+ Jacobi matrix of the function ⃗f − ⃗x evaluated at µ⃗f is singular; note that this
361
+ is precisely the matrix ∂ ⃗f(µ⃗f) − I. Geometrically, this means that the tangent
362
+ lines at µ⃗f are parallel, as can be seen in Figure 2 for the example PPS ⃗˜fex. It
363
+ should be intuitively clear from the figure that inductive upper bounds only exist
364
+ if the tangent lines are not parallel. The next lemma makes this more precise:
365
+ Lemma 2 (Existence of inductive upper bounds).
366
+ Let ⃗f be a feasible,
367
+ clean, and strongly connected PPS. Then the following are equivalent:
368
+ (1) The matrix I − ∂ ⃗f(µ⃗f) is non-singular.
369
+ (2) The spectral radius of ∂ ⃗f(µ⃗f) satisfies ρ(∂ ⃗f(µ⃗f)) < 1.
370
+ (3) There exists ⃗0 ≺ ⃗u ≺ ⃗∞ s.t. ⃗f(⃗u) < ⃗u (i.e. ⃗u is inductive but not a fixpoint).
374
+ (4) The matrix ∂ ⃗f(µ⃗f) has a unique (normalized) eigenvector ⃗v ≻ ⃗0 and there
375
+ exist numbers δmax > 0 and ε > 0 s.t.
376
+ ⃗f( µ⃗f + δ · ⃗˜v )  ≤  µ⃗f + δ · ⃗˜v
379
+ holds for all 0 < δ ≤ δmax and vectors ⃗˜v ≥ ⃗v with ||⃗v − ⃗˜v||∞ ≤ ε.
380
+ The proof of Lemma 2 (see appendix) relies on a linear approximation of
381
+ ⃗f via Taylor’s familiar theorem as well as Theorems 1 and 2. Condition (4) of
382
+ Lemma 2 means that there exists a “truncated cone”
383
+ C(µ⃗f,⃗v, ε, δmax) = { µ⃗f + δ⃗˜v | 0 ≤ δ ≤ δmax,⃗˜v ≥ ⃗v, ||⃗˜v − ⃗v||∞ ≤ ε }
384
+ which is entirely contained in the inductive region. This cone is located at the
385
+ lfp µ⃗f and points in the direction of the Perron-Frobenius eigenvector ⃗v, as
386
+ illustrated in Figure 2 (assuming δmax = 1 for simplicity). The length δmax
387
+ and the radius ε of the cone depend quantitatively on ρ(∂ ⃗f(µ⃗f)), but for our
388
+ purposes it suffices that they are non-zero. The idea of our Optimistic Value
389
+ Iteration is to construct a sequence of guesses that eventually hits this cone.
390
+ 3.2 The Optimistic Value Iteration Algorithm
392
+ The basic idea of Optimistic Value Iteration (OVI) can be applied to monotone
393
+ functions of the form ⃗φ : R^n_{≥0} → R^n_{≥0} (in [22], ⃗φ is the Bellman operator of an
396
+ MDP). Kleene’s fixpoint theorem suggests a simple method for approximating
397
+ the lfp µ⃗φ from below: Simply iterate ⃗φ starting at ⃗0, i.e., compute the sequence
398
+ ⃗l0 = ⃗0, ⃗l1 = ⃗φ(⃗l0), ⃗l2 = ⃗φ(⃗l1), etc.1 In the context of MDP, this iterative scheme
399
+ is known as Value Iteration (VI). VI is easy to implement, but it is difficult
400
+ to decide when to stop the iteration. In particular, standard stopping criteria
401
+ such as small absolute difference of consecutive approximations are formally un-
402
+ sound [20]. OVI and other algorithms [3,36] cope with this problem by computing
403
+ not only a lower but also an upper bound on µ⃗φ. In the case of OVI, an upper
404
+ bound with absolute error ≤ ε is obtained as follows (we omit some details):
405
+ (1) Compute ⃗lk ≤ µ⃗φ such that ||⃗lk −⃗lk−1||∞ ≤ τ, for some (small) τ > 0.
406
+ (2) Guess a candidate upper bound ⃗u = ⃗lk + ⃗ε.
407
+ (a) If ⃗φ(⃗u) ≤ ⃗u holds, i.e., ⃗u is inductive, then return ⃗u.
408
+ (b) If not, refine ⃗u (see [22] for details). If the refined ⃗u is still not inductive,
409
+ then go back to step (1) and try again with 0 < τ ′ < τ.
410
+ We present our variant of OVI for PPS as Algorithm 1. The main differences
411
+ to the above scheme are that (i) we do not insist on Kleene iteration for obtaining
412
+ the lower bounds ⃗l, and (ii) we approximate the eigenvector ⃗v from condition (4)
413
+ of Lemma 2 and compute the “more informed” guesses ⃗u = ⃗l + ε⃗v, for various ε.
414
+ Refining the guesses as original OVI does is not necessary (but see our remarks
415
+ in Section 3.3 regarding floating point computations).
416
+ 1 In order for the Kleene sequence to converge to the lfp, i.e., lim_{k→∞} ⃗l_k = µ⃗φ, it suffices
417
+ that ⃗φ is ω-continuous. This already implies monotonicity.
421
+ Algorithm 1: Optimistic Value Iteration (OVI) for PPS
+ input:       strongly connected clean PPS ⃗f; maximum abs. error ε > 0
+ output:      a pair (⃗l, ⃗u) of real vectors s.t. ⃗l ≤ µ⃗f, ⃗f(⃗u) ≤ ⃗u (hence µ⃗f ≤ ⃗u), and ||⃗l − ⃗u||∞ ≤ ε
+ termination: guaranteed if ⃗f is feasible and I − ∂⃗f(µ⃗f) is non-singular
+  1  ⃗l ← ⃗0 ; N ← 0 ;
+  2  τ ← ε ;                                  /* τ is the current tolerance */
+  3  while true do
+  4      ⃗l′ ← improveLowerBound(⃗f, ⃗l) ;      /* e.g. Kleene or Newton update */
+         /* guess and verify phase starts here */
+  5      if ||⃗l − ⃗l′||∞ ≤ τ then
+  6          ⃗v ← approxEigenvec(∂⃗f(⃗l), τ) ;  /* recall ⃗v is normalized */
+  7          for k from 0 to N do
+  8              ⃗u ← ⃗l + d^k ε · ⃗v ;         /* optimistic guess, d ∈ (0, 1) */
+  9              if ⃗f(⃗u) ≤ ⃗u then
+ 10                  return (⃗l, ⃗u) ;         /* guess was successful */
+ 11          N ← N + 1 ;
+ 12          τ ← c · τ ;                      /* decrease tolerance for next guess, c ∈ (0, 1) */
+ 13      ⃗l ← ⃗l′ ;
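+ To make the control flow concrete, the following is an illustrative Python rendering of
+ Algorithm 1 (a sketch of ours, not the authors' tool pray), instantiated for the PPS ⃗fex of
+ Example 1; improveLowerBound is a plain Kleene update, approxEigenvec is power iteration
+ on M + I (cf. Section 3.3), and the tolerance handling is simplified.
+ import numpy as np
+
+ def f(v):                                   # x = y + 0.1, y = 0.2x^2 + 0.8xy + 0.1
+     x, y = v
+     return np.array([y + 0.1, 0.2 * x**2 + 0.8 * x * y + 0.1])
+
+ def jacobian(v):
+     x, y = v
+     return np.array([[0.0, 1.0],
+                      [0.4 * x + 0.8 * y, 0.8 * x]])
+
+ def approx_eigenvec(M, iters=200):          # power iteration on M + I
+     v = np.ones(M.shape[0])
+     for _ in range(iters):
+         w = (M + np.eye(M.shape[0])) @ v
+         v = w / np.max(np.abs(w))           # normalize in the max norm
+     return v
+
+ def ovi(eps=0.1, c=0.5, d=0.5):
+     l, N, tau = np.zeros(2), 0, eps
+     while True:
+         l_new = f(l)                        # improveLowerBound (Kleene update)
+         if np.max(np.abs(l - l_new)) <= tau:
+             v = approx_eigenvec(jacobian(l))
+             for k in range(N + 1):
+                 u = l + d**k * eps * v      # optimistic guess
+                 if np.all(f(u) <= u):       # float check; the paper re-verifies
+                     return l, u             # this with exact rational arithmetic
+             N, tau = N + 1, c * tau
+         l = l_new
+
+ l, u = ovi()
+ print(l, u)                                 # l ≤ µf ≤ u and ||l − u||∞ ≤ ε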
459
+ The functions improveLowerBound and approxEigenvec used in Algorithm 1
460
+ must satisfy the following contracts:
461
+ – The sequence ⃗l0 = ⃗0, ⃗li+1 = improveLowerBound(⃗f,⃗li) is a monotonically
462
+ increasing sequence converging to the lfp µ⃗f.
463
+ – approxEigenvec must satisfy the following: Let M ≥ 0 be an irreducible
464
+ matrix with (normalized) Perron-Frobenius eigenvector ⃗v ≻ ⃗0. Then for all
465
+ ε > 0, we require that there exists τ > 0 such that ||approxEigenvec(M, τ)−
466
+ ⃗v||∞ ≤ ε. In words, approxEigenvec approximates ⃗v up to arbitrarily small
467
+ absolute error if the tolerance τ is chosen sufficiently small.
468
+ In practice, both the Kleene and the Newton [16,17,12] update operator can
469
+ be used to implement improveLowerBound. We outline a possible implementa-
470
+ tion of approxEigenvec further below in Section 3.3.
471
+ Example 3. Consider the following PPS ⃗f: x = (1/4)x^2 + 1/8, y = (1/4)xy + (1/4)y + 1/4. The
+ table illustrates the execution of Algorithm 1 on ⃗f with ε = 0.1 and c = 0.5:
+ #   N   τ      ⃗l            ⃗l′             ||⃗l − ⃗l′||∞    ⃗v            ⃗u             ⃗f(⃗u) ≤ ⃗u
+ 1   0   0.1    (0, 0)       (0.4, 0.3)     0.4
+ 2   0   0.1    (0.4, 0.3)   (0.5, 0.4)     0.1            (1.0, 0.8)   (0.5, 0.38)    ✗
+ 3   1   0.05   (0.5, 0.4)   (0.55, 0.41)   0.05           (1.0, 0.9)   (0.6, 0.49)    ✓
509
+ The algorithm has to improve the lower bound 3 times (corresponding to the
510
+ 3 lines of the table). After the second improvement, the difference between the
511
+ current lower bound ⃗l2 and the new bound ⃗l′2 does not exceed the current tol-
512
+ erance τ2 = 0.1 and the algorithm enters the optimistic guessing stage. The first
513
+ guess ⃗u2 is not successful. The tolerance is then decreased to τ3 = c · τ2 = 0.05
514
+ and the lower bound is improved to ⃗l′3. The next guess ⃗u3 is inductive.
515
+
516
+ Theorem 3. Algorithm 1 is correct: when invoked with a strongly connected
517
+ clean PPS ⃗f and ε > 0, then (if it terminates) it outputs a pair (⃗l, ⃗u) s.t. ⃗l ≤ µ⃗f,
518
+ ⃗f(⃗u) ≤ ⃗u (and thus µ⃗f ≤ ⃗u), and ||⃗l − ⃗u||∞ ≤ ε. Moreover, if ⃗f is feasible and
519
+ I − ∂ ⃗f(µ⃗f) is non-singular, then the algorithm terminates.
520
+ The proof of Theorem 3 (see appendix) crucially relies on condition (4) of Lemma 2, which assures the existence of a “truncated cone” of inductive bounds centered around the Perron-Frobenius eigenvector of ∂ ⃗f(µ⃗f) (see Figure 2 for an illustration). Intuitively, since the lower bounds ⃗l computed by the algorithm approach the lfp µ⃗f, the eigenvectors of ∂ ⃗f(⃗l) approach those of ∂ ⃗f(µ⃗f). As a consequence, it is guaranteed that the algorithm eventually finds an eigenvector that intersects the cone. The inner loop starting on line 7 is needed because the “length” of the cone is a priori unknown; the purpose of the loop is to scale the eigenvector down so that it is ultimately small enough to fit inside the cone.
+ 3.3 Considerations for Implementing OVI
+ As mentioned above, there are at least two options for improveLowerBound: Kleene or Newton iteration. We now show that approxEigenvec can be effectively implemented as well. Further below we make some remarks on floating point arithmetic.
+ Approximating the Eigenvector. A possible implementation of approxEigenvec relies on the power iteration method (e.g. [37, Thm. 4.1]). Given a square matrix M and an initial vector ⃗v0 with M⃗v0 ̸= ⃗0, power iteration computes the sequence (⃗v_i)_{i≥0} such that for i > 0, ⃗v_i = M⃗v_{i−1}/||M⃗v_{i−1}||∞.
+ Lemma 3. Let M ≥ 0 be irreducible. Then power iteration applied to M + I and any ⃗v0 > ⃗0 converges to the Perron-Frobenius eigenvector ⃗v ≻ ⃗0 of M.
+ The convergence rate of power iteration is determined by the ratio |λ2|/|λ1|, where λ1 and λ2 are the eigenvalues of largest and second largest absolute value, respectively. Each time approxEigenvec is called in Algorithm 1, the result of the previous call to approxEigenvec (if available) may be used as an initial approximation ⃗v0.
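+ A minimal sketch of such an implementation of approxEigenvec; the function name, the termination criterion, and the iteration cap are our own choices:
+ import numpy as np
+
+ def approx_eigenvec(M, tau, v0=None, max_iter=10**6):
+     # Power iteration on M + I: by Lemma 3 this converges to the
+     # Perron-Frobenius eigenvector of an irreducible M >= 0.
+     A = M + np.eye(M.shape[0])
+     v = np.ones(M.shape[0]) if v0 is None else v0  # v0: previous result, if available
+     for _ in range(max_iter):
+         w = A @ v
+         w = w / np.max(np.abs(w))                  # normalize in the maximum norm
+         if np.max(np.abs(w - v)) <= tau:
+             break
+         v = w
+     return w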
+ Exact vs Floating Point Arithmetic. So far we have assumed exact arithmetic for the computations in Algorithm 1, but an actual implementation should use floating point arithmetic for efficiency. However, this may (and actually does) lead to unsound results. More specifically, the condition ⃗f(⃗u) ≤ ⃗u may hold in floating point arithmetic even though it is actually violated. As a remedy, we propose to nevertheless run the algorithm with floats, but then verify its output ⃗u with exact arbitrary-precision rational arithmetic. That is, we compute a rational number approximation ⃗uQ of ⃗u and check ⃗f(⃗uQ) ≤ ⃗uQ with exact arithmetic. If the check fails, we resort to the following refinement scheme, which is an instance of the general k-induction principle for complete lattices from [5]: We iteratively check the conditions
+ ⃗f(⃗uQ ⊓ ⃗f(⃗uQ)) ≤ ⃗uQ ,   ⃗f(⃗uQ ⊓ ⃗f(⃗uQ ⊓ ⃗f(⃗uQ))) ≤ ⃗uQ ,   and so on,
+ where ⊓ denotes pointwise minimum. If one of the checks is satisfied, then µ⃗f ≤ ⃗uQ [5]. This scheme often works well in practice (see Section 5). The original OVI from [22] uses a similar technique to refine its guesses.
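+ A sketch of this verification step using Python's exact rational arithmetic; the rationalization via limit_denominator and the bound on the number of refinement rounds are our own choices:
+ from fractions import Fraction
+
+ def verify(f, u_float, rounds=5):
+     # Rationalize the floating point candidate, then check f(u) <= u exactly;
+     # on failure, iterate cand := cand ⊓ f(cand) and re-check f(cand) <= u
+     # (the k-induction scheme from [5]; equivalent by monotonicity of f).
+     u = [Fraction(x).limit_denominator(10**9) for x in u_float]
+     cand = u
+     for _ in range(rounds):
+         fc = f(cand)                                  # exact evaluation
+         if all(y <= x for y, x in zip(fc, u)):
+             return u                                  # mu f <= u is certified
+         cand = [min(a, b) for a, b in zip(cand, fc)]  # pointwise minimum
+     return None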
+ 4 Certificates for Probabilistic Pushdown Automata
+ This section shows how the results from Section 3 can be applied to pPDA. We introduce some additional notation. For finite sets A, D(A) denotes the set of probability distributions on A. We often denote tuples or triples without parentheses and separating commas when this causes no confusion, e.g., we may write ab rather than (a, b).
+ Definition 1 (pPDA [13]). A probabilistic pushdown automaton (pPDA) is a triple ∆ = (Q, Γ, P) where Q ̸= ∅ is a finite set of states, Γ ̸= ∅ is a finite stack alphabet, and P : Q × Γ → D(Q × Γ^{≤2}) is a probabilistic transition function.
+ In the following, we often write qZ −p→ rα instead of P(qZ)(rα) = p [13]. Intuitively, qZ −p→ rα means that if the pPDA is in state q and Z is on top of the stack, then with probability p, the pPDA moves to state r, pops Z, and pushes α on the stack. More formally, the semantics of a pPDA ∆ = (Q, Γ, P) is a countably infinite Markov chain with state space Q × Γ^∗ and transition probability matrix M such that for all q, r ∈ Q, Z ∈ Γ, α ∈ Γ^{≤2}, γ ∈ Γ^∗, we have
+ M(qZγ, rαγ) = P(qZ)(rα) ,   M(qε, qε) = 1 ,
+ and all other transition probabilities are zero. This Markov chain, where the initial state is fixed to qZ, is denoted M^qZ_∆ (see Figure 3 for an example). As usual, one can formally define a probability measure P^qZ_∆ on the infinite runs of M^qZ_∆ via the standard cylinder construction (e.g., [2, Sec. 10]).
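+ For illustration, one possible Python encoding of a pPDA and its induced chain (the representation is ours; the transition function below is that of ∆ex from Figure 3):
+ from fractions import Fraction as F
+
+ # P maps (state, top stack symbol) to a distribution over (state, pushed word).
+ P = {("q", "Z"): {("q", "ZZ"): F(1, 4), ("q", ""): F(1, 2), ("r", ""): F(1, 4)},
+      ("r", "Z"): {("r", ""): F(1, 1)}}
+
+ def successors(state, stack):
+     if not stack:                       # the empty stack is absorbing
+         return {(state, stack): F(1)}
+     return {(s, alpha + stack[1:]): p   # pop the top symbol, push alpha
+             for (s, alpha), p in P[(state, stack[0])].items()}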
+ Consider a triple qZr ∈ Q × Γ × Q. We define the return probability² [qZr] as the probability of reaching rε in the Markov chain M^qZ_∆, i.e., [qZr] = P^qZ_∆(♦{rε}), where ♦{rε} is the set of infinite runs of M^qZ_∆ that eventually hit state rε.
+ ² When modeling procedural programs with pPDA, [qZr] is the probability that a given procedure returns a specific value to its calling context. These probabilities were called termination probabilities in previous works [12,7], but we believe this term is more appropriate for the numbers [qZ↓] = Σ_r [qZr], i.e., the probability to eventually reach the empty stack from initial configuration qZ.
+ [Figure 3 (graphics omitted): top left, the pPDA ∆ex over states q, r; top right, a fragment of the infinite Markov chain over configurations qZ, qZZ, . . . and rZ, rZZ, . . . with the indicated transition probabilities.]
+ ⟨qZq⟩ = 1/4 (⟨qZq⟩⟨qZq⟩ + ⟨qZr⟩⟨rZq⟩) + 1/2        ⟨rZq⟩ = 0
+ ⟨qZr⟩ = 1/4 (⟨qZq⟩⟨qZr⟩ + ⟨qZr⟩⟨rZr⟩) + 1/4        ⟨rZr⟩ = 1
+ Fig. 3: Top left: The pPDA ∆ex = ({q, r}, {Z}, P) where P comprises the transitions qZ −1/4→ qZZ, qZ −1/2→ qε, qZ −1/4→ rε, rZ −1→ rε. Top right: A fragment of the infinite underlying Markov chain, assuming initial configuration qZ. Bottom: The associated equation system from Theorem 4.
+ Theorem 4 (The PPS of return probabilities [13]). Let ∆ = (Q, Γ, P) be a pPDA and (⟨qZr⟩)_{qZr ∈ Q×Γ×Q} be variables. For each ⟨qZr⟩, define
+ ⟨qZr⟩ = Σ_{qZ −p→ sYX} p · Σ_{t∈Q} ⟨sY t⟩ · ⟨tXr⟩  +  Σ_{qZ −p→ sY} p · ⟨sY r⟩  +  Σ_{qZ −p→ rε} p
+ and call the resulting PPS ⃗f∆. Then µ⃗f∆ = ([qZr])_{qZr ∈ Q×Γ×Q}.
+ We refer to [30, Sec. 3] for an intuitive explanation of the equations in ⃗f∆.
+ Example 4. Figure 3 shows a pPDA ∆ex and the associated PPS ⃗f∆ex. The least non-negative solution is ⟨qZq⟩ = 2 − √2 ≈ 0.586 and ⟨qZr⟩ = √2 − 1 ≈ 0.414 (and, of course, ⟨rZq⟩ = 0, ⟨rZr⟩ = 1). Thus by Theorem 4, the return probabilities are [qZq] = 2 − √2 and [qZr] = √2 − 1.
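+ The lfp can be approximated by Kleene iteration from ⃗0; a quick numeric check of the example's values (our own sketch, not part of the paper):
+ def f(x, y):            # <qZq>, <qZr>; <rZq> = 0 and <rZr> = 1 are substituted
+     return 0.25 * x * x + 0.5, 0.25 * (x * y + y) + 0.25
+
+ x = y = 0.0
+ for _ in range(200):    # Kleene iteration converges to the lfp from below
+     x, y = f(x, y)
+ print(x, y)             # ~0.5858 = 2 - sqrt(2) and ~0.4142 = sqrt(2) - 1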
+ The PPS ⃗f∆ is always feasible (because µ⃗f∆ ≤ ⃗1). However, ⃗f∆ is neither necessarily strongly connected nor clean. Let ⃗f̂∆ denote the cleaned-up version of ⃗f∆.
+ Proposition 1 (Basic Certificates for pPDA). A basic certificate for ∆ = (Q, Γ, P) is a rational inductive upper bound ⃗u ∈ Q^{Q×Γ×Q}_{≥0} on the lfp of the return probabilities system ⃗f∆ (see Thm. 4). Basic certificates have the following properties:
+ – (Existence) For all ε > 0 there exists a basic certificate ⃗u with ||µ⃗f∆ − ⃗u||∞ ≤ ε if all maximal irreducible submatrices M of ∂ ⃗f̂∆(µ⃗f̂∆) satisfy ρ(M) < 1.
+ – (Complexity) Let β be the maximum number of bits used to encode any of the numerators and denominators of the fractions occurring in ⃗u ∈ Q^{Q×Γ×Q}_{≥0}. Then checking ⃗f∆(⃗u) ≤ ⃗u, i.e., whether ⃗u is a basic certificate for ∆, can be done in time polynomial in β and the size of ∆.
+ Existence of basic certificates follows from Lemma 2 applied to each SCC of the cleaned-up version of ⃗f∆ individually. However, note that in order to merely check the certificate, i.e., to verify the inequality ⃗f∆(⃗u) ≤ ⃗u, neither do the SCCs need to be computed nor does the system have to be cleaned up.
+ Example 5. Reconsider the example pPDA and its associated (non-strongly connected) system of return probabilities from Figure 3. We verify that ⃗u_qZq = 3/5 and ⃗u_qZr = 1/2 (as well as ⃗u_rZq = 0, ⃗u_rZr = 1) is a basic certificate:
+ 1/4 (3/5 · 3/5 + 1/2 · 0) + 1/2 = 59/100 ≤ 3/5 ,   1/4 (3/5 · 1/2 + 1/2 · 1) + 1/4 = 45/100 ≤ 1/2 .
+ Note that [qZq] ≈ 0.586 ≤ 3/5 = 0.6 and [qZr] ≈ 0.414 ≤ 1/2 = 0.5.
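+ The check is easily mechanized with exact rational arithmetic, as advocated in Section 3.3; a minimal sketch for this example:
+ from fractions import Fraction as F
+
+ u = {"qZq": F(3, 5), "qZr": F(1, 2), "rZq": F(0), "rZr": F(1)}
+
+ def f(u):   # right-hand sides of the return-probability system from Figure 3
+     return {"qZq": F(1, 4) * (u["qZq"]**2 + u["qZr"] * u["rZq"]) + F(1, 2),
+             "qZr": F(1, 4) * (u["qZq"] * u["qZr"] + u["qZr"] * u["rZr"]) + F(1, 4),
+             "rZq": F(0), "rZr": F(1)}
+
+ assert all(f(u)[k] <= u[k] for k in u)   # u is inductive, hence a basic certificate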
+ In the following, we outline how a variety of key quantities associated with pPDA can be verified using basic certificates. More details are in the appendix.
+ Upper Bounds on Temporal Properties. We may use basic certificates to verify that a bad state rbad is reached with low probability, e.g., at most p = 0.01. To this end, we remove the outgoing transitions of rbad and add the transitions rbadZ −1→ rbadε for all Z ∈ Γ. Clearly, rbad is reached with probability at most p from initial configuration qZ iff [qZrbad] ≤ p. The results of [13] imply that this idea can be generalized to until-properties of the form C1 U C2, where C1 and C2 are regular sets of configurations. (This requires a small extension of the basic certificates, but the overall idea stays the same.)
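+ The transformation itself is a one-liner over the transition function; a sketch on the dictionary representation used earlier (make_absorbing is our name):
+ def make_absorbing(P, Gamma, r_bad):
+     # Drop r_bad's outgoing transitions and let it empty the stack w.p. 1,
+     # so that reaching r_bad becomes equivalent to terminating in r_bad.
+     P2 = {(q, Z): dist for (q, Z), dist in P.items() if q != r_bad}
+     for Z in Gamma:
+         P2[(r_bad, Z)] = {(r_bad, ""): 1}
+     return P2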
+ Certificates for the Output Distribution. Once a pPDA reaches the empty stack, we say that it has terminated. When modeling procedural programs, this corresponds to returning from a program’s main procedure. Assuming initial configuration qZ, the probability sub-distribution over the possible return values is then given by the return probabilities {[qZr] | r ∈ Q}. Missing probability mass models the probability of non-termination. A basic certificate can thus be used immediately to verify a point-wise upper bound on the output distribution as well as to certify that a program is not almost-surely terminating (AST). If a pPDA ∆ is already known to be AST, then we can also certify a lower bound on the output distribution: Suppose that ⃗u is a basic certificate for ∆ and assume that ∆ is AST from initial configuration qZ. Define ε = Σ_{r∈Q} ⃗u_qZr − 1. Then for all r ∈ Q, we have ⃗u_qZr − ε ≤ [qZr] ≤ ⃗u_qZr.
+ Example 6. The pPDA ∆ex from Figure 3 is AST from initial configuration qZ, as the transition qZ −1/4→ rε is eventually taken with probability 1, and the stack is emptied certainly once r is reached. Using the basic certificate from Example 5 we can thus (correctly) certify that 0.5 ≤ [qZq] ≤ 0.6 and 0.4 ≤ [qZr] ≤ 0.5.
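+ The lower bounds of Example 6 follow purely arithmetically from the certificate under the AST assumption; a two-line check:
+ u_qZ = {"q": 0.6, "r": 0.5}              # certificate entries u_qZr from Example 5
+ eps = sum(u_qZ.values()) - 1             # excess mass, here 0.1
+ print({r: u - eps for r, u in u_qZ.items()})   # {'q': 0.5, 'r': 0.4}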
+ Certificates for Expected Rewards or Costs. Suppose we have equipped a pPDA with a state-based reward (or cost) function Q → R≥0. It was shown in [14] that the expected total reward accumulated during the run of a pPDA is the solution of a linear equation system in which the return probabilities [qZr] appear as coefficients. Given a basic certificate ⃗u, we can replace each coefficient [qZr] by ⃗u_qZr and thus obtain an equation system whose solution is an over-approximation of the true expected reward. We may extend the basic certificate ⃗u by the solution of this linear system to make verification straightforward. Note that a program’s expected runtime [8,35] is a special case of total expected reward.
+ 5 Implementation and Experiments
+ Our Tool: pray. We implemented our algorithm in the prototypical Java tool pray (Probabilistic Recursion AnalYzer). It supports two input formats: (i) recursive probabilistic programs in a Java-like syntax (e.g. Figure 4); these programs are automatically translated to pPDA; (ii) explicit PPS in the same syntax used by the tool PReMo [43]. The output of pray is a rational inductive upper bound on the lfp of the return probability PPS of the input program’s pPDA model (a basic certificate), or on the lfp of the explicitly given PPS. The absolute precision ε is configurable. The implementation works as follows:
+ (1) It parses the input and, if the latter was a program, constructs a pPDA model and the associated PPS of return probabilities.
+ (2) It computes an SCC decomposition of the PPS under consideration using standard algorithms implemented in the jGraphT library [33].
+ (3) It applies Algorithm 1 to the individual SCCs in reverse topological order using floating point arithmetic. Algorithm 1 is instantiated with Kleene iteration³, the power iteration for approximating eigenvectors as outlined in Section 3.3, and constants c = 0.1, d = 0.5. We allow ≤ 10 guesses per SCC.
+ (4) If stage (3) is successful, the tool verifies the resulting floating point certificate using exact rational number arithmetic as described in Section 3.3.
+ Baselines. To the best of our knowledge, no alternative techniques for finding inductive upper bounds in PPS have been described explicitly in the literature. However, there is an (almost) out-of-the-box approach using an SMT solver: Given a PPS ⃗x = ⃗f(⃗x), compute some lower bound ⃗l ≤ µ⃗f using an iterative technique. Then query the SMT solver for a model (variable assignment) of the quantifier-free first-order logic formula ϕ⃗f(⃗x) = ⋀_{i=1}^n f_i(⃗x) ≤ x_i ∧ ⃗l_i ≤ x_i ≤ ⃗l_i + ε in the (decidable) theory of polynomial real arithmetic with inequality (aka QF_NRA in the SMT community). If such a model ⃗u exists, then clearly µ⃗f ≤ ⃗u and ||⃗l − ⃗u||∞ ≤ ε. If no model exists, then improve ⃗l and try again. We have implemented this approach using the state-of-the-art SMT solvers cvc5 [4] and z3 [34], the winners of the 2022 SMT-COMP in the category QF_NRA⁴.
+ ³ In fact, we use the slightly optimized Gauss-Seidel iteration (see [42, Sec. 5.2]) which provides a good trade-off between ease of implementation and efficiency [42].
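+ A sketch of one such query with z3's Python API, instantiated for the PPS of Example 3 and a hypothetical lower bound close to the lfp (whether the query is sat depends on how tight the bound already is):
+ from z3 import Reals, Solver, And, sat
+
+ x, y = Reals("x y")
+ lx, ly, eps = 0.5857, 0.4141, 1e-3          # lower bound from value iteration
+ s = Solver()
+ s.add(And(0.25*x*x + 0.5 <= x,              # f_i(x) <= x_i ...
+           0.25*x*y + 0.25*y + 0.25 <= y,
+           lx <= x, x <= lx + eps,           # ... and l_i <= x_i <= l_i + eps
+           ly <= y, y <= ly + eps))
+ if s.check() == sat:
+     print(s.model())                        # any model is an inductive upper bound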
+ bool and() {
+   prob {
+     1//2: return (1//2: true | 1//2: false);
+     1//2: {
+       if(!or()) return false;
+       else return or(); } } }
+
+ bool or() {
+   prob {
+     1//2: return (1//2: true | 1//2: false);
+     1//2: {
+       if(and()) return true;
+       else return and(); } } }
+ Fig. 4: Program evaluating a random and-or tree [8]. The prob-blocks execute the contained statements with the respective probabilities (syntax inspired by Java’s switch). Our tool automatically translates this program to a pPDA and computes a basic certificate (Proposition 1) witnessing that calling and() returns true and false with probability ≤ 382/657 ≈ 0.58 and ≤ 391/933 ≈ 0.42, respectively.
+ As yet another baseline, we have also implemented a variant of OVI for PPS which is closer to the original MDP algorithm from [22]. In this variant, called “standard OVI” from now on, we compute the candidate ⃗u based on the “relative” update rule ⃗u = (1 + ε)⃗l, where ⃗l is the current lower bound [22].
+ Research Questions. We aim to shed some light on the following questions: (A) How well does our algorithm scale? (B) Is the algorithm suitable for PPS with different characteristics, e.g., dense or sparse? (C) Is the requirement ρ(∂ ⃗f(µ⃗f)) < 1 restrictive in practice? (D) How does our OVI compare to the baselines?
+ Benchmarks. To answer the above questions we run our implementation on two sets of benchmarks (Table 1 and Table 2, respectively). The first set consists of various example programs from the literature as well as a few new programs, which are automatically translated to pPDA. This translation is standard and usually takes no more than a few seconds. The programs golden, and-or (see Figure 4), virus, and gen-fun are adapted from [35,8,41] and [32, Program 5.6], respectively. The source code of all considered programs is in the appendix. We have selected only programs with possibly unbounded recursion depth which induce infinite Markov chains. The second benchmark set comprises explicitly given PPS⁵. The instances brown, lemonde, negra, swbd, tiger, tuebadz, and wsj all encode SCFG from the area of language processing (see [43] for details). random is the return probability system of a randomly generated pPDA.
+ Summary of Experimental Results. We ran the experiments on a standard notebook. The approach based on cvc5 turns out not to be competitive (see Appendix D). We thus focus on z3 in the following. Both pray and the z3 approach could handle most of the programs from Table 1 within a 10 minute time limit. The considered programs induce sparse PPS with 38 – 26,367 variables, and most of them have just a single SCC.
+ ⁴ https://smt-comp.github.io/2022/results
+ ⁵ These examples come with PReMo: https://cgi.csc.liv.ac.uk/~dominik/premo/
+ Table 1: Experiments with PPS obtained from recursive probabilistic programs. Columns vars and terms display the number of variables and terms in the PPS. Columns sccs and sccmax indicate the number of non-trivial SCCs and the size of the largest SCC. G is the total number of guesses made by OVI (at least one guess per SCC). ttot is the total runtime excluding the time for model construction. tQ is the percentage of ttot spent on exact rational arithmetic. D is the average number of decimal digits of the rational numbers in the certificate. The timeout (TO) was set to 10 minutes. Timings are in ms. The absolute precision is ε = 10⁻³.
+ benchmark      | |Q|  | |P|  | |Γ|  | vars  | terms | sccs | sccmax | cert | G  | D | tQ  | ttot   | certz3 | Dz3 | tz3    | certstd | Gstd | Dstd | tstd
+ rw-0.499       | 18   | 29   | 5    | 38    | 45    | 1    | 12     | ✓    | 5  | 5 | 17% | 163    | ✓      | 2   | 11     | ✓       | 4    | 5    | 59
+ rw-0.500       | 18   | 29   | 5    | 38    | 45    | 1    | 12     | ✗    | 10 | - | -   | 7327   | ✓      | 2   | 10     | ✗       | 10   | -    | 8083
+ rw-0.501       | 18   | 29   | 5    | 38    | 45    | 1    | 12     | ✓    | 5  | 4 | 6%  | 36     | ✓      | 13  | 12     | ✓       | 4    | 5    | 23
+ geom-offspring | 24   | 40   | 5    | 52    | 80    | 4    | 24     | ✓    | 8  | 6 | 13% | 15     | ✓      | 9   | 16     | ✓       | 8    | 6    | 14
+ golden         | 27   | 49   | 6    | 81    | 94    | 1    | 36     | ✓    | 1  | 5 | 30% | 10     | ✓      | 7   | 14     | ✓       | 2    | 4    | 12
+ and-or         | 50   | 90   | 7    | 149   | 182   | 1    | 48     | ✓    | 2  | 4 | 26% | 19     | ✓      | 12  | 15260  | ✓       | 2    | 4    | 19
+ gen-fun        | 85   | 219  | 7    | 202   | 327   | 1    | 16     | ✓    | 2  | 3 | 32% | 22     | ✓      | 15  | 141    | ✓       | 2    | 3    | 21
+ virus          | 68   | 149  | 27   | 341   | 551   | 1    | 220    | ✓    | 1  | 5 | 38% | 40     | ✓      | 7   | 139    | ✓       | 1    | 6    | 59
+ escape10       | 109  | 174  | 23   | 220   | 263   | 1    | 122    | ✓    | 1  | 4 | 5%  | 56     | ✓      | 7   | 48     | ✓       | 1    | 8    | 71
+ escape25       | 258  | 413  | 53   | 518   | 621   | 1    | 300    | ✓    | 1  | 5 | 17% | 245    | ✓      | 7   | 15958  | ✓       | 1    | 9    | 172
+ escape50       | 508  | 813  | 103  | 1018  | 1221  | 1    | 600    | ✓    | 1  | 7 | 23% | 653    | ✓      | 7   | 410    | ✗       | 1    | -    | 400
+ escape75       | 760  | 1215 | 153  | 1522  | 1825  | 1    | 904    | ✓    | 2  | 9 | 10% | 3803   | ✗      | -   | TO     | ✗       | 1    | -    | 635
+ escape100      | 1009 | 1614 | 203  | 2020  | 2423  | 1    | 1202   | ✗    | 5  | - | -   | 29027  | ✓      | 6   | 939    | ✗       | 1    | -    | 901
+ escape200      | 2008 | 3213 | 403  | 4018  | 4821  | 1    | 2400   | ✗    | 6  | - | -   | 83781  | ✗      | -   | TO     | ✗       | 1    | -    | 2206
+ sequential5    | 230  | 490  | 39   | 1017  | 1200  | 10   | 12     | ✓    | 15 | 4 | 26% | 103    | ✓      | 8   | 1074   | ✓       | 15   | 5    | 204
+ sequential7    | 572  | 1354 | 137  | 3349  | 3856  | 14   | 12     | ✓    | 21 | 5 | 27% | 1049   | ✓      | 8   | 12822  | ✓       | 20   | 5    | 1042
+ sequential10   | 3341 | 8666 | 1036 | 26367 | 29616 | 20   | 12     | ✓    | 30 | 5 | 2%  | 100613 | ✓      | 8   | 453718 | ✓       | 30   | 6    | 101554
+ mod5           | 44   | 103  | 10   | 296   | 425   | 1    | 86     | ✓    | 1  | 5 | 39% | 28     | ✓      | 9   | 34150  | ✗       | 2    | -    | 178
+ mod7           | 64   | 159  | 14   | 680   | 1017  | 1    | 222    | ✓    | 1  | 6 | 69% | 172    | ✓      | 7   | 443    | ✗       | 2    | -    | 624
+ mod10          | 95   | 244  | 20   | 1574  | 2403  | 1    | 557    | ✗    | 1  | - | -   | 675    | ✓      | 7   | 1245   | ✗       | 2    | -    | 882
+ Notably, the examples with greatest maximum SCC size were only solved by z3. pray and z3 need at most 95 and 31 seconds, respectively, for the instances where they succeed. In many cases (e.g., rw-0.501, golden, virus, brown, swbd), the resulting certificates formally disprove AST. For the explicit PPS in Table 2, pray solves all instances whereas z3 only solves 3/8 within the time limit, and only finds the trivial solution ⃗1. Most of these benchmarks contain dense high-degree polynomials, and our tool spends most of its time on performing exact arithmetic. pray never needs more than 6 guesses per SCC if it succeeds.
+ Evaluation of Research Questions. (A) Scalability: Our algorithm succeeded on instances with maximum SCC size of up to 8,000 and number of terms over 50,000. pray could solve all instances with a maximum SCC size of ≤ 1,000 in less than 2 minutes per instance. For the examples where our algorithm does not succeed (e.g., escape100), it is mostly because it fails to convert a floating point certificate to a rational one. (B) PPS with different flavors: The problems in Table 1 (low degree and sparse, i.e., few terms per polynomial) and Table 2 (higher degree and dense) are quite different. A comparison to the SMT approach suggests that our technique might be especially well suited for dense problems with higher degrees. (C) Non-singularity: The only instance where our algorithm fails because of the non-singularity condition is the symmetric random walk rw-0.500. We therefore conjecture that this condition is often satisfied in practice. (D) Comparison to SMT: There is no clear winner. Some instances can only be solved by one tool or the other (e.g. escape100 and brown). However, pray often delivers more succinct certificates, i.e., the rational numbers have fewer digits. Overall, z3 behaves less predictably than pray.
+ Table 2: Experiments with explicitly given PPS (setup as in Table 1).
+ benchmark | vars  | terms | sccs | sccmax | cert | G | D | tQ  | ttot  | certz3 | Dz3 | tz3   | certstd | Gstd | Dstd | tstd
+ brown     | 37    | 22866 | 1    | 22     | ✓    | 2 | 6 | 74% | 3212  | ✗      | -   | TO    | ✓       | 2    | 8    | 9065
+ lemonde   | 121   | 32885 | 1    | 48     | ✓    | 2 | 5 | 97% | 40738 | ✗      | -   | TO    | ✓       | 2    | 5    | 38107
+ negra     | 256   | 29297 | 1    | 149    | ✓    | 2 | 7 | 89% | 10174 | ✓      | 1   | 37248 | ✓       | 1    | 7    | 8873
+ swbd      | 309   | 47578 | 1    | 243    | ✓    | 1 | 7 | 93% | 18989 | ✗      | -   | TO    | ✓       | 1    | 8    | 67314
+ tiger     | 318   | 52184 | 1    | 214    | ✓    | 2 | 8 | 98% | 94490 | ✓      | 1   | 17454 | ✓       | 1    | 8    | 90801
+ tuebadz   | 196   | 8932  | 2    | 168    | ✓    | 4 | 9 | 85% | 2666  | ✓      | 1   | 15323 | ✓       | 3    | 9    | 2700
+ wsj       | 240   | 31170 | 1    | 194    | ✓    | 2 | 9 | 96% | 30275 | ✗      | -   | TO    | ✓       | 2    | 9    | 29038
+ random    | 10000 | 20129 | 1    | 8072   | ✓    | 3 | 7 | 5%  | 17585 | ✗      | -   | TO    | ✓       | 4    | 8    | 16357
+ 6 Conclusion and Future Work
+ We have proposed using inductive bounds as certificates for various properties in probabilistic recursive models. Moreover, we have presented the first dedicated algorithm for computing inductive upper bounds. While our algorithm already scales to non-trivial problems, the main bottleneck is the generation of an exact rational bound from a floating point approximation. This might be improved using appropriate rounding modes as in [21]. Additional future work includes further certificates for pPDA, especially for lower bounds and termination.
+ References
+ 1. Azeem, M., Evangelidis, A., Kretínský, J., Slivinskiy, A., Weininger, M.: Optimistic and Topological Value Iteration for Simple Stochastic Games. CoRR abs/2207.14417 (2022)
+ 2. Baier, C., Katoen, J.: Principles of Model Checking. MIT Press (2008)
+ 3. Baier, C., Klein, J., Leuschner, L., Parker, D., Wunderlich, S.: Ensuring the Reliability of Your Model Checker: Interval Iteration for Markov Decision Processes. In: CAV (1). Lecture Notes in Computer Science, vol. 10426, pp. 160–180. Springer (2017)
+ 4. Barbosa, H., Barrett, C.W., Brain, M., Kremer, G., Lachnitt, H., Mann, M., Mohamed, A., Mohamed, M., Niemetz, A., Nötzli, A., Ozdemir, A., Preiner, M., Reynolds, A., Sheng, Y., Tinelli, C., Zohar, Y.: cvc5: A versatile and industrial-strength SMT solver. In: TACAS (1). Lecture Notes in Computer Science, vol. 13243, pp. 415–442. Springer (2022)
+ 5. Batz, K., Chen, M., Kaminski, B.L., Katoen, J., Matheja, C., Schröer, P.: Latticed k-induction with an application to probabilistic programs. In: CAV (2). Lecture Notes in Computer Science, vol. 12760, pp. 524–549. Springer (2021)
+ 6. Bishop, C.M.: Model-based machine learning. Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences 371(1984), 20120222 (2013)
+ 7. Brázdil, T., Esparza, J., Kiefer, S., Kucera, A.: Analyzing probabilistic pushdown automata. Formal Methods Syst. Des. 43(2), 124–163 (2013)
+ 8. Brázdil, T., Kiefer, S., Kucera, A., Vareková, I.H.: Runtime analysis of probabilistic programs with unbounded recursion. J. Comput. Syst. Sci. 81(1), 288–310 (2015)
+ 9. Chiang, D., Riley, D.: Factor Graph Grammars. In: NeurIPS (2020)
+ 10. Esparza, J., Gaiser, A., Kiefer, S.: Computing Least Fixed Points of Probabilistic Systems of Polynomials. In: STACS. LIPIcs, vol. 5, pp. 359–370. Schloss Dagstuhl - Leibniz-Zentrum für Informatik (2010)
+ 11. Esparza, J., Kiefer, S., Luttenberger, M.: Convergence Thresholds of Newton’s Method for Monotone Polynomial Equations. In: STACS. LIPIcs, vol. 1, pp. 289–300. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, Germany (2008)
+ 12. Esparza, J., Kiefer, S., Luttenberger, M.: Computing the Least Fixed Point of Positive Polynomial Systems. SIAM J. Comput. 39(6), 2282–2335 (2010)
+ 13. Esparza, J., Kucera, A., Mayr, R.: Model Checking Probabilistic Pushdown Automata. In: LICS. pp. 12–21. IEEE Computer Society (2004)
+ 14. Esparza, J., Kucera, A., Mayr, R.: Quantitative Analysis of Probabilistic Pushdown Automata: Expectations and Variances. In: LICS. pp. 117–126. IEEE Computer Society (2005)
+ 15. Esparza, J., Lammich, P., Neumann, R., Nipkow, T., Schimpf, A., Smaus, J.: A fully verified executable LTL model checker. In: CAV. Lecture Notes in Computer Science, vol. 8044, pp. 463–478. Springer (2013)
+ 16. Etessami, K., Yannakakis, M.: Recursive Markov Chains, Stochastic Grammars, and Monotone Systems of Nonlinear Equations. In: STACS. Lecture Notes in Computer Science, vol. 3404, pp. 340–352. Springer (2005)
+ 17. Etessami, K., Yannakakis, M.: Recursive Markov chains, stochastic grammars, and monotone systems of nonlinear equations. J. ACM 56(1), 1:1–1:66 (2009)
+ 18. Flajolet, P., Sedgewick, R.: Analytic Combinatorics. Cambridge University Press (2009)
+ 19. Funke, F., Jantsch, S., Baier, C.: Farkas Certificates and Minimal Witnesses for Probabilistic Reachability Constraints. In: TACAS (1). Lecture Notes in Computer Science, vol. 12078, pp. 324–345. Springer (2020)
+ 20. Haddad, S., Monmege, B.: Reachability in MDPs: Refining convergence of value iteration. In: RP. Lecture Notes in Computer Science, vol. 8762, pp. 125–137. Springer (2014)
+ 21. Hartmanns, A.: Correct Probabilistic Model Checking with Floating-Point Arithmetic. In: TACAS (2). Lecture Notes in Computer Science, vol. 13244, pp. 41–59. Springer (2022)
+ 22. Hartmanns, A., Kaminski, B.L.: Optimistic value iteration. In: CAV (2). Lecture Notes in Computer Science, vol. 12225, pp. 488–511. Springer (2020)
+ 23. Jantsch, S.: Certificates and Witnesses for Probabilistic Model Checking. Ph.D. thesis, Dresden University of Technology, Germany (2022)
+ 24. Jantsch, S., Funke, F., Baier, C.: Minimal Witnesses for Probabilistic Timed Automata. In: ATVA. Lecture Notes in Computer Science, vol. 12302, pp. 501–517. Springer (2020)
+ 25. Jurafsky, D., Wooters, C., Segal, J., Stolcke, A., Fosler, E., Tajchman, G.N., Morgan, N.: Using a stochastic context-free grammar as a language model for speech recognition. In: ICASSP. pp. 189–192. IEEE Computer Society (1995)
+ 26. Karp, R.M.: An introduction to randomized algorithms. Discret. Appl. Math. 34(1-3), 165–201 (1991)
+ 27. Kiefer, S., Luttenberger, M., Esparza, J.: On the convergence of Newton’s method for monotone systems of polynomial equations. In: STOC. pp. 217–226. ACM (2007)
+ 28. Knudsen, B., Hein, J.: Pfold: RNA secondary structure prediction using stochastic context-free grammars. Nucleic Acids Res. 31(13), 3423–3428 (2003)
+ 29. Kobayashi, N., Lago, U.D., Grellois, C.: On the Termination Problem for Probabilistic Higher-Order Recursive Programs. Log. Methods Comput. Sci. 16(4) (2020)
+ 30. Kucera, A., Esparza, J., Mayr, R.: Model checking probabilistic pushdown automata. Log. Methods Comput. Sci. 2(1) (2006)
+ 31. McConnell, R.M., Mehlhorn, K., Näher, S., Schweitzer, P.: Certifying algorithms. Comput. Sci. Rev. 5(2), 119–161 (2011)
+ 32. van de Meent, J., Paige, B., Yang, H., Wood, F.: An Introduction to Probabilistic Programming. CoRR abs/1809.10756 (2018)
+ 33. Michail, D., Kinable, J., Naveh, B., Sichi, J.V.: JGraphT - A Java library for graph data structures and algorithms. ACM Trans. Math. Softw. 46(2), 16:1–16:29 (2020)
+ 34. de Moura, L.M., Bjørner, N.S.: Z3: an efficient SMT solver. In: TACAS. Lecture Notes in Computer Science, vol. 4963, pp. 337–340. Springer (2008)
+ 35. Olmedo, F., Kaminski, B.L., Katoen, J., Matheja, C.: Reasoning about Recursive Probabilistic Programs. In: LICS. pp. 672–681. ACM (2016)
+ 36. Quatmann, T., Katoen, J.: Sound Value Iteration. In: CAV (1). Lecture Notes in Computer Science, vol. 10981, pp. 643–661. Springer (2018)
+ 37. Saad, Y.: Numerical Methods for Large Eigenvalue Problems: Revised Edition. SIAM (2011)
+ 38. Simistira, F., Katsouros, V., Carayannis, G.: Recognition of online handwritten mathematical formulas using probabilistic SVMs and stochastic context free grammars. Pattern Recognit. Lett. 53, 85–92 (2015)
+ 39. Stewart, A., Etessami, K., Yannakakis, M.: Upper Bounds for Newton’s Method on Monotone Polynomial Systems, and P-Time Model Checking of Probabilistic One-Counter Automata. J. ACM 62(4), 30:1–30:33 (2015)
+ 40. Wimmer, S., von Mutius, J.: Verified Certification of Reachability Checking for Timed Automata. In: TACAS (1). Lecture Notes in Computer Science, vol. 12078, pp. 425–443. Springer (2020)
+ 41. Winkler, T., Gehnen, C., Katoen, J.: Model Checking Temporal Properties of Recursive Probabilistic Programs. In: FoSSaCS. Lecture Notes in Computer Science, vol. 13242, pp. 449–469. Springer (2022)
+ 42. Wojtczak, D.: Recursive probabilistic models: efficient analysis and implementation. Ph.D. thesis, University of Edinburgh, UK (2009)
+ 43. Wojtczak, D., Etessami, K.: PReMo: An Analyzer for Probabilistic Recursive Models. In: TACAS. Lecture Notes in Computer Science, vol. 4424, pp. 66–71. Springer (2007)
+ 44. Yannakakis, M., Etessami, K.: Checking LTL properties of recursive Markov chains. In: QEST. pp. 155–165. IEEE Computer Society (2005)
+ A Full Proofs
+ A.1 Proof of Lemma 2
+ Lemma 2 (Existence of inductive upper bounds). Let ⃗f be a feasible, clean, and strongly connected PPS. Then the following are equivalent:
+ (1) The matrix I − ∂ ⃗f(µ⃗f) is non-singular.
+ (2) The spectral radius of ∂ ⃗f(µ⃗f) satisfies ρ(∂ ⃗f(µ⃗f)) < 1.
+ (3) There exists ⃗0 ≺ ⃗u ≺ ⃗∞ s.t. ⃗f(⃗u) < ⃗u (i.e. ⃗u is inductive but not a fixpoint).
+ (4) The matrix ∂ ⃗f(µ⃗f) has a unique (normalized) eigenvector ⃗v ≻ ⃗0, and there exist numbers δmax > 0 and ε > 0 s.t. ⃗f(µ⃗f + δ · ⃗ṽ) ≺ µ⃗f + δ · ⃗ṽ holds for all 0 < δ ≤ δmax and vectors ⃗ṽ ≥ ⃗v with ||⃗v − ⃗ṽ||∞ ≤ ε.
+ We now explain the proof of Lemma 2. The proof heavily relies on a linear approximation of ⃗f around the lfp µ⃗f. Intuitively, this is where the Jacobi matrix ∂ ⃗f(µ⃗f) comes into play. This is formalized via Taylor’s familiar theorem.
+ Lemma 4 (Taylor’s Theorem; cf. [12, Lem. 2.3]). Let ⃗f be a feasible PPS. Then for all vectors ⃗u ≥ ⃗0, we have
+ ⃗f(µ⃗f + ⃗u) = µ⃗f + ∂ ⃗f(µ⃗f)⃗u + R⃗u⃗u ,
+ where R⃗u is a matrix that depends on ⃗u such that lim_{⃗u→⃗0} R⃗u = 0. More specifically, it holds that ⃗0 ≤ R⃗u⃗u ≤ (∂ ⃗f(µ⃗f + ⃗u) − ∂ ⃗f(µ⃗f)) ⃗u.
+ Proof (Proof of Lemma 2). “(1) =⇒ (2)”: By Theorem 2 we have ρ(∂ ⃗f(µ⃗f)) ≤ 1. Towards a contradiction, assume that ρ(∂ ⃗f(µ⃗f)) = 1. By the Perron-Frobenius Theorem, 1 is an eigenvalue of ∂ ⃗f(µ⃗f), which means that there exists ⃗u ̸= ⃗0 such that ∂ ⃗f(µ⃗f)⃗u = ⃗u. This ⃗u is in the kernel of I − ∂ ⃗f(µ⃗f), which contradicts the assumption that I − ∂ ⃗f(µ⃗f) is non-singular.
+ “(2) =⇒ (1)”: It is a well-known result that for an arbitrary real matrix M the series Σ_{k=0}^∞ M^k converges iff ρ(M) < 1. The limit of the series is the inverse of I − M because
+ (I − M) Σ_{k=0}^∞ M^k = Σ_{k=0}^∞ M^k − Σ_{k=1}^∞ M^k = M^0 = I .
+ “(2) =⇒ (4)”: Let ρ(∂ ⃗f(µ⃗f)) =: λ < 1. By the Perron-Frobenius Theorem, the Jacobi matrix ∂ ⃗f(µ⃗f) has a unique normalized eigenvector ⃗v ≻ ⃗0 wrt. eigenvalue λ:
+ ∂ ⃗f(µ⃗f)⃗v = λ⃗v ≺ ⃗v .   (1)
+ Our goal is to define the values ε and δmax whose existence we claimed in Lemma 2(4). Let cmin > 0 be the smallest component of (1 − λ)⃗v ≻ ⃗0. We define
+ ε := cmin / (3 ||∂ ⃗f(µ⃗f)||∞) ,   (2)
+ where ||∂ ⃗f(µ⃗f)||∞ = max_{||⃗y||∞=1} ||∂ ⃗f(µ⃗f)⃗y||∞ is the maximum row sum of ∂ ⃗f(µ⃗f). Note that || · ||∞ is the operator norm induced by the maximum norm. Then it holds for all ⃗ε with ||⃗ε||∞ ≤ ε that
+ ||∂ ⃗f(µ⃗f)⃗ε||∞ ≤ ||∂ ⃗f(µ⃗f)||∞ ||⃗ε||∞ ≤ ||∂ ⃗f(µ⃗f)||∞ · cmin / (3 ||∂ ⃗f(µ⃗f)||∞) = cmin/3 .   (3)
+ The first inequality in (3) is a property of operator norms (which is straightforward in the case of the maximum norm). Since cmin was the smallest component of (1 − λ)⃗v, (3) implies
+ ∂ ⃗f(µ⃗f)⃗ε ≤ (1/3)(1 − λ)⃗v .   (4)
+ We now define δmax as follows:
+ δmax := sup { δ > 0 | ∀⃗ε ≥ ⃗0 s.t. ||⃗ε||∞ ≤ ε : R_{δ(⃗v+⃗ε)}(⃗v + ⃗ε) ≤ (1/2)(1 − λ)⃗v } ,   (5)
+ where R_{δ(⃗v+⃗ε)} is the matrix from Lemma 4 which satisfies
+ ⃗f(µ⃗f + δ(⃗v + ⃗ε)) = µ⃗f + δ ∂ ⃗f(µ⃗f)(⃗v + ⃗ε) + δ R_{δ(⃗v+⃗ε)}(⃗v + ⃗ε) .
+ We now argue that δmax > 0. This is not immediately obvious because of the ∀-quantification in (5). Let δ > 0 be arbitrary. Further, let ⃗ε ≥ ⃗0 be such that ||⃗ε||∞ ≤ ε. In the following, we write ⃗ε′ = (ε . . . ε). We have
+ R_{δ(⃗v+⃗ε)}(⃗v + ⃗ε) = (1/δ) R_{δ(⃗v+⃗ε)} δ(⃗v + ⃗ε)
+   ≤ (1/δ) (∂ ⃗f(µ⃗f + δ(⃗v + ⃗ε)) − ∂ ⃗f(µ⃗f)) δ(⃗v + ⃗ε)   (Lemma 4)
+   = (∂ ⃗f(µ⃗f + δ(⃗v + ⃗ε)) − ∂ ⃗f(µ⃗f)) (⃗v + ⃗ε)
+   ≤ (∂ ⃗f(µ⃗f + δ(⃗v + ⃗ε′)) − ∂ ⃗f(µ⃗f)) (⃗v + ⃗ε′)   (Jacobi matrix is monotonic)
+   =: Mδ (⃗v + ⃗ε′) .
+ Note that Mδ does not depend on ⃗ε and lim_{δ→0} Mδ = 0. We can therefore find a specific δ∗ > 0 such that Mδ∗(⃗v + ⃗ε′) ≤ (1/2)(1 − λ)⃗v. On the other hand, we have just shown for all ⃗ε ≥ ⃗0 with ||⃗ε||∞ ≤ ε and all δ > 0 that R_{δ(⃗v+⃗ε)}(⃗v + ⃗ε) ≤ Mδ(⃗v + ⃗ε′). So we have in particular for all ⃗ε ≥ ⃗0 with ||⃗ε||∞ ≤ ε that
+ R_{δ∗(⃗v+⃗ε)}(⃗v + ⃗ε) ≤ Mδ∗(⃗v + ⃗ε′) ≤ (1/2)(1 − λ)⃗v .
+ Hence δmax ≥ δ∗ > 0.
+ Finally, let 0 < δ ≤ δmax and ⃗ṽ ≥ ⃗v with ||⃗v − ⃗ṽ||∞ ≤ ε, i.e., ⃗ṽ = ⃗v + ⃗ε for some ⃗ε ≥ ⃗0 with ||⃗ε||∞ ≤ ε. Then
+ ⃗f(µ⃗f + δ(⃗v + ⃗ε))
+   = µ⃗f + δ ∂ ⃗f(µ⃗f)(⃗v + ⃗ε) + δ R_{δ(⃗v+⃗ε)}(⃗v + ⃗ε)   (by Taylor’s Theorem (Lemma 4))
+   = µ⃗f + δλ⃗v + δ ∂ ⃗f(µ⃗f)⃗ε + δ R_{δ(⃗v+⃗ε)}(⃗v + ⃗ε)   (by (1))
+   ≤ µ⃗f + δλ⃗v + δ(1/3)(1 − λ)⃗v + δ R_{δ(⃗v+⃗ε)}(⃗v + ⃗ε)   (by (4))
+   ≤ µ⃗f + δλ⃗v + δ(1/3)(1 − λ)⃗v + δ(1/2)(1 − λ)⃗v   (by (5))
+   ≺ µ⃗f + δλ⃗v + δ(1/2)(1 − λ)⃗v + δ(1/2)(1 − λ)⃗v   (because δ(1 − λ)⃗v ≻ ⃗0)
+   = µ⃗f + δ⃗v   (simplification)
+   ≤ µ⃗f + δ(⃗v + ⃗ε)   (because ⃗ε ≥ ⃗0)
+ “(4) =⇒ (3)”: Trivial.
+ “(3) =⇒ (2)”: By (3) there exists ⃗u such that ⃗f(⃗u) < ⃗u. By Lemma 1 this implies that µ⃗f < ⃗u, so we can write ⃗u = µ⃗f + ⃗v for some ⃗v > ⃗0. Using Taylor’s Theorem (Lemma 4), it follows that
+ ⃗f(µ⃗f + ⃗v) = µ⃗f + ∂ ⃗f(µ⃗f)⃗v + R⃗v⃗v < µ⃗f + ⃗v .   (6)
+ Using that R⃗v⃗v ≥ ⃗0, (6) implies that
+ ∂ ⃗f(µ⃗f)⃗v < ⃗v .   (7)
+ The claim now follows by applying the following lemma to the matrix ∂ ⃗f(µ⃗f) and the vector ⃗v:
+ Lemma 5. Let M ≥ 0 be an irreducible n × n-matrix. If there exists ⃗u > ⃗0 such that M⃗u < ⃗u, then ⃗u ≻ ⃗0, M^n⃗u ≺ ⃗u and ρ(M) < 1.
+ Proof. First observe that since multiplication by M is monotone, we have for all 0 ≤ k1 ≤ k2 that
+ ⃗0 ≤ M^{k2}⃗u ≤ M^{k1}⃗u ≤ ⃗u .
+ We first show that ⃗u ≻ ⃗0, which is essentially [12, Lemma 5.3]. Since ⃗u > ⃗0, there must be 1 ≤ i ≤ n such that ⃗u_i > 0. Now let 1 ≤ j ≤ n be arbitrary. Since M is irreducible, there exists 0 ≤ k < n such that (M^k)_{j,i} > 0. This implies that (M^k⃗u)_j > 0. By monotonicity, ⃗u ≥ M^k⃗u, and thus ⃗u_j ≥ (M^k⃗u)_j > 0. Since j was arbitrary, ⃗u ≻ ⃗0.
+ Next we show M^n⃗u ≺ ⃗u. Since M⃗u < ⃗u holds by assumption, there exists 1 ≤ i ≤ n such that (M⃗u)_i < ⃗u_i. Let 1 ≤ j ≤ n be arbitrary. Since M is irreducible, there exists 0 ≤ k < n such that (M^k)_{j,i} > 0. We now show that (M^n⃗u)_j < ⃗u_j, which implies that M^n⃗u ≺ ⃗u as j was chosen arbitrarily:
+ (M^n⃗u)_j ≤ (M^k M⃗u)_j   (by monotonicity, and because k + 1 ≤ n)
+   = (M^k)_{j,i}(M⃗u)_i + Σ_{l̸=i} (M^k)_{j,l}(M⃗u)_l   (def. of matrix-vector product)
+   < (M^k)_{j,i}⃗u_i + Σ_{l̸=i} (M^k)_{j,l}(M⃗u)_l   (because (M⃗u)_i < ⃗u_i and (M^k)_{j,i} > 0)
+   ≤ (M^k)_{j,i}⃗u_i + Σ_{l̸=i} (M^k)_{j,l}⃗u_l   (because (M⃗u)_l ≤ ⃗u_l)
+   = (M^k⃗u)_j ≤ ⃗u_j .
+ It remains to show that ρ(M) < 1. We do this by showing that the powers of M (i.e., the sequence (M^k)_{k≥0}) converge to the zero matrix. Since M^n⃗u ≺ ⃗u, we can choose c < 1 such that M^n⃗u ≤ c⃗u. Then for all m ≥ 1 it holds that M^{nm}⃗u ≤ c^m⃗u, so we have
+ lim_{k→∞} M^k⃗u = ⃗0 .
+ Recall from above that we already know ⃗u ≻ ⃗0. Thus lim_{k→∞} M^k⃗u = ⃗0 means that a positive linear combination of the entries of each individual row of M^k converges to zero, i.e., for all 1 ≤ i ≤ n we have lim_{k→∞} Σ_j (M^k)_{i,j}⃗u_j = 0, and thus for all 1 ≤ j ≤ n, lim_{k→∞} (M^k)_{i,j} = 0. Thus lim_{k→∞} M^k = 0, which completes the proof. ⊓⊔
+ A.2 Proof of Theorem 3
+ Theorem 3. Algorithm 1 is correct: when invoked with a strongly connected clean PPS ⃗f and ε > 0, then (if it terminates) it outputs a pair (⃗l, ⃗u) s.t. ⃗l ≤ µ⃗f, ⃗f(⃗u) ≤ ⃗u (and thus µ⃗f ≤ ⃗u), and ||⃗l − ⃗u||∞ ≤ ε. Moreover, if ⃗f is feasible and I − ∂ ⃗f(µ⃗f) is non-singular, then the algorithm terminates.
+ Proof. Correctness is obvious, so we only show termination assuming that ⃗f is feasible and I − ∂ ⃗f(µ⃗f) is non-singular. Clearly, the algorithm terminates iff it eventually finds a ⃗u in line 8 which is inductive.
+ Assume towards a contradiction that the algorithm never terminates, i.e., it never finds an inductive ⃗u. For all i ≥ 1 let ⃗l_i, ⃗v_i, τ_i be the values of the variables ⃗l, ⃗v and τ at the ith time the inner loop at line 7 is reached (note that we then have N = i − 1). Clearly, lim_{i→∞} τ_i = 0. By the contract satisfied by improveLowerBound, we have lim_{i→∞} ∂ ⃗f(⃗l_i) = ∂ ⃗f(µ⃗f). Since the eigenvectors of ∂ ⃗f(µ⃗f) depend continuously on those of the matrices ∂ ⃗f(⃗l_i), and because of the contract satisfied by approxEigenvec, the sequence ⃗v_1, ⃗v_2, . . . converges to the true unique normalized Perron-Frobenius eigenvector ⃗v_true of ∂ ⃗f(µ⃗f).
+ We now apply condition (4) of Lemma 2. The condition ensures that the cone
+ C(µ⃗f, ⃗v_true, ε′, δmax) = { µ⃗f + δ⃗ṽ | 0 ≤ δ ≤ δmax, ||⃗ṽ − ⃗v_true||∞ ≤ ε′ } ,
+ which is located at µ⃗f, points in direction ⃗v_true, and has radius ε′ and length δmax, contains only inductive points. For the sake of illustration, suppose that the algorithm already knew δmax and computed ⃗u_i = ⃗l_i + δ⃗v_i for some 0 < δ < δmax instead of executing the loop starting at line 7. But then the sequence (⃗u_i)_{i≥1} converges to µ⃗f + δ⃗v_true, which is a point that lies inside the interior of C, so there must be some i ≥ 1 such that ⃗u_i ∈ C, i.e., ⃗u_i is inductive.
+ The remaining difficulty is that δmax is of course unknown in practice. We handle this using the inner loop that starts at line 7. Eventually, the variable N is sufficiently large such that d^k ε < δmax for some k ≤ N. Termination then follows by applying the argument in the previous paragraph to δ = d^k ε. ⊓⊔
+ A.3 Proof of Lemma 3
+ Lemma 3. Let M ≥ 0 be irreducible. Then power iteration applied to M + I and any ⃗v0 > ⃗0 converges to the Perron-Frobenius eigenvector ⃗v ≻ ⃗0 of M.
+ Proof. Consider the following conditions for an irreducible matrix M ≥ 0 and a vector ⃗v0 with M⃗v0 ̸= ⃗0:
+ 1. M has a unique dominant eigenvalue |λ1| > |λ2| ≥ . . . ≥ |λn|.
+ 2. λ1 is semisimple, i.e., its algebraic multiplicity⁶ equals its geometric multiplicity⁷.
+ 3. ⃗v0 is not orthogonal to the eigenspace {⃗v | M⃗v = λ1⃗v}.
+ It is known that if all these conditions are satisfied, then the power iteration sequence (⃗v_i)_{i∈N} converges to a (normalized) eigenvector ⃗v with eigenvalue λ1 (e.g. [37, Theorem 4.1]).
+ We now show that these conditions are satisfied for the irreducible matrix M + I ≥ 0 and every initial vector ⃗v0 > ⃗0. The eigenvectors of M and M + I are exactly the same, but the eigenvalues are all shifted by +1. Indeed, if ⃗v is some eigenvector of M with eigenvalue λ, then (M + I)⃗v = λ⃗v + ⃗v = (λ + 1)⃗v. However, unlike M, the matrix M + I always has period 1, and so it has a unique dominant eigenvalue λ1 by Theorem 1(2). Therefore the first of the above three conditions is satisfied by the matrix M + I.
+ Next, by Theorem 1(1) it holds that the geometric multiplicity of λ1 is 1. Since the dominant eigenvalue of an irreducible non-negative matrix is a simple root of the characteristic polynomial by the Perron-Frobenius Theorem, the algebraic multiplicity of λ1 is also 1, and thus the matrix M + I satisfies the second condition as well.
+ Finally, the third condition is satisfied for any ⃗v0 > ⃗0 because the scalar product ⃗v0 · ⃗v is non-zero (either strictly positive or strictly negative) for all non-zero eigenvectors ⃗v of λ1 by Theorem 1(1). ⊓⊔
+ ⁶ The algebraic multiplicity is the multiplicity of a given eigenvalue as a root of the characteristic polynomial.
+ ⁷ The geometric multiplicity is the dimension of the eigenspace associated with a particular eigenvalue.
+ A.4 Proof of Proposition 1
+ Proposition 1 (Basic Certificates for pPDA). A basic certificate for ∆ = (Q, Γ, P) is a rational inductive upper bound ⃗u ∈ Q^{Q×Γ×Q}_{≥0} on the lfp of the return probabilities system ⃗f∆ (see Thm. 4). Basic certificates have the following properties:
+ – (Existence) For all ε > 0 there exists a basic certificate ⃗u with ||µ⃗f∆ − ⃗u||∞ ≤ ε if all maximal irreducible submatrices M of ∂ ⃗f̂∆(µ⃗f̂∆) satisfy ρ(M) < 1.
+ – (Complexity) Let β be the maximum number of bits used to encode any of the numerators and denominators of the fractions occurring in ⃗u ∈ Q^{Q×Γ×Q}_{≥0}. Then checking ⃗f∆(⃗u) ≤ ⃗u, i.e., whether ⃗u is a basic certificate for ∆, can be done in time polynomial in β and the size of ∆.
+ Proof. This proof closely follows the general idea of decomposed analysis of PPS [16].
+ We first address existence. Note that ⃗f∆ is guaranteed to be feasible, in fact ⃗0 ≤ µ⃗f∆ ≤ ⃗1. For all qZr with (µ⃗f∆)_qZr = 0 we set ⃗u_qZr = 0. By removing these variables from ⃗f∆ we obtain the clean PPS ⃗f̂∆ with ⃗0 ≺ µ⃗f̂∆.
+ Now consider the decomposition of ⃗f̂∆ into the subsystems induced by the strongly connected components of the graph G_⃗f̂∆: ⃗f̂¹∆, . . . , ⃗f̂ᵐ∆. Note that in these subsystems, some variables might only appear on the right-hand sides but not on the left (e.g. x1 = 0.5x1 + 0.5x2, x2 = 0.5x1 + 0.5x3). Since µ⃗f̂∆ ≻ ⃗0, there is a 1-1 correspondence between these subsystems and the maximal irreducible submatrices M_i of ∂ ⃗f̂∆(µ⃗f̂∆). More specifically, M_i = ∂ ⃗f̂ⁱ∆(µ⃗f̂∆)⁸. By assumption, ρ(M_i) < 1⁹.
+ Now assume w.l.o.g. that ⃗f̂¹∆ is a bottom SCC (i.e., in the dependency graph G_⃗f̂∆ there is no path from the variables in ⃗f̂¹∆ to any variable not in ⃗f̂¹∆). Then ⃗f̂¹∆ is a strongly connected PPS with ∂ ⃗f̂¹∆(µ⃗f̂∆) = ∂ ⃗f̂¹∆(µ⃗f̂¹∆) and we can apply Lemma 2(4) to obtain a rational ⃗u¹ with ⃗f̂¹∆(⃗u¹) ≤ ⃗u¹ and ||µ⃗f̂¹∆ − ⃗u¹||∞ ≤ ε (in fact, we can do this for any ε > 0).
+ Suppose we have done the above for all bottom SCCs and now start traversing the DAG of SCCs bottom-up, i.e., in reverse topological order. Let ⃗u be the bound we have constructed so far (i.e., ⃗u contains ⃗u¹ and the bounds from the other bottom SCCs as subvectors and is zero elsewhere). Note that we can always make ⃗u smaller while retaining the inductivity property. W.l.o.g. suppose that subsystem ⃗f̂²∆ is one of the first non-bottom SCCs in the reverse topological order. The idea is now to modify ⃗f̂²∆ to a strongly connected PPS ⃗f̃²_⃗u by replacing all variables that occur only in right-hand sides by their value in ⃗u. Clearly, lim_{⃗u→µ⃗f̂∆} ∂ ⃗f̃²_⃗u(µ⃗f̃²_⃗u) = ∂ ⃗f̂²∆(µ⃗f̂∆). This means we can choose ⃗u sufficiently close to µ⃗f̂∆ such that the spectral radius of ∂ ⃗f̃²_⃗u(µ⃗f̃²_⃗u) is strictly smaller than 1. We can then apply Lemma 2(4) to ⃗f̃²_⃗u to obtain a rational ⃗u² with ⃗f̃²_⃗u(⃗u²) ≤ ⃗u², with which we enlarge our current ⃗u.
+ We can repeat this scheme for all finitely many subsystems until we have constructed a rational ⃗u with ⃗f̃ⁱ_⃗u(⃗u) ≤ ⃗u for all i. Clearly, this ⃗u also satisfies ⃗f̂∆(⃗u) ≤ ⃗u. Finally, we may extend ⃗u by zero entries corresponding to the variables that are assigned zero in the lfp of the (not necessarily clean) ⃗f∆. This yields an inductive upper bound for ⃗f∆. We stress that in order to verify this bound, we neither have to clean ⃗f∆ nor do we have to compute the SCCs.
+ For complexity, observe that ⃗f∆ is cubic in the size of ∆ and that all polynomials in ⃗f∆ have degree at most 2. Since multiplication and addition of rational numbers can be done in polynomial time in the number of their bits, evaluating a polynomial of fixed maximum degree can also be done in polynomial time in the size of the polynomial and the number of bits representing the rationals at which the polynomial is to be evaluated. Note that this is not true for arbitrary polynomials where exponents are encoded in binary: For instance, evaluating the polynomial x^{2^n} (which can be represented with O(n) bits) at x = 2 yields 2^{2^n}, a number that needs O(2^n) bits. This means that in order to verify certificates efficiently with exact rational arithmetic, it is important that the polynomials in the PPS do not have very high degrees. Fortunately, this is the case for pPDA.
+ ⁸ The Jacobi matrix of a sub-PPS with n′ < n equations is an n′ × n′ matrix where all variables that occur only on the right-hand sides are considered constants.
+ ⁹ The spectral radius of the zero matrix is zero.
+ B Certificates for Expected Rewards
+ We can certify upper bounds on the expected value of rewards collected during the run of a pPDA. To simplify the presentation, in this section we assume w.l.o.g. that qZ −p→ rα with p > 0 implies |α| ∈ {0, 2}, i.e., all transitions either decrease or increase the stack height by 1. Let R: Q → R≥0 be a state-based reward function. Consider the following PPS ⃗f∆,R with variables {⟨EqZr⟩ | qZr ∈ Q × Γ × Q}:
+ ⟨EqZr⟩ = Σ_{qZ −p→ sYX} p · Σ_{t∈Q} [sY t] · [tXr] · K_{qZ,sYX} + Σ_{qZ −p→ rε} p · R(r) ,
+ where K_{qZ,sYX} = R(r) + ⟨EsY t⟩ + ⟨EtXr⟩. Note that ⃗f∆,R is linear but uses the return probabilities, which are themselves characterized as the lfp of the non-linear system ⃗f∆ from Theorem 4, as coefficients.
+ Suppose that in the lfp µ⃗f∆,R, each variable EqZr is assigned the quantity EqZr ∈ R≥0. It follows from the results of [14] that EqZr equals the expected value of the following random variable V^r_R under the probability measure P^qZ_∆:
+ V^r_R(q0γ0, q1γ1, . . .) = Σ_{i>0}^{firstHit(rε)} R(qi) ,
+ where firstHit(rε) is the minimum integer k such that qkγk = rε, or 0 if no such k exists. In words, EqZr is the expected reward accumulated on the runs from qZ to rε, where it is assumed that runs which never reach rε contribute zero reward. Consequently, E(qZ) = Σ_{r∈Q} EqZr is the expected reward accumulated on all terminating runs.
+ Example 7. Setting R = 1 we can characterize the expected runtime of pPDA. Reconsider Example 4. The equation system for expected runtimes becomes
+ ⟨EqZq⟩ = 1/4 ([qZq]²(1+2⟨EqZq⟩) + [qZr][rZq](1+⟨EqZr⟩+⟨ErZq⟩)) + 1/2
+ ⟨EqZr⟩ = 1/4 ([qZq][qZr](1+⟨EqZq⟩+⟨EqZr⟩) + [qZr][rZr](1+⟨EqZr⟩+⟨ErZr⟩)) + 1/4
+ as well as ⟨ErZq⟩ = 0 and ⟨ErZr⟩ = 1. Instantiating the return probabilities with the basic certificate from Example 5 yields ⟨EqZq⟩ = 59/82 ≈ 0.720 and ⟨EqZr⟩ = 2063/2624 ≈ 0.786, so the total expected runtime is E(qZ) ≈ 1.506.
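+ Numerically, the over-approximation scheme of this section amounts to one linear solve; a sketch for Example 7 with the basic certificate of Example 5 substituted for the coefficients (the variable ordering and matrix form are ours):
+ import numpy as np
+
+ qq, qr, rq, rr = 0.6, 0.5, 0.0, 1.0          # certificate entries u_qZr
+ # Unknowns (E_qZq, E_qZr); E_rZq = 0 and E_rZr = 1 are substituted.
+ A = np.array([[1 - 0.5 * qq**2,  -0.25 * qr * rq],
+               [-0.25 * qq * qr,  1 - 0.25 * (qq * qr + qr * rr)]])
+ b = np.array([0.25 * (qq**2 + qr * rq) + 0.5,
+               0.25 * (qq * qr + 2 * qr * rr) + 0.25])
+ E = np.linalg.solve(A, b)
+ print(E, E.sum())                            # ~[0.720, 0.786], total ~1.506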
+ C Benchmark Programs
+ void f() {
+   if flip(p) {
+     f();
+     f();
+   }
+ }
+ # main block
+ {
+   f();
+ }
+ (a) rw-p
+
+ void f() {
+   if flip(1//2) {
+     f();
+     f();
+     f();
+   }
+ }
+ # main block
+ {
+   f();
+ }
+ (b) golden
+
+ void offspring() {
+   while flip(2//5) {
+     offspring();
+     while flip(3//5) {
+       offspring();
+     }
+   }
+ }
+ # main block
+ {
+   offspring();
+ }
+ (c) geom-offspring
+
+ void gen_operator() {
+   uniform(4);
+ }
+ void gen_expression() {
+   prob {
+     4//10: uniform(10);
+     3//10: { }
+     3//10: {
+       gen_operator();
+       gen_expression();
+       gen_expression();
+     }
+   }
+ }
+ void gen_function() {
+   gen_operator();
+   gen_expression();
+   gen_expression();
+ }
+ # main block
+ {
+   gen_function();
+ }
+ (d) gen-fun
+
+ void young() {
+   int y = uniform(4);
+   while(y > 0) {
+     young();
+     y = y-1;
+   }
+   int e = uniform(3);
+   while(e > 0) {
+     elder();
+     e = e-1;
+   }
+ }
+ void elder() {
+   int y = uniform(2);
+   while(y > 0) {
+     young();
+     y = y-1;
+   }
+   int e = uniform(5);
+   while(e > 0) {
+     elder();
+     e = e-1;
+   }
+ }
+ # main block
+ {
+   young();
+ }
+ (e) virus
+
+ bool f() {
+   prob {
+     1//2:
+       return flip(1//2);
+     1//2:
+       if f()
+       {
+         return f();
+       } else {
+         return false;
+       }
+   }
+ }
+ # main block
+ {
+   bool res1 = f();
+   ...
+   bool resN = f();
+ }
+ (f) sequentialN
+
+ int f(int n, int m) {
+   prob {
+     (n+1)//(n+2) : {
+       f((n + 1) % m, m);
+       f((n + 1) % m, m);
+       return 0;
+     }
+     1//(n+2) :
+       return 0;
+   }
+ }
+ # main block
+ {
+   f(0, N);
+ }
+ (g) escapeN
+
+ void f(int n) {
+   while(n > 0) {
+     prob {
+       2//3: f(n-1);
+       1//3: f((n+1) % N);
+     }
+     n = n-1;
+   }
+ }
+ # main block
+ {
+   f(1);
+ }
+ (h) modN
+ D Z3 vs CVC5
+ Table 3: Comparison of the SMT-approach (see §Baselines in Section 5) using z3
2109
+ and cvc5 on SCFG given as explicit PPS (right), and on programs automatically
2110
+ translated to pPDA (left).
2111
+ benchmark
2112
+ certz3
2113
+ tz3
2114
+ certcvc5
2115
+ tcvc5
2116
+ rw-0.499
2117
+
2118
+ 11
2119
+
2120
+ 92
2121
+ rw-0.500
2122
+
2123
+ 10
2124
+
2125
+ 87
2126
+ rw-0.501
2127
+
2128
+ 12
2129
+
2130
+ 104
2131
+ geom-offspring
2132
+
2133
+ 16
2134
+
2135
+ 4687
2136
+ golden
2137
+
2138
+ 14
2139
+
2140
+ 1097
2141
+ and-or
2142
+
2143
+ 15260
2144
+
2145
+ TO
2146
+ gen-fun
2147
+
2148
+ 141
2149
+
2150
+ TO
2151
+ virus
2152
+
2153
+ 139
2154
+
2155
+ 163727
2156
+ escape10
2157
+
2158
+ 48
2159
+
2160
+ 12031
2161
+ escape25
2162
+
2163
+ 15958
2164
+
2165
+ TO
2166
+ escape50
2167
+
2168
+ 410
2169
+
2170
+ TO
2171
+ escape75
2172
+
2173
+ TO
2174
+
2175
+ TO
2176
+ escape100
2177
+
2178
+ 939
2179
+
2180
+ TO
2181
+ escape200
2182
+
2183
+ TO
2184
+
2185
+ TO
2186
+ sequential5
2187
+
2188
+ 1074
2189
+
2190
+ TO
2191
+ sequential7
2192
+
2193
+ 12822
2194
+
2195
+ TO
2196
+ sequential10
2197
+
2198
+ 453718
2199
+
2200
+ TO
2201
+ mod5
2202
+
2203
+ 34150
2204
+
2205
+ TO
2206
+ mod7
2207
+
2208
+ 443
2209
+
2210
+ TO
2211
+ mod10
2212
+
2213
+ 1245
2214
+
2215
+ TO
2216
+ benchmark
2217
+ certz3
2218
+ tz3
2219
+ certcvc5
2220
+ tcvc5
2221
+ brown
2222
+
2223
+ TO
2224
+
2225
+ TO
2226
+ lemonde
2227
+
2228
+ TO
2229
+
2230
+ TO
2231
+ negra
2232
+
2233
+ 37248
2234
+
2235
+ 10144
2236
+ swbd
2237
+
2238
+ TO
2239
+
2240
+ Error
2241
+ tiger
2242
+
2243
+ 17454
2244
+
2245
+ 16118
2246
+ tuebadz
2247
+
2248
+ 15323
2249
+
2250
+ 5534
2251
+ wsj
2252
+
2253
+ TO
2254
+
2255
+ TO
2256
+ random
2257
+
2258
+ TO
2259
+
2260
+ TO
2261
+
BdFAT4oBgHgl3EQfsR4z/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
C9E2T4oBgHgl3EQfSAeL/content/2301.03788v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a15dc7a546fcc890e687828e17af82c4b47fbb065f2190dde2b95995bfaa9c1b
3
+ size 2132876
C9E2T4oBgHgl3EQfSAeL/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f50899424fd3b5d9a3053cde77c105ad9737d821eeb3cf82535f76fc0499f7d4
3
+ size 3342381
C9FJT4oBgHgl3EQfAyyD/content/tmp_files/2301.11422v1.pdf.txt ADDED
@@ -0,0 +1,788 @@
RMSim: Controlled Respiratory Motion Simulation on Static Patient Scans

Donghoon Lee, Ellen Yorke, Masoud Zarepisheh, Saad Nadeem*, Yu-Chi Hu*
Department of Medical Physics, Memorial Sloan Kettering Cancer Center, New York, NY, USA
E-mail: {leed10,yorkee,zarepism,nadeems,huj}@mskcc.org
*Corresponding Authors

Objective: This work aims to generate realistic anatomical deformations from static patient scans. Specifically, we present a method to generate these deformations/augmentations via deep learning driven respiratory motion simulation that provides the ground truth for validating deformable image registration (DIR) algorithms and for driving more accurate deep learning based DIR.

Approach: We present a novel 3D Seq2Seq deep learning respiratory motion simulator (RMSim) that learns from 4D-CT images and predicts future breathing phases given a static CT image. The predicted respiratory patterns, represented by time-varying displacement vector fields (DVFs) at different breathing phases, are modulated through auxiliary inputs of 1D breathing traces so that a larger amplitude in the trace results in more significant predicted deformation. Stacked 3D-ConvLSTMs are used to capture the spatial-temporal respiration patterns. The training loss includes a smoothness loss on the DVF and the mean-squared error between the predicted and ground truth phase images. A spatial transformer deforms the static CT with the predicted DVF to generate the predicted phase image. 10-phase 4D-CTs of 140 internal patients were used to train and test RMSim. The trained RMSim was then used to augment a public DIR challenge dataset for training VoxelMorph, to show the effectiveness of RMSim-generated deformation augmentation.

Main results: We validated our RMSim output with both private and public benchmark datasets (healthy and cancer patients). The structural similarity index measure (SSIM) between predicted breathing phases and ground truth 4D-CT images was 0.92±0.04, demonstrating RMSim's potential to generate realistic respiratory motion. Moreover, the landmark registration error on a public DIR dataset was improved from 8.12±5.78 mm to 6.58±6.38 mm using RMSim-augmented training data.

Significance: The proposed approach can be used for validating DIR algorithms as well as for patient-specific augmentations to improve deep learning DIR algorithms. The code, pretrained models, and augmented DIR validation datasets will be released at https://github.com/nadeemlab/SeqX2Y. The supplementary video can be found at https://youtu.be/xIx8B_Q_R9o.

arXiv:2301.11422v1 [cs.CV] 26 Jan 2023
1. Introduction

Respiratory motion hampers accurate diagnosis as well as image-guided therapeutics. For example, during radiotherapy it may lead to poor local tumor control and increased radiation toxicity to the normal organs [1]. It can also exhibit itself as motion artifacts in the acquired images, making it difficult to differentiate nodule/tumor morphology changes from those induced by respiratory motion. This also makes the image registration task across different breathing phases, as well as across different time points, challenging. To validate image registration accuracy/performance for commissioning solutions available in clinical commercial systems, the American Association of Physicists in Medicine (AAPM) TG-132 report [2] recommended independent quality checks using digital phantoms. Current commercial solutions such as ImSimQA allow creation of synthetic deformation vector fields (DVFs) by user-defined transformations with only a limited degree of freedom. Such monotonic transformations cannot capture realistic respiratory motion.

For modeling respiratory motion, an intuitive representation of motion is time-varying displacement vector fields (DVFs) obtained by deformable image registration (DIR) on 4D images acquired over a breathing cycle. Surrogate-driven approaches [3] model the DVF as a function of a surrogate breathing signal. However, an exact and direct solution in the high-dimensional space of DVFs is computationally intractable. Still, motion surrogates have been widely studied in the field of radiotherapy for building models that establish the relationship between surrogates and the respiratory motion estimated from the image data [3]. For example, the 1D diaphragm displacement has been reported as a reliable surrogate for a tumor motion model [4] as well as for a PCA (principal component analysis) respiratory motion model to correct CT motion artifacts [5].

Recently, Romaguera et al. [6] used a 2D sequence-to-sequence (Seq2Seq) network [7] to predict 2D in-plane motion for a single future time point. Krebs et al. [8] applied a similar encoder-decoder network in a conditional variational autoencoder (cVAE) framework, in which network parameters were learned to approximate the distribution of deformations in a low-dimensional latent space with the encoder, and the latent features were decoded into 2D motion predictions with the decoder. Romaguera et al. [9] integrated VoxelMorph [10] to assist the VAE encoder in mapping deformations in latent space conditioned on anatomical features from 3D images. Temporal information of 2D surrogate cine images from a 2D Seq2Seq network was used to predict a 3D DVF at a single future time point.

In this paper, we present a novel deep learning respiratory motion simulator (RMSim) that learns to generate realistic patient-specific respiratory motion, represented by time-varying DVFs at different breathing phases, from a static 3D CT image. For the first time, we also allow modulation of this simulated motion via arbitrary 1D breathing traces given as auxiliary input, to create large variations. This in turn creates diverse patient-specific data augmentations while also generating ground truth for DIR validation. Our work has several differences and advantages over the aforementioned deep learning approaches: (1) we use a 3D Seq2Seq architecture, which to our knowledge had not been attempted before for predicting deformations due to GPU memory limitations; (2) we do not use VoxelMorph in its entirety but only its Spatial Transform module to train our model end-to-end; and (3) as opposed to predicting just a single future time point, we predict 9 future breathing phases simultaneously (learned from 4D-CT images with 10 3D CT breathing phases) along with their 3D DVFs. We have thoroughly validated our RMSim output with both private and public benchmark datasets (healthy and cancer patients) and demonstrated that adding our patient-specific augmentations to training data can improve the performance/accuracy of state-of-the-art deep learning DIR algorithms. We also showcase breathing trace-modulated respiratory motion simulations for public static radiology scans (in the accompanying supplementary video). The code, pretrained models, and augmented DIR validation datasets will be released at https://github.com/nadeemlab/SeqX2Y.
Figure 1. Schematic of the proposed deep learning model. The Seq2Seq encoder-decoder framework is the backbone of the proposed model. The model is built with 3D convolution layers for feature encoding and output decoding, and 3D convolutional Long Short-Term Memory (3D ConvLSTM) layers for spatial-temporal correlation between time points. The last layer of the decoder is a spatial transform layer that warps the initial phase image with the predicted Deformation Vector Field (DVF). To modulate the respiratory motions, the 1D breathing trace is given as input along with the initial phase image. The dimension of the image volume is 128 × 128 × 128 and the input feature to the 3D ConvLSTM is 64 × 64 × 64 × 96 (Depth × Width × Height × Channel).
2. Materials and Methods

2.1. Datasets

We used an internal lung 4D-CT dataset retrospectively collected and de-identified from 140 non-small cell lung cancer (NSCLC) patients receiving radiotherapy in our institution. The helical and cine mode 4D-CTs were acquired using Philips Brilliance Big Bore or GE Advantage scanners, respectively, and binned into 10 phases using the vendor's proprietary software with breathing signals from bellows or external fiducial markers. The x-ray energy for the CT images was 120 kVp and the tube current varied case by case according to vendor-specific tube current modulation based on patient size. The mAs range is [100, 400] for GE and [500, 800] for Philips. The image slice dimension was 512×512, while the number of image slices varied patient by patient. We used a 100:40 split for training:testing.

We used 20 cases of the Lung Nodule Analysis (LUNA) challenge dataset [11], containing 3D radiology CTs for lung tumor screening, to show that our RMSim model trained with the internal dataset can be effectively applied to an external radiology/diagnostic dataset to generate realistic respiration motions (see the accompanying supplementary video). For quantitative evaluation of model generality on an external dataset, we used the POPI dataset [12], which contains six 10-phase 4D-CTs with segmented lung masks as well as annotated landmarks on vessel and airway bifurcations.

To validate the effectiveness of data augmentation using synthetic respiratory motion images generated by our RMSim model in the deformable registration task, we used the Learn2Reg 2020 challenge dataset [13]. The Learn2Reg dataset consists of 30 subjects (20 for training / 10 for testing) with 3D CT thorax images taken at inhale and exhale phases. For each of the 20 Learn2Reg inhale/exhale training pairs, we generated the other phase images using our RMSim model trained with the internal dataset, thereby increasing the sample size to 200 in total to augment the training of a well-known unsupervised deep learning DIR method, VoxelMorph [10]. Unfortunately, the inhale-exhale landmarks are not publicly available in the Learn2Reg dataset to assess registration accuracy. For landmark evaluation in the registration task, we used the POPI dataset. A brief description/purpose of all the datasets used in this study is given in Table 1. All datasets used in this study were cropped to eliminate the background and resampled to 128×128×128 with 2 mm voxel size due to GPU memory constraints.
2.2. Realistic Respiratory Motion Simulation

Table 1. Datasets used in this study.

Dataset         | Size                           | Description                                                    | Purpose                                                                                        | Evaluation
Internal 4D-CTs | 140 (100 training, 40 testing) | 10-phase radiotherapy 4D-CTs                                   | Training and testing RMSim                                                                     | Image similarity
LUNA            | 20                             | Radiology CTs for lung nodule detection                        | Testing model generality                                                                       | Visualization and qualitative
POPI 4D-CTs     | 6                              | 10-phase 4D-CTs with landmarks                                 | Testing model generality (evaluating DVF accuracy)                                             | Target Registration Error (TRE) of landmarks
Learn2Reg       | 30 (20 training, 10 testing)   | Inspiration-expiration thorax CT pairs with lung segmentations | Training and testing RMSim-augmented deep learning deformable image registration (VoxelMorph)  | Lung segmentation (Dice score) and image similarity

Figure 2. Respiratory motion surrogate extraction using a diaphragm point that has the maximum superior-inferior displacement across the phases. LDDMM was used to register the phase 1 (fixed) image to the other phases (moving) to get the DVFs. The diaphragm point's trajectory along the z-axis (shown in red) across the phases is taken as the breathing trace. The yellow line shows the diaphragm position at phase 1.

Sequence-to-Sequence (Seq2Seq) is a many-to-many network architecture originally developed for natural language processing tasks such as language translation. Inspired by Seq2Seq, the proposed RMSim, illustrated in Figure 1, is a novel deep learning encoder-decoder architecture that comprises three main parts: 3D convolution, ConvLSTM3D (3D Convolutional Long Short-Term Memory), and a spatial transformation layer (adapted from VoxelMorph [10]). The 3D convolution in the encoder is used to reduce the matrix dimension and extract salient features from images. We used a 3×3×3 kernel size and a 2×2×2 stride to reduce the matrix dimension to 1/8. The number of channels for the 3D convolution layer is 96. An LSTM has a more complex cell structure than a neuron in a classical recurrent neural network (RNN). Apart from the cell state, it contains gate units that decide when to keep or override information in and out of memory cells, to better handle the gradient vanishing problem of recurrent neural networks. This helps in learning long-term dependencies. ConvLSTM [14] replaces the Hadamard product with convolution operators in the input as well as the state transitions, to capture the spatial pattern of the feature representations aggregated from different time points. We implemented ConvLSTM in 3D for handling the 3D phase images from the 4D-CT. We used two stacked ConvLSTM3D layers to make the network deeper, adding levels of abstraction to input observations similar to a typical deep neural network. The hidden state output from a ConvLSTM3D layer was fed to both the next layer in the same stack and the next-timepoint ConvLSTM3D layer. The output of the ConvLSTM3D in the decoder at each predicted time point was up-sampled to the original input resolution and the output channels were reduced via 3D convolution, resulting in the 3D DVF for the final output. The initial phase CT image was then deformed to a predicted phase image at a different breathing phase using the spatial transformation layer and the predicted 3D DVFs.
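A minimal sketch of this warping step, assuming a PyTorch spatial transformer in the style of VoxelMorph (written against a recent PyTorch; the tensor layout and normalization convention are illustrative, not the authors' exact implementation):

import torch
import torch.nn.functional as F

def warp(volume, dvf):
    """Warp a 3D volume with a dense displacement field.
    volume: (B, 1, D, H, W); dvf: (B, 3, D, H, W) in voxel units (z, y, x)."""
    B, _, D, H, W = volume.shape
    # Identity sampling grid in voxel coordinates, displaced by the DVF.
    zz, yy, xx = torch.meshgrid(
        torch.arange(D), torch.arange(H), torch.arange(W), indexing="ij")
    grid = torch.stack((zz, yy, xx)).float().unsqueeze(0) + dvf
    # grid_sample expects coordinates normalized to [-1, 1], ordered (x, y, z).
    for i, size in enumerate((D, H, W)):
        grid[:, i] = 2.0 * grid[:, i] / (size - 1) - 1.0
    grid = grid.permute(0, 2, 3, 4, 1).flip(-1)   # (B, D, H, W, 3)
    return F.grid_sample(volume, grid, align_corners=True)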
Moreover, to modulate the predicted motion with a patient-specific pattern, we used an auxiliary input of a 1D breathing trace. In this paper, we considered the amplitude of diaphragm apex motion as the surrogate of the respiratory signal [4]. The 1D breathing trace for each training case was extracted using the DVFs obtained from large deformation diffeomorphic metric mapping (LDDMM) DIR provided by ANTs (Advanced Normalization Tools). Specifically, using the DVF, the apex point of the diaphragm was propagated from the phase at the end of inhalation to the other phases to generate the 1D displacement trace. The apex of the diaphragm was determined by finding the lung surface voxel with the maximum superior-inferior (z-axis) displacement among the DVFs. The z-axis displacement of the apex voxel at each phase serves as the 1D breathing trace. Figure 2 describes the process of preparing the 1D respiratory signal.
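A sketch of this extraction, assuming the phase-1-to-phase-t DVFs and a lung surface mask are already available as arrays; the names are illustrative:

import numpy as np

def breathing_trace(dvfs_z, lung_surface):
    """dvfs_z: (n_phases, D, H, W) z-displacement of the phase-1 -> phase-t
    DVFs; lung_surface: boolean mask of lung surface voxels in phase 1."""
    # Apex = lung surface voxel with the largest superior-inferior motion.
    max_abs_z = np.abs(dvfs_z).max(axis=0) * lung_surface
    apex = np.unravel_index(np.argmax(max_abs_z), max_abs_z.shape)
    # The apex voxel's z-displacement per phase is the 1D breathing trace.
    return dvfs_z[(slice(None),) + apex]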
Feature-wise transformations, e.g., addition or multiplication, are simple and effective mechanisms to incorporate conditioning information from another data source into the features learned in the network. In this paper, the hidden state of the ConvLSTM at each phase is modulated by a simple element-wise multiplication with the phase amplitude of the trace:

    m(Ht, bt) = bt · Ht,                                  (1)

where Ht is the hidden state encoded from the sequence of phase images up to phase t and bt is the amplitude of the breathing trace at phase t.

The loss function for training includes the mean-squared error between the ground truth phase image and the predicted phase image, and a regularization on the gradient of the DVF that promotes smoothness of the DVF:

    Loss = Σ_{t>0} [ (Yt − T(X0, φt))² + ||∇φt||² ],       (2)

where X0 is the initial phase image (phase 1 in this paper), T is the spatial transform (adapted from VoxelMorph), φt is the predicted DVF for phase t, and Yt is the ground truth phase image at phase t.
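A minimal sketch of Eq. (2), assuming the predicted phase images T(X0, φt) have already been produced by the spatial transformer; the equal weighting of the two terms follows the equation as written:

import torch

def rmsim_loss(pred_phases, gt_phases, dvfs):
    """pred_phases, gt_phases: lists of (B, 1, D, H, W) tensors for t > 0;
    dvfs: list of (B, 3, D, H, W) predicted displacement fields."""
    loss = 0.0
    for y_pred, y_true, phi in zip(pred_phases, gt_phases, dvfs):
        mse = torch.mean((y_true - y_pred) ** 2)
        # Finite-difference gradients of the DVF along z, y, x.
        dz = phi[:, :, 1:, :, :] - phi[:, :, :-1, :, :]
        dy = phi[:, :, :, 1:, :] - phi[:, :, :, :-1, :]
        dx = phi[:, :, :, :, 1:] - phi[:, :, :, :, :-1]
        smooth = (dz ** 2).mean() + (dy ** 2).mean() + (dx ** 2).mean()
        loss = loss + mse + smooth
    return loss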
We developed RMSim using the PyTorch library (version 1.2.0). We used Adam for optimization and set the learning rate to 0.001 (as done in the original Seq2Seq paper [14]). Due to the large data size of a 4D image sequence (10 3D CT phase images constituting a single 4D-CT), the batch size was limited to 1 and the number of feature channels was 96, considering GPU memory and training time. The model was trained and tested on an internal high-performance computing cluster with 4 NVIDIA A40 GPUs with 48 GB memory each. Our model consumed 35.2 GB of GPU memory and the training time was approximately 72 hours. The inference time for 9 phases and 40 total test cases from the internal dataset was less than 3 minutes.
2.3. Data augmentation by RMSim

Since RMSim can generate a series of realistic respiratory motion-induced images from a single 3D CT, one of its use cases is data augmentation for training DIR algorithms. For each of the 20 training cases in the Learn2Reg Grand Challenge dataset [13], we randomly selected a 1D breathing trace from our internal dataset to modulate the motion on the Learn2Reg inhalation image and generate 9 additional phase images, increasing the training size 10-fold. We chose a popular deep learning DIR method, VoxelMorph, suitable for unsupervised training, for the purpose of validating the effectiveness of data augmentation. We first trained a VoxelMorph model with the original 20 inhalation-to-exhalation image pairs in the Learn2Reg training set. We then trained another VoxelMorph model with the augmented data including 200 pairs of inhalation-to-phase images. We compared the registrations from the two VoxelMorph models to validate the effectiveness of data augmentation.
2.4. Evaluation Metrics

For image similarity, we used the structural similarity index measure (SSIM) [15], which measures the similarity of two given images based on the degradation of structural information, including luminance, contrast and structure. The closer the SSIM value is to 1, the more similar the two images. SSIM was used for comparing RMSim-predicted phase images and ground truth phase images in the internal test cases. SSIM was also used for comparing deformable registration results from VoxelMorph to validate the data augmentation effectiveness on the Learn2Reg test cases, which additionally were evaluated with the provided lung segmentations using the Dice score to compare the ground truth lung contours and the propagated lung contours.

For landmark comparison in the POPI dataset, we used the Target Registration Error (TRE), defined as the Euclidean distance between a landmark position after spatial transformation and the target position.
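Both metrics are standard; a short sketch, assuming scikit-image for SSIM and landmark coordinates in physical (mm) units:

import numpy as np
from skimage.metrics import structural_similarity

def ssim3d(a, b):
    # SSIM between two 3D volumes; data_range taken from the ground truth.
    return structural_similarity(a, b, data_range=b.max() - b.min())

def tre(propagated, target):
    """Mean Euclidean distance between propagated and target landmarks,
    both given as (n_landmarks, 3) arrays in mm."""
    return np.linalg.norm(propagated - target, axis=1).mean()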
3. Results

For each test case in the internal 4D-CT dataset, we generated 9 simulated phase images from the ground truth phase 1 image by deforming the phase 1 image with the predicted DVF at each phase. We calculated SSIM to measure the image similarity (SSIMsim) between the simulated phase images and the ground truth phase images. For comparison, we also calculated the SSIM (SSIMgnd) between the ground truth phase 1 image and the rest of the ground truth phase images. The average SSIMsim was 0.92±0.04, compared to 0.86±0.08 for SSIMgnd (p < 0.01).

We also measured the diaphragm displacement error between the reference respiratory signal and the predicted signal (see Figure 3). As can be seen, the error increased from inhale to exhale phases. This is because prediction accuracy decreases at later time points. However, the overall displacement error was within 3 mm. Adding more realistic respiratory data for training can further reduce this displacement error.

Figure 3. The error between the reference respiratory signal (diaphragm displacement in mm) and the predicted signal.
To demonstrate the modulation flexibility of the 1D breathing traces, we applied different breathing traces to the same 3D CT image to generate different motion simulations, as shown in Figure 4. The plot on the top illustrates the two 1D breathing traces used for modulation. Breathing trace 1 (BT1), denoted by the orange line, represents the original respiratory signal for the case. BT2, denoted by the gray line, is a trace from another patient that was used to generate the simulated images. The white horizontal line indicates the position of the apex of the diaphragm in the initial phase (the first column). It is used as a reference to show the relative positions of the diaphragm at different phases. The diaphragm in the images in the upper row clearly shows more significant movement, as BT2 has higher amplitudes.

The amplitude range in our internal dataset was 0.14–40 mm. To validate the prediction performance on out-of-range displacements, we predicted additional sequences using a 5 times larger respiratory amplitude. The prediction using the 5 times larger respiratory signal achieves a higher diaphragm level, meaning the predicted respiration has larger fluctuation than with the original respiratory signal, but the increase was not proportional to the respiratory signal used for inference (see Figure 5).

The results of propagating anatomical structures using the predicted DVFs are also shown in Figure 4. We propagated the lung, heart, esophagus, and tumor from the initial phase image. The propagated contours are well matched with the predicted images and the motion of the structures looks very realistic. We also provide a supplementary video of the simulated 4D-CT along with the ground truth 4D-CT and 3D volume-rendered visualizations. Specifically, the 3D volume-rendered visualizations on the LUNA challenge dataset as well as internal lung radiotherapy datasets with structure propagation are included in the accompanying supplementary video, with chained predictions giving 60 phases for the LUNA challenge (radiology lung nodule) cases and 30 phases for the lung radiotherapy datasets.

Figure 4. Two different breathing traces, BT1 and BT2 shown in the plot, were used to simulate the respiration motion of an internal case, resulting in 2 series of modulated phase images according to the breathing traces. The diaphragm has larger displacement in images simulated with BT2 (upper row) than in images simulated with the shallower BT1 (bottom row). The white horizontal line indicates the position of the apex of the left diaphragm at the initial phase (left-most column). We also overlay the propagated lung (in yellow), heart (in red), esophagus (in blue) and tumor (in green) contours using the predicted DVFs.

In the POPI dataset, there is only one case that contains lung segmentations on all the phases. For this case, we extracted the 1D breathing trace from the lung segmentations as we did for our internal dataset. RMSim trained with our internal dataset predicted the remaining phases from the inhale phase with the modulation from the 1D breathing trace. The average TRE (Target Registration Error) of landmarks propagated with our predicted DVFs in this case was 0.92±0.64 mm, showing that RMSim can accurately predict the patient-specific motion from the patient's 1D breathing trace. Figure 6 shows the TRE results for all predicted phases in this case. For the three other 4D-CT cases in POPI there were no lung segmentation masks, so we performed semi-automatic lung segmentation for extracting the 1D breathing traces; the results are shown in Figure 7.
Figure 5. The predicted phase 5 images using different 1D respiratory signals. The blue line is the original respiratory signal, the orange line is 3 times the amplitude and the green line is 5 times the amplitude.

Figure 6. TRE results of all 9 phases from the 4D-CT case in POPI. RMSim trained with the internal dataset was able to achieve sub-mm accuracy in this external case.

Figure 7. Three other 4D-CT POPI cases including 10 phases with landmarks on each phase (TRE plots for the three cases given below). For each case, we show original and predicted phase images overlaid with the difference with respect to the original phase 1 input. In the original DIR_Validation_03 phase difference image, the diaphragm in the left lung (viewer's right) did not move due to the large tumor, but it does in our prediction (shown in red bounding boxes). This case does not detract from the goals of this paper, i.e. data augmentation and DIR validation. The difference in Case #1 appears minor because the breathing is shallower (less diaphragm movement), while Case #2 and Case #3 have larger differences due to deeper breathing.

Additionally, we used RMSim for augmenting the Learn2Reg Challenge dataset. The Dice score of lung segmentation for the 10 Learn2Reg testing cases using the VoxelMorph model without augmentation was 0.96 ± 0.01, while the model trained with RMSim data augmentation reached 0.97 ± 0.01 (p < 0.001 using the paired t-test). The SSIM between the warped images and the ground truth images was 0.88 ± 0.02 for the model without augmentation and 0.89 ± 0.02 (p < 0.001) for the model with augmentation.

To validate the improvement of DIR using VoxelMorph with augmentation, we propagated the landmark points from the inhale phase to the exhale phase for the 6 cases available in the POPI dataset and computed the TRE. On average, the pre-DIR TRE was 8.05±5.61 mm and VoxelMorph w/o augmentation gave 8.12±5.78 mm, compared to 6.58±6.38 mm for VoxelMorph with augmentation (p < 3e-48). The TRE comparison for all 6 cases is shown in Figure 8.
Figure 8. TRE results on the POPI dataset. VoxelMorph with RMSim augmentation outperformed VoxelMorph w/o augmentation in all 6 cases.

4. Discussion

In this work, we presented a 3D Seq2Seq network, referred to as RMSim, to predict patient-specific realistic motion induced/modulated with a 1D breathing trace. We successfully validated our RMSim output with both private and public benchmark datasets (healthy and cancer patients) and demonstrated that adding our patient-specific augmentations to training data can improve the performance/accuracy of state-of-the-art deep learning DIR algorithms. We also showcased breathing trace-modulated respiratory motion simulations for public static radiology scans. In this work, we predicted the motion in one breathing cycle. In the future, we will fine-tune our current model to predict multiple cycles in one shot. Possible solutions include making our model bi-directional and using cross-attention to improve temporal dynamics in a long sequence. Further research is needed to investigate the impact of training data augmentation on different image modalities such as 4D-MRI.

Another application of our work is in external radiotherapy treatment planning. RMSim-simulated 4D-CT can be used to delineate the internal target volume (ITV), which is the union of the target volumes in all respiratory phases. The entire ITV is irradiated in radiation therapy to ensure all regions of the tumor receive enough radiation. There is a more sophisticated alternative to ITV, referred to as robust treatment planning, where the key idea is to model the motion and directly incorporate it into the planning [16]. This typically can be done by assuming a probability density function (PDF) for the position of the target and doing plan optimization based on that [17, 18]. It is also possible to assume a set of possible motion PDFs to account for uncertainty in breathing and plan accordingly [19, 20]. The simulated 4D-CT can be used to extract the motion PDF or a set of motion PDFs from varied breathing patterns exhibited by the patient.
An additional interesting future direction is the extension of our earlier work on exhaustively simulating physics-based artifacts in CT and CBCT images for more robust cross-modal deep learning translation, segmentation, and motion-correction algorithms [21, 22, 23], available via our Physics-ArX library (https://github.com/nadeemlab/Physics-ArX). Specifically, in our previous work we presented a proof-of-concept pipeline for physics-based motion artifact simulation in CT/CBCT images using 4D-CT phases [22]. Using the method proposed in the current paper, we can generate and modulate large/diverse 4D-CT phases from any static 3D CT scan using the 1D RPM signal. These generated 4D-CT variations can then be used to produce large realistic motion-artifact variations via our earlier pipeline [22].

Limitations: For simplicity, we used the maximal displacement on the diaphragm as the surrogate of the clinical breathing trace to drive the modulation. We assume (1) that the breathing pattern is regular, since we extracted the diaphragm displacements from amplitude-binned 4D-CT, and (2) that regional DVFs scale linearly with diaphragm motion. Note that a 1D breathing trace might not represent the actual cardiac motion. Because of GPU memory constraints, our input and output dimension was limited to 128×128×128. Nevertheless, precise estimation of motion is not required for providing realistic motion-induced ground truth DVFs for the validation of DIR algorithms and for data augmentation for training DIR algorithms, as shown in this work. To extend our work to tumor tracking during radiation treatment, we will use the signals from an actual external real-time motion management (RPM) device to drive the modulation more precisely. We will also explore incorporating 2D MV/kV projections acquired during the treatment to infer more realistic cardiac/tumor motion.
Acknowledgements

This work was supported partially by NCI/NIH P30 CA008748.

Conflict of interest

We have no conflict of interest to declare.

Code Availability Statement

The code, pretrained models, and augmented DIR validation datasets will be released at https://github.com/nadeemlab/SeqX2Y.

Data Availability Statement

The public datasets used in this study and their URLs are as follows: (1) Learn2Reg Challenge Lung CT dataset (Empire10 Challenge Dataset): https://drive.google.com/drive/folders/1yHWLQEK9c1xzggkCC4VX0X4To7BBDqu5, (2) LUNA challenge dataset (subset0.zip): https://zenodo.org/record/3723295, (3) DIR Validation POPI Dataset (6 4D-CT patients with landmarks): https://www.creatis.insa-lyon.fr/rio/dir_validation_data, and (4) POPI model dataset (one 4D-CT patient dataset with landmarks on all phases as well as lung segmentation mask): https://www.creatis.insa-lyon.fr/rio/popi-model_original_page.
References

[1] Jason K. Molitoris, Tejan Diwanji, James W. Snider III, Sina Mossahebi, Santanu Samanta, Shahed N. Badiyan, Charles B. Simone II, and Pranshu Mohindra. Advances in the use of motion management and image guidance in radiation therapy treatment for lung cancer. Journal of Thoracic Disease, 10(21), 2018.
[2] Kristy K. Brock, Sasa Mutic, Todd R. McNutt, Hua Li, and Marc L. Kessler. Use of image registration and fusion algorithms and techniques in radiotherapy: Report of the AAPM radiation therapy committee task group no. 132. Medical Physics, 44(7):e43–e76, 2017.
[3] J.R. McClelland, D.J. Hawkes, T. Schaeffter, and A.P. King. Respiratory motion models: A review. Medical Image Analysis, 17(1):19–42, 2013.
[4] Laura I. Cerviño, Alvin K. Y. Chao, Ajay Sandhu, and Steve B. Jiang. The diaphragm as an anatomic surrogate for lung tumor motion. Physics in Medicine and Biology, 54(11):3529–3541, May 2009.
[5] Qinghui Zhang, Alex Pevsner, Agung Hertanto, Yu-Chi Hu, Kenneth E. Rosenzweig, C. Clifton Ling, and Gig S. Mageras. A patient-specific respiratory model of anatomical motion for radiation treatment planning. Medical Physics, 34(12):4772–4781, 2007.
[6] Liset Vázquez Romaguera, Rosalie Plantefève, Francisco Perdigón Romero, François Hébert, Jean-François Carrier, and Samuel Kadoury. Prediction of in-plane organ deformation during free-breathing radiotherapy via discriminative spatial transformer networks. Medical Image Analysis, 64:101754, 2020.
[7] Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. Sequence to sequence learning with neural networks. In Proceedings of the 27th International Conference on Neural Information Processing Systems - Volume 2, NIPS'14, pages 3104–3112, Cambridge, MA, USA, 2014. MIT Press.
[8] Julian Krebs, Tommaso Mansi, Nicholas Ayache, and Hervé Delingette. Probabilistic motion modeling from medical image sequences: Application to cardiac cine-MRI. In Mihaela Pop, Maxime Sermesant, Oscar Camara, Xiahai Zhuang, Shuo Li, Alistair Young, Tommaso Mansi, and Avan Suinesiaputra, editors, Statistical Atlases and Computational Models of the Heart. Multi-Sequence CMR Segmentation, CRT-EPiggy and LV Full Quantification Challenges, pages 176–185, Cham, 2020. Springer International Publishing.
[9] Liset Vázquez Romaguera, Tal Mezheritsky, Rihab Mansour, Jean-François Carrier, and Samuel Kadoury. Probabilistic 4D predictive model from in-room surrogates using conditional generative networks for image-guided radiotherapy. Medical Image Analysis, 74:102250, 2021.
[10] Guha Balakrishnan, Amy Zhao, Mert R. Sabuncu, John Guttag, and Adrian V. Dalca. VoxelMorph: a learning framework for deformable medical image registration. IEEE Transactions on Medical Imaging, 38(8):1788–1800, 2019.
[11] Arnaud Arindra Adiyoso Setio, Alberto Traverso, Thomas de Bel, Moira S.N. Berens, Cas van den Bogaard, Piergiorgio Cerello, Hao Chen, Qi Dou, Maria Evelina Fantacci, Bram Geurts, Robbert van der Gugten, Pheng Ann Heng, Bart Jansen, Michael M.J. de Kaste, Valentin Kotov, Jack Yu-Hung Lin, Jeroen T.M.C. Manders, Alexander Sóñora-Mengana, Juan Carlos García-Naranjo, Evgenia Papavasileiou, Mathias Prokop, Marco Saletta, Cornelia M. Schaefer-Prokop, Ernst T. Scholten, Luuk Scholten, Miranda M. Snoeren, Ernesto Lopez Torres, Jef Vandemeulebroucke, Nicole Walasek, Guido C.A. Zuidhof, Bram van Ginneken, and Colin Jacobs. Validation, comparison, and combination of algorithms for automatic detection of pulmonary nodules in computed tomography images: The LUNA16 challenge. Medical Image Analysis, 42:1–13, 2017.
[12] Jef Vandemeulebroucke, Simon Rit, Jan Kybic, Patrick Clarysse, and David Sarrut. Spatiotemporal motion estimation for respiratory-correlated imaging of the lungs. Medical Physics, 38(1):166–178, 2011.
[13] Alessa Hering, Keelin Murphy, and Bram van Ginneken. Learn2Reg Challenge: CT Lung Registration - Training Data, May 2020.
[14] Xingjian Shi, Zhourong Chen, Hao Wang, Dit-Yan Yeung, Wai-kin Wong, and Wang-chun Woo. Convolutional LSTM network: A machine learning approach for precipitation nowcasting. In C. Cortes, N. Lawrence, D. Lee, M. Sugiyama, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 28. Curran Associates, Inc., 2015.
[15] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600–612, 2004.
[16] Jan Unkelbach, Markus Alber, Mark Bangert, Rasmus Bokrantz, Timothy C. Y. Chan, Joseph O. Deasy, Albin Fredriksson, Bram L. Gorissen, Marcel van Herk, Wei Liu, Houra Mahmoudzadeh, Omid Nohadani, Jeffrey V. Siebers, Marnix Witte, and Huijun Xu. Robust radiotherapy planning. Physics in Medicine & Biology, 63(22):22TR02, November 2018.
[17] Eelco Lens, Alexis N.T.J. Kotte, Ajay Patel, Hanne D. Heerkens, Matthieu Bal, Geertjan van Tienhoven, Arjan Bel, Astrid van der Horst, and Gert J. Meijer. Probabilistic treatment planning for pancreatic cancer treatment: prospective incorporation of respiratory motion shows only limited dosimetric benefit. Acta Oncologica, 56(3):398–404, 2017.
[18] W. Tyler Watkins, Joseph A. Moore, James Gordon, Geoffrey D. Hugo, and Jeffrey V. Siebers. Multiple anatomy optimization of accumulated dose. Medical Physics, 41(11):111705, 2014.
[19] Emily Heath, Jan Unkelbach, and Uwe Oelfke. Incorporating uncertainties in respiratory motion into 4D treatment plan optimization. Medical Physics, 36(7):3059–3071, 2009.
[20] Thomas Bortfeld, Timothy C.Y. Chan, Alexei Trofimov, and John N. Tsitsiklis. Robust management of motion uncertainty in intensity-modulated radiation therapy. Operations Research, 56(6):1461–1473, 2008.
[21] Sadegh R. Alam, Tianfang Li, Pengpeng Zhang, Si-Yuan Zhang, and Saad Nadeem. Generalizable cone beam CT esophagus segmentation using physics-based data augmentation. Physics in Medicine & Biology, 66(6):065008, 2021.
[22] Sadegh R. Alam, Tianfang Li, Si-Yuan Zhang, Pengpeng Zhang, and Saad Nadeem. Physics-based motion artifact simulation in CT/CBCT images using 4DCT phases. AAPM'21 abstract, 2021.
[23] Navdeep Dahiya, Sadegh R. Alam, Pengpeng Zhang, Si-Yuan Zhang, Tianfang Li, Anthony Yezzi, and Saad Nadeem. Multitask 3D CBCT-to-CT translation and organs-at-risk segmentation using physics-based data augmentation. Medical Physics, 48(9):5130–5141, 2021.
C9FJT4oBgHgl3EQfAyyD/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
C9FKT4oBgHgl3EQfYi5Z/content/2301.11799v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e1cf4ff29ea2f8599b27e82683916e704e1e8524d78b1c1b653bf2126953380
3
+ size 554045
C9FKT4oBgHgl3EQfYi5Z/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:826d4cc5aed0410351f91c2dec8fe6274aee1e28fa6d307d9c5019fb79afd19b
3
+ size 2490413
C9FKT4oBgHgl3EQfYi5Z/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f60c8e39a40d5782119316e9ac5a2545e520596f4d598767ee715be0bea8113
3
+ size 99866
CtE2T4oBgHgl3EQfoAiM/content/tmp_files/2301.04014v1.pdf.txt ADDED
@@ -0,0 +1,461 @@
arXiv:2301.04014v1 [quant-ph] 9 Jan 2023

Ozawa's Intersubjectivity Theorem as objection to QBism individual agent perspective

Andrei Khrennikov
Linnaeus University, International Center for Mathematical Modeling in Physics and Cognitive Sciences, Växjö, SE-351 95, Sweden

January 11, 2023

Abstract

QBism's foundational statement that "the outcome of a measurement of an observable is personal" stands in direct contradiction to Ozawa's Intersubjectivity Theorem (OIT). The latter, proven within the quantum formalism, states that two observers (agents, in QBism terminology) performing a joint measurement of the same observable A on a system S in the state ψ should get the same outcome A = x. In Ozawa's terminology, this outcome is intersubjective, and it cannot be treated as personal. This is a strong objection to QBism, which cannot survive without updating its principles. The essential point in understanding the impact of OIT on QBism's foundations is the notion of a quantum observable. This paper therefore includes a complementary discussion highlighting the difference between accurate (von Neumann) and inaccurate (noisy) quantum observables, which are represented by PVMs and POVMs, respectively. Moreover, we discuss the impact of OIT on the Copenhagen interpretation of quantum mechanics.
1 Introduction

In this paper I advance my critical analysis of QBism's foundations (see, e.g., [1]–[4] for QBism basics). This paper, as well as my two previous articles [5, 6], directly critiques the individual agent perspective on measurement outcomes [7]. My previous appraisal convinced QBists to specify the level of agent individuality: in contrast to general subjective probability theory, the class of agents should be restricted, at least to agents who have been educated in the basics of quantum theory. So, Ivan who lives in a Siberian village, a busy hunter, cannot be treated as a QBism agent.

Now I intend to challenge QBism by using Ozawa's Intersubjectivity Theorem (OIT) [8]. QBism's statement that "the outcome of a measurement of an observable is personal" stands in direct contradiction to OIT. This theorem is not so widely known, and one of the present paper's intentions is to advertise it. OIT states that two observers (agents, in QBism terminology) performing a joint measurement of the same observable A on a system S in the state ψ should register the same outcome A = x with probability one. Hence, the outcome is intersubjective [8], and it is unnatural to consider outcomes of quantum observations as an agent's personal experiences.

OIT is proven within the quantum formalism; it is a rigorous mathematical statement. But, as with many theorems having a quantum foundational impact, its interpretation is not straightforward. The analysis of the impact of OIT on QBism is coupled to the foundations of quantum measurement theory, and especially to the notion of a quantum observable. Therefore, this paper comprises a complementary discussion highlighting the difference between accurate (von Neumann) and inaccurate (noisy) quantum observables, mathematically represented by projection valued measures (PVMs) and positive operator valued measures (POVMs), respectively. OIT is about agents who are able to perform joint accurate measurements. For such agents, a measurement's outcome loses its personalization, in favour of intersubjectivity.

The conclusion of our analysis is that QBism should update its ideology by taking OIT into consideration. But how? See section 6. Thus, I am in line with the criticism of QBism presented in article [8]. However, I depart from its conclusion that OIT contradicts the Copenhagen interpretation; in contrast, OIT peacefully coexists with this interpretation. It is relevant to recall here that QBism fundamentally differs from the Copenhagen interpretation [2].

We begin with the mathematical formulation of OIT and its proof, keeping the presentation brief (see [8] for details). The indirect measurement scheme is the heart of OIT. We first recall the notion of a quantum observable, namely a Hermitian operator or PVM, the notion of a generalized quantum observable (POVM), and the indirect measurement scheme for generating the latter.
2 Quantum observables vs. generalized quantum observables

In quantum mechanics' axiomatics, von Neumann [9] introduced quantum observables as Hermitian operators acting in the complex Hilbert space H, the state space of a system.¹ The spectral decomposition is the essential part of this framework. We restrict considerations to observables represented by operators with totally discrete spectra X ⊂ R. Here

    A = Σ_x x E^A(x),                                  (1)

where E^A(x) is the projection on the eigensubspace corresponding to the eigenvalue x; these projectors form a resolution of unity:

    I = Σ_x E^A(x).                                    (2)

The Born rule determines the probabilities of the outcomes of measurements for a system S in the state ψ:

    P(A = x|ψ) = ⟨ψ|E^A(x)|ψ⟩.                          (3)

Later, generalized quantum observables were invented. Such observables are represented by POVMs. We restrict considerations to POVMs with a discrete domain of definition X. A POVM is a map x → Π(x): for each x ∈ X, Π(x) is a positive contractive self-adjoint operator (i.e., 0 ≤ Π(x) ≤ I), called an effect, and the effects form a resolution of unity:

    Σ_x Π(x) = I.                                      (4)

This map defines an operator valued measure on the algebra of all subsets of the set X: for O ⊂ X, Π(O) = Σ_{x∈O} Π(x). Condition (4) is the operator-measure counterpart of the normalization condition for usual probability measures.

A POVM Π represents the statistics of measurements with the following generalization of the Born rule:

    P(Π = x|ψ) = ⟨ψ|Π(x)|ψ⟩.                            (5)

We remark that equality (4) implies that Σ_x P(Π = x|ψ) = 1. Any quantum observable A can also be represented as a POVM of the special type, namely the PVM E^A = (E^A(x)).

Quantum observables given by PVMs were interpreted by von Neumann [9] as describing accurate measurements, and generalized observables given by POVMs which are not PVMs are interpreted as representing inaccurate measurements. In von Neumann's framework [9], the notion of measurement precision was not completely formalized; only recently was a consistent formalization of this notion presented in [11].

We shall keep firmly to the expression "quantum observable" for an observable axiomatically introduced by von Neumann [9] and represented by a PVM, and to the expression "generalized quantum observable" for a POVM.

¹ Why did he select Hermitian operators for the mathematical representation of observables in quantum theory? Moreover, he considered only such observables as the genuine quantum observables. I guess that he followed Schrödinger's quantization rule for the position and momentum observables, which are realized by Hermitian operators in L2-space. This rule implies that each classical observable given by a real-valued function A = A(q, p) on the phase space is represented as a Hermitian operator in L2-space.
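A minimal numerical illustration of the Born rules (3) and (5) for a qubit; the noise level eps and the specific unsharp POVM are illustrative choices, not taken from the paper:

import numpy as np

# Accurate qubit observable A = (+1)|0><0| + (-1)|1><1| (a PVM) and a
# noisy version of it (a POVM that is not a PVM).
E = {+1: np.diag([1.0, 0.0]), -1: np.diag([0.0, 1.0])}       # PVM
eps = 0.1                                                    # hypothetical noise
Pi = {x: (1 - eps) * E[x] + eps * E[-x] for x in (+1, -1)}   # POVM, sums to I

psi = np.array([1.0, 0.0])   # state prepared in the +1 eigenstate

born = lambda effect: float(np.real(psi.conj() @ effect @ psi))
print({x: born(E[x]) for x in (+1, -1)})    # accurate: {+1: 1.0, -1: 0.0}
print({x: born(Pi[x]) for x in (+1, -1)})   # noisy:    {+1: 0.9, -1: 0.1}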
153
3 Generalized quantum observables from the indirect measurement scheme

The indirect measurement scheme involves the following components:

• the state spaces H and K of the system S and the apparatus M used for the measurement of some observable A;
• the evolution operator U = U(t) representing the interaction dynamics of the compound system S + M;
• the meter observable M giving the outputs of the pointer of the apparatus M.

Here the quantum observables A and M are represented by PVMs, E^A = (E^A(x)) and E^M = (E^M(x)), where E^A(x) and E^M(x) are projections in the Hilbert spaces H and K, respectively. It is assumed that the compound system’s evolution is driven by the Schrödinger equation, so the evolution operator is unitary.

Formally, an indirect measurement model for an observable A, introduced in [10] as a “measuring process”, is a quadruple

(K, |ξ⟩, U, M),

where |ξ⟩ ∈ K represents the apparatus state.
We explore the Heisenberg picture. To describe the meter’s evolution, we represent it in the state space of the compound system, i.e., as I ⊗ M. The meter observable evolves as

M(t) = U*(t)(I ⊗ M)U(t).   (6)

By the Born rule,

P(M(t) = x|ψξ) = ⟨ψξ|E^{M(t)}(x)|ψξ⟩,   (7)

where ψξ denotes the product state ψ ⊗ ξ. This is the probability distribution for the outputs of measurements performed by the apparatus and read off the meter. In principle, one can ignore the representation of the measurement process as a system–apparatus interaction and operate solely with the system’s states. In this picture one proceeds with generalized observables given by POVMs. The meter observable generates the POVM Π = (Π(x)),

Π(x) = ⟨ξ|E^{M(T)}(x)|ξ⟩,   (8)

where T is the time needed to complete the experiment; here the partial matrix element ⟨ξ| · |ξ⟩ is taken over K, so that Π(x) is an operator acting in H. The probability distribution of the generalized observable given by this POVM is determined by (5).

Generally the probability distribution generated by a measurement process does not coincide with the probability distribution of the quantum observable A for which this process was constructed, i.e., generally

P(Π = x|ψ) = ⟨ψ|Π(x)|ψ⟩ ≠ P(A = x|ψ) = ⟨ψ|E^A(x)|ψ⟩.   (9)

We remark that, as was proven by Ozawa [10], any generalized observable (POVM) can be generated via the indirect measurement scheme. Typically one operates solely with generalized observables, ignoring the indirect measurement scheme. This simplifies considerations, but it can lead to misunderstanding of the foundations of quantum measurement theory.
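The construction (6)–(8) can be checked numerically. Below is a minimal sketch, my illustration rather than anything from the original text: a CNOT-type interaction U on H ⊗ K = C² ⊗ C², a meter read in the computational basis of K, and the induced POVM recovered as operators on H via (8).

```python
import numpy as np

# System H = C^2, apparatus K = C^2; U is a CNOT-type interaction (illustrative).
U = np.array([[1, 0, 0, 0],
              [0, 1, 0, 0],
              [0, 0, 0, 1],
              [0, 0, 1, 0]], dtype=complex)

xi = np.array([1, 0], dtype=complex)             # apparatus ready state |xi>
EM = {0: np.diag([1, 0]).astype(complex),        # meter PVM E^M(x) in K
      1: np.diag([0, 1]).astype(complex)}

basis = np.eye(2, dtype=complex)
Pi = {}
for x, Ex in EM.items():
    EMT = U.conj().T @ np.kron(np.eye(2), Ex) @ U        # Heisenberg meter (6)
    # Partial matrix element (8): Pi(x)_{ij} = <e_i xi| E^{M(T)}(x) |e_j xi>.
    Pi[x] = np.array([[np.vdot(np.kron(ei, xi), EMT @ np.kron(ej, xi))
                       for ej in basis] for ei in basis])

assert np.allclose(sum(Pi.values()), np.eye(2))          # resolution of unity (4)

psi = np.array([3, 4], dtype=complex) / 5.0
print({x: np.vdot(psi, P @ psi).real for x, P in Pi.items()})
```

For this particular U the induced POVM happens to be sharp; a noisier coupling would yield genuine effects rather than projections.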
4 Probability reproducibility condition

Definition. A measurement process (K, |ξ⟩, U, M) reproduces the probability distribution of a quantum observable A (an accurate von Neumann observable) if

P(A = x|ψ) = P(M(T) = x|ψξ).   (10)

In this case

⟨ψξ|E^{M(T)}(x)|ψξ⟩ = ⟨ψ|E^A(x)|ψ⟩,   (11)

or

⟨ψ|Π(x)|ψ⟩ = ⟨ψ|E^A(x)|ψ⟩.   (12)

Since (12) holds for every state ψ and both operators are self-adjoint, it follows that

Π(x) = E^A(x).

Proposition. The probability reproducibility condition for a measurement process is equivalent to the representation of the corresponding generalized observable by the PVM E^A of the measured quantum observable A.
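Continuing the CNOT sketch of the previous section (same illustrative assumptions, with `Pi` as computed there), one can verify that the induced POVM coincides with the PVM of the measured observable, i.e., that this model is probability reproducible in the sense of (10)–(12):

```python
# Continuing the previous sketch: A = Pauli Z, with outcomes relabelled 0, 1.
EA = {0: np.diag([1, 0]).astype(complex),
      1: np.diag([0, 1]).astype(complex)}

for x in EA:
    assert np.allclose(Pi[x], EA[x])   # Pi(x) = E^A(x), as in the Proposition
print("probability reproducibility holds for this model")
```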
5 Intersubjectivity of outcomes of quantum observables

Following [8], consider two remote observers O1 and O2 who perform joint measurements on a system S; in mathematical terms this means that the meter quantum observables of the corresponding measurement processes commute:

[M1(t), M2(t)] = 0.

Here each apparatus has its own state space, i.e., K = K1 ⊗ K2. We call such measurements local. In this situation the joint probability distribution is well defined:

P(M1(t) = x, M2(t) = y|ψξ1ξ2) = ⟨ψξ1ξ2|E^{M1(t)}(x)E^{M2(t)}(y)|ψξ1ξ2⟩.   (13)

Suppose that both observers perform accurate measurements of the quantum observable A given by the PVM E^A = (E^A(x)). Then the corresponding POVMs Πj, j = 1, 2, coincide with E^A:

Π1(x) = Π2(x) = E^A(x).   (14)

This equality implies:

Theorem (OIT [8]). Two observers performing joint local and probability reproducible measurements of the same quantum observable A on the system S get the same outcome with probability 1:

P(M1(T) = x, M2(T) = y|ψξ1ξ2) = δ(x − y)P(A = x|ψ) = δ(x − y)∥E^A(x)ψ∥².   (15)
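A numerical illustration of the theorem under the same toy assumptions: the qubit S is coupled by CNOT-type interactions to two apparatus qubits, one per observer, and each meter is read in its own factor of K = K1 ⊗ K2. The joint distribution (13) comes out diagonal, so the outcomes coincide with probability 1. The helper `cnot` is, of course, my own illustrative construction.

```python
import numpy as np

def cnot(control, target, n=3):
    """CNOT on n qubits (bit 0 is the most significant); illustrative helper."""
    d = 2 ** n
    U = np.zeros((d, d), dtype=complex)
    for b in range(d):
        bits = [(b >> (n - 1 - k)) & 1 for k in range(n)]
        if bits[control] == 1:
            bits[target] ^= 1
        U[int("".join(map(str, bits)), 2), b] = 1
    return U

# Qubit 0 = system S; qubits 1 and 2 = apparatuses of O1 and O2, both in |0>.
U = cnot(0, 2) @ cnot(0, 1)
psi = np.array([3, 4], dtype=complex) / 5.0
state = U @ np.kron(psi, np.kron([1, 0], [1, 0]))

P = np.zeros((2, 2))
for x in range(2):
    for y in range(2):
        Ex = np.diag([1 - x, x]).astype(complex)      # E^{M1(T)}(x) in K1
        Ey = np.diag([1 - y, y]).astype(complex)      # E^{M2(T)}(y) in K2
        E = np.kron(np.eye(2), np.kron(Ex, Ey))
        P[x, y] = np.vdot(state, E @ state).real
print(P)   # diagonal matrix: the off-diagonal joint probabilities vanish
```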
6 Intersubjectivity challenges QBism

We start with the following citation of Fuchs and Schack [2]:

“The fundamental primitive of QBism is the concept of experience. According to QBism, quantum mechanics is a theory that any agent can use to evaluate her expectations for the content of her personal experience. ...

In QBism, a measurement is an action an agent takes to elicit an experience. The measurement outcome is the experience so elicited. The measurement outcome is thus personal to the agent who takes the measurement action. In this sense, quantum mechanics, like probability theory, is a single user theory. A measurement does not reveal a pre-existing value. Rather, the measurement outcome is created in the measurement action.”

However, OIT implies that, for accurate local observables, the measurement outcome is intersubjective, which is a strong objection to QBism. There is nothing here concerning personal experiences, and QBists should respond to this objection. My suggestion (see also [7]) is to follow Brukner’s work [12], where he proceeds not with individual agents and their personal experiences, but with a universal agent. I remark that the consideration of universal agents is common in the general theory of decision making. However, for QBists, such a solution seems to be unacceptable, since it would destroy the consistency of QBism’s private agency perspective. It would move QBism closer to the Zeilinger–Brukner information interpretation of quantum mechanics [13, 14, 15].

This objection to QBism is foundationally interesting, and it generates a discussion on the notion of quantum observable. Due to the efforts of Helstrom, Holevo, and Ozawa [16]–[19], [10], generalized quantum observables, mathematically represented by POVMs, became one of the basic tools of quantum information theory. Nowadays the special role of accurate observables represented by PVMs is not emphasized. In particular, the notion of observable in QBism is identified with the generalized quantum observable given by a POVM. However, the clash between QBism and OIT stimulates highlighting the accurate PVM-observables as the genuine quantum observables, and treating the generalized quantum observables which are not PVMs as imprecise, noisy ones. Of course, this is a well-known distinction, but the clash between OIT and QBism is a good occasion to emphasize it.

What does this difference between accurate PVM and noisy POVM observables mean for QBism?

I have the following picture of the situation. OIT holds only for the accurate PVM-observables; for generalized quantum observables, it can be violated, and generally it is impossible to assign the same value to the measurement outcomes of the observers O1 and O2. Thus, the QBism ideology of the personal experiences of observers (agents) can still be kept for such generalized observables. But where does individuality come from? The personal experiences come from noise! So, different observers performing inaccurate measurements are coupled to different noisy environments. This is just my personal view on the consequences of OIT for QBism.

In conclusion, QBism might respond to the OIT-challenge by considering the universal agent who is able to perform accurate measurements; the individuality of agents’ experience is reduced to the individuality of the noise generated in the process of measurement.
7 Intersubjectivity and the Copenhagen interpretation

By the Copenhagen interpretation (at least by its Bohr’s version²), measurements’ outcomes cannot be treated as objective properties of a system S. They are the results of the complex process of interaction between a system and an apparatus; see Bohr [21]:

“This crucial point ... implies the impossibility of any sharp separation between the behaviour of atomic objects and the interaction with the measuring instruments which serve to define the conditions under which the phenomena appear. In fact, the individuality of the typical quantum effects finds its proper expression in the circumstance that any attempt of subdividing the phenomena will demand a change in the experimental arrangement introducing new possibilities of interaction between objects and measuring instruments which in principle cannot be controlled. Consequently, evidence obtained under different experimental conditions cannot be comprehended within a single picture, but must be regarded as complementary in the sense that only the totality of the phenomena exhausts the possible information about the objects.”

The indirect measurement scheme matches perfectly with the Copenhagen interpretation. Therefore it is surprising that OIT contradicts it. The clash between OIT and the Copenhagen interpretation was highlighted in the conclusion section of the OIT-article [8]:

“Schrödinger [22] argued that a measurement does not ascertain the pre-existing value of the observable and is only required to be repeatable. Since the inception of quantum mechanics, this view has long been supported as one of the fundamental tenets of quantum mechanics. In contrast, we have shown that any probability reproducible measurement indeed ascertains the value that the observable has, whether the repeatability is satisfied or not.”

I disagree with the author of [8]. The seed of this misunderstanding is in ignoring the two-level structure of physical theories, ontic and epistemic [23, 24, 25]. The former is about reality as it is, and the latter is about knowledge about reality. Bohr and Schrödinger wrote about the ontic reality, about the impossibility of assigning preexisting values to quantum systems; here “preexisting” is a synonym for “objective”, “ontic”. But OIT is not about such values; it is about epistemic reality, the reality of knowledge about the possible outcome of measurement.

Hence, in my opinion, OIT can peacefully coexist with the Copenhagen interpretation.

But, as was stressed, OIT is a challenge for QBism, which operates at the epistemic level of scientific description of quantum phenomena. This is a good place to recall that QBism should be sharply separated from the Copenhagen interpretation; see again Fuchs and Schack [2]:

“According to QBism, quantum mechanics can be applied to any physical system. QBism treats all physical systems in the same way, including atoms, beam splitters, Stern-Gerlach magnets, preparation devices, measurement apparatuses, all the way to living beings and other agents. In this, QBism differs crucially from various versions of the Copenhagen interpretation.”

²As was stressed by Plotnitsky [20], one should recognize the diversity of views on the Copenhagen interpretation. He suggested speaking about interpretations in the spirit of Copenhagen. Even Bohr changed his views a few times during his life [20].
Acknowledgments

This paper was written on the basis of long discussions with Masanao Ozawa, and I would like to thank him; Arkady Plotnitsky told me a lot about the Copenhagen interpretation and Bohr’s views, and I would like to thank him; Christopher Fuchs ignited my interest in QBism at the second Växjö conference (in 2001), and I am sorry if this paper disturbs QBists; I am also thankful to Harald Atmanspacher, who introduced me to the ontic-epistemic approach to the scientific representation of natural phenomena.
References

[1] Fuchs, C. A. and Schack, R. (2011). A Quantum-Bayesian route to quantum-state space. Found. Phys. 41, p. 345.

[2] Fuchs, C. A. and Schack, R. (2014). QBism and the Greeks: why a quantum state does not represent an element of physical reality. Phys. Scr. 90, 015104.

[3] Fuchs, C. A., Mermin, N. D. and Schack, R. (2014). An introduction to QBism with an application to the locality of quantum mechanics. Am. J. Phys. 82, p. 749.

[4] DeBrota, J. B., Fuchs, C. A., Pienaar, J. L., and Stacey, B. C. (2021). Born’s rule as a quantum extension of Bayesian coherence. Phys. Rev. A 104(2), 022207.

[5] Khrennikov, A. (2018). External observer reflections on QBism, its possible modifications, and novel applications. In: Quantum Foundations, STEAM-H: Science, Technology, Engineering, Agriculture, Mathematics & Health; Khrennikov, A. and Toni, B., Eds.; Springer, Cham, pp. 93–118.

[6] Khrennikov, A. (2018). Towards better understanding QBism. Found. Sc. 23(1), 181–195.

[7] Khrennikov, A. (2016). Reflections on Zeilinger–Brukner information interpretation of quantum mechanics. Found. Phys. 46, 836–844.

[8] Ozawa, M. (2019). Intersubjectivity of outcomes of quantum measurements. https://arxiv.org/abs/1911.10893

[9] von Neumann, J. (1955). Mathematical Foundations of Quantum Mechanics (Princeton Univ. Press, Princeton) [Originally published: Mathematische Grundlagen der Quantenmechanik, Springer, Berlin, 1932].

[10] Ozawa, M. (1984). Quantum measuring processes for continuous observables. J. Math. Phys. 25, 79–87.

[11] Ozawa, M. (2019). Soundness and completeness of quantum root-mean-square errors. npj Quantum Inf. 5, Article number: 1.

[12] Brukner, C. On the quantum measurement problem. https://arxiv.org/abs/1507.05255

[13] Zeilinger, A. (1999). A foundational principle for quantum mechanics. Found. Phys. 29, 631–643.

[14] Brukner, C. and Zeilinger, A. (1999). Malus’ law and quantum information. Acta Phys. Slovaca 49, 647–652.

[15] Brukner, C. and Zeilinger, A. (2009). Information invariance and quantum probabilities. Found. Phys. 39, 677.

[16] Helstrom, C. W. (1976). Quantum Detection and Estimation Theory. Academic, New York.

[17] Holevo, A. S. (1982). Probabilistic and Statistical Aspects of Quantum Theory. North-Holland, Amsterdam.

[18] Ozawa, M. (1980). Optimal measurements for general quantum systems. Rep. Math. Phys. 18, 11–28.

[19] Ozawa, M. (1989). Realization of measurement and the standard quantum limit. In: Squeezed and Nonclassical Light, Tombesi, P. and Pike, E. R., Eds.; Plenum, New York, pp. 263–286; arXiv:1505.01083 [quant-ph].

[20] Plotnitsky, A. (2012). Niels Bohr and Complementarity: An Introduction. Springer, Berlin and New York.

[21] Bohr, N. (1987). The Philosophical Writings of Niels Bohr, 3 vols. Ox Bow Press, Woodbridge, CT.

[22] Schrödinger, E. (1980). The present situation in quantum mechanics: A translation of Schrödinger’s “Cat Paradox” paper (by J. D. Trimmer). Proc. Am. Philos. Soc. 124, 323–338. [Originally published: Die gegenwärtige Situation in der Quantenmechanik, Naturwissenschaften 23, 807–812, 823–828, 844–849 (1935)].

[23] Atmanspacher, H. (1994). Is the ontic/epistemic distinction sufficient to describe quantum systems exhaustively? In: Symposium on the Foundations of Modern Physics, pp. 15–32.

[24] Atmanspacher, H. and Primas, H. (2003). Epistemic and ontic quantum realities. In: Time, Quantum and Information, pp. 301–321. Springer, Berlin, Heidelberg.

[25] Khrennikov, A. (2017). Quantum epistemology from subquantum ontology: Quantum mechanics from theory of classical random fields. Annals of Physics 377, 147–163.
CtE2T4oBgHgl3EQfoAiM/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf,len=305
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
3
+ page_content='04014v1 [quant-ph] 9 Jan 2023 Ozawa’s Intersubjectivity Theorem as objection to QBism individual agent perspective Andrei Khrennikov Linnaeus University, International Center for Mathematical Modeling in Physics and Cognitive Sciences V¨axj¨o, SE-351 95, Sweden January 11, 2023 Abstract QBism’s foundational statement that “the outcome of a measure- ment of an observable is personal” is in the straight contraversion with Ozawa’s Intersubjectivity Theorem (OIT).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
4
+ page_content=' The latter (proven within the quantum formalism) states that two observers, agents within the QBism terminology, performing joint measurements of the same ob- servable A on a system S in the state ψ should get the same outcome A = x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
5
+ page_content=' In Ozawa’s terminology, this outcome is intersubjective and it can’t be treated as personal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
6
+ page_content=' This is the strong objection to QBism which can’t survive without updating its principles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
7
+ page_content=' The essential aspect in understanding of the OIT-impact on QBism’s foundations takes the notion of quantum observable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
8
+ page_content=' This paper comprises the complementary discussion highlighting the difference between the ac- curate, von Neumann, and inaccurate, noisy, quantum observables which are represented by PVMs and POVMs respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
9
+ page_content=' Moreover, we discuss the OIT-impact on the Copenhagen interpretation of quan- tum mechanics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
10
+ page_content=' 1 Introduction In this paper I move ahead my critical analysis of QBism’s founda- tions (see, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
11
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
12
+ page_content=', [1]–[4] for QBism basics).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
13
+ page_content=' This paper, as well as my two previous articles [5, 6], straightly critiques the individual agent perspective on measurement’s outcomes [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
14
+ page_content=' My previous appraisal 1 convinced QBists to specify the level of agent’s individuality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
15
+ page_content=' In con- trast to the general subjective probability theory, the class of agents should be restricted, at least to agents who were educated in basics of quantum theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
16
+ page_content=' So, Ivan who lives in a Siberian village, a busy hunter, can’t be treated as a QBism’s agent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
17
+ page_content=' Now I have an intention to offense QBism by using Ozawa’s Inter- subjectivity Theorem (OIT) [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
18
+ page_content=' Qbism’s statement that “the outcome of a measurement of an observable is personal” is in the straight con- traversion with OIT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
19
+ page_content=' This theorem is not so widely known and one of the present paper’s intention is the theorem’s advertizement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
20
+ page_content=' OIT states that two observers, agents within the QBism terminology, per- forming joint measurements of the same observable A on a system S in the state ψ should register the same outcome A = x with probability one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
21
+ page_content=' Hence, the outcome is intersubjective [8], and it’s unnatural to consider outcomes of quantum observations as agent’s personal expe- riences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
22
+ page_content=' OIT is proven within the quantum formalism, it is the rigorous mathematical statement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
23
+ page_content=' But, as many theorems having the quan- tum foundational impact, its interpretation is not straightforward.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
24
+ page_content=' The analysis of the OIT-impact onto QBism is coupled to the foun- dations of quantum measurement theory and especially the notion of quantum observable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
25
+ page_content=' Therefore, this paper comprises the complemen- tary discussion, highlighting the difference between the accurate, von Neuman, and inaccurate, noisy, quantum observables, mathematically represented by projection valued measures (PVMs) and positive oper- ator valued measures (POVMs), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
26
+ page_content=' QIT is about the agents who are able to perform the joint accurate measurements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
27
+ page_content=' For such agents, measurement’s outcome loses its personalization, in favour of intersubjectivity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
28
+ page_content=' The conclusion of our analysis is that QBism should update its ideology by taking in consideration OIT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
29
+ page_content=' But, how?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
30
+ page_content=' See section 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
31
+ page_content=' Thus, I am in line with the criticism of QBism presented in article [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
32
+ page_content=' However, I depart from its conclusion that OIT contradicts to the Copenhagen interpretation;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
33
+ page_content=' in contrast, OIT peacefully coexist with this interpretation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
34
+ page_content=' It is relevant to recall here that QBism fundamen- tally differs from the Copenhagen interpretation [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
35
+ page_content=' Right away we initiate with the mathematical formulation of OIT and its proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
36
+ page_content=' We set out to make the presentation very shortly (see [8] for details).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
37
+ page_content=' The indirect measurement scheme is the heart of OIT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
38
+ page_content=' We go ahead with the recollection of the notion of quantum observ- able, namely, Hermitian operator or PVM, and generalized quantum observable (POVM) and the indirect measurements scheme for their generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
39
+ page_content=' 2 2 Quantum observables vs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
40
+ page_content=' general- ized quantum observables In quantum mechanics’ axiomatics, von Neumann [9] introduced quan- tum observables as Hermitian operators acting in complex Hilbert space H, the state space of a system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
41
+ page_content='1 The spectral decomposition is the essential part in this framework.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
42
+ page_content=' We restrict considerations to observables represented by the oper- ators with totally discrete spectra X ⊂ R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
43
+ page_content=' Here A = � x xEA(x), (1) where EA(x) is projection on the eigensubspace corresponding to the eigenvalue x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
44
+ page_content=' these projectors form the resolution of unity: I = � x EA(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
45
+ page_content=' (2) The Born rule determines the probabilities of the outcomes of mea- surements for a system S in the state ψ, P(A = x|ψ) = ⟨ψ|EA(x)|ψ⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
46
+ page_content=' (3) Later generalized quantum observables were invented.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
47
+ page_content=' Such ob- servables are represented by POVMs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
48
+ page_content=' We restrict considerations to POVMs with a discrete domain of definition X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
49
+ page_content=' POVM is a map x → Π(x) : for each x ∈ X, Π(x) is a positive contractive self-adjoint operator (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
50
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
51
+ page_content=', 0 ≤ Π(x) ≤ I) (called an effect), and effects form the resolution of unity � x Π(x) = I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
52
+ page_content=' (4) This map defines an operator valued measure on algebra of all subsets of set X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
53
+ page_content=' For O ⊂ X, Π(O) = � x∈O Π(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
54
+ page_content=' The condition (4) is the operator-measure counterpart of the condition normalization by 1 for usual probability measures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
55
+ page_content=' 1Why did he select the Hermitian operators for mathematical representation of observ- ables in quantum theory?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
56
+ page_content=' Moreover, he considered only such observables as the genuine quantum observables.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
57
+ page_content=' I guess that he followed Schr¨odinger’s quantization rule for the position and momentum observables which are realized by Hermitian operators in L2- space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
58
+ page_content=' This rule implies that each classical observable given by the real-valued function A = A(q, p) on the phase space is represented as a Hermitian operator in L2-space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
59
+ page_content=' 3 POVM Π represents statistics of measurements for observable A with the following generalization of the Born’s rule: P(Π = x|ψ) = ⟨ψ|Π(x)|ψ⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
60
+ page_content=' (5) We remark that equality (4) implies that � x P(A = x|ψ) = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
61
+ page_content=' Any quantum observable A can also be represented as POVM of the special type – PVM EA = (EA(x)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
62
+ page_content=' Quantum observables given by PVMs were interpreted by von Neu- mann [9] as describing accurate measurements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
63
+ page_content=' And generalized ob- servables given by POVMs which are not PVMs are interpreted as representing inaccurate measurements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
64
+ page_content=' In von Neumann’s [9], the no- tion of measurement’s precision was not completely formalized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
65
+ page_content=' Only recently the consistent formalization of this notion was presented in [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
66
+ page_content=' We shall keep firmly the expression “quantum observable” for ob- servable axiomatically introduced by von Neumann [9] and represented by PVMs and the expression “generalized quantum observable” for POVMs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
67
+ page_content=' 3 Generalized quantum observables from the indirect measurement scheme The indirect measurement scheme involves the following components the states spaces H and K of the systems S and the apparatus M for measurement of some observable A;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
68
+ page_content=' the evolution operator U = U(t) representing the interaction- dynamics for the system S + M;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
69
+ page_content=' the meter observable M giving outputs of the pointer of the apparatus M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
70
+ page_content=' Here the quantum observables A and M can be represented as PVMs, EA = (EA(x)), EM = (EM(x)), where EA(x), EM(x) are projections in Hilbert spaces H and K respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
71
+ page_content=' It is assumed that the com- pound system’s evolution is driven by the Schr¨odinger equation, so the evolution operator is unitary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
72
+ page_content=' Formally, an indirect measurement model for an observable A, in- troduced in [10] as a “measuring process”, is a quadruple (K, |ξ⟩, U, M) 4 where |ξ⟩ ∈ K represents the apparatus state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
73
+ page_content=' We explore the Heisenberg picture.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
74
+ page_content=' To describe meter’s evolution, we represent it in the state space of the compound system, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
75
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
76
+ page_content=', as I ⊗ M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
77
+ page_content=' The meter observable evolves as M(t) = U ⋆(t)(I ⊗ M)U(t).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
78
+ page_content=' (6) By the Born rule P(M(t) = x|ψξ) = ⟨ψξ|EM(t)(x)|ψξ⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
79
+ page_content=' (7) This is the probability distribution for the outputs of measure- ments done by the apparatus and given by the meter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
80
+ page_content=' In principle, one can ignore the representation of the measurement process as the system-apparatus interaction and operate solely with system’s states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
81
+ page_content=' In this picture one proceeds with generalized observables given by POVMs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
82
+ page_content=' The meter observable generates the POVM Π = (Π(x)) Π(x) = ⟨ξ|EM(T)(x)|ξ⟩, (8) where T is the time needed to complete the experiment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
83
+ page_content=' The probability distribution of the generalized observable given by a POVM is determined by (5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
84
+ page_content=' Generally the probability distribution generated by a measurement process does not coincide with the probability distribution of the quan- tum observable A for which this process was constructed, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
85
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
86
+ page_content=', generally P(Π = x|ψ) = ⟨ψ|Π(x)|ψ⟩ ̸= P(A = x|ψ) = ⟨ψ|EA(x)|ψ⟩, (9) We remark that, as was proven by Ozawa [10], any generalized observable (POVM) can be generated via the indirect measurement scheme.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
87
+ page_content=' Typically one operates solely with generalized observables by ignoring the indirect measurement scheme.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
88
+ page_content=' This simplifies consid- erations, but it can lead to misunderstanding of the foundations the quantum measurement theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
89
+ page_content=' 4 Probability reproducibility condition Definition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
90
+ page_content=' A measurement process (K, |ξ⟩, U, M) reproduces the prob- ability distribution for quantum observable A (accurate von Neumann observable) if P(A = x|ψ) = P(M(T) = x|ψξ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
91
+ page_content=' (10) In this case ⟨ψξ|EM(T)(x)|ψξ⟩ = ⟨ψ|E(x)|ψ⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
92
+ page_content=' (11) 5 or ⟨ψ|Π(x)|ψ⟩ = ⟨ψ|E(x)|ψ⟩, (12) and hence, Π(x) = E(x), Proposition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
93
+ page_content=' Probability reproducibility condition for a measure- ment process is equivalent to the representation of the corresponding generalized observable by the PVM EA of measured quantum observ- able A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
94
+ page_content=' 5 Intersubjectivity of outcomes of quan- tum observables Following [8], consider two remote observers O1 and O2 who perform joint measurements on a system S, in mathematical terms it means that the meter quantum observables of the corresponding measure- ment processes commute, [M1(t), M2(t)] = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
95
+ page_content=' Here each apparatus has its own state space, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
96
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
97
+ page_content=', K = K1 ⊗ K2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
98
+ page_content=' We call such measurements local.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
99
+ page_content=' In this situation the joint probability distribution is well defined P(M1(t) = x, M1(t) = y|ψξ1ξ2) = ⟨ψξ1ξ2|EM1(t)(x)EM1(t)(y)|ψξ1ξ2⟩ (13) Suppose that both observers perform the accurate measurements of the quantum observable A given by PVM EA = (EA(x)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
100
+ page_content=' Then the corresponding POVMs Πj, j = 1, 2, coincide with EA : Π1(x) = Π2(x) = EA(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
101
+ page_content=' (14) This equality implies: Theorem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
102
+ page_content=' (OIT [8]) Two observers performing the joint local and probability reproducible measurements of the same quantum observable A on the system S should get the same outcome with probability 1: P(M1(T) = x, M1(T) = y|ψξ1ξ2) = δ(x − y)P(E = x|ψ) = ∥E(x)ψ∥2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
103
+ page_content=' (15) 6 6 Intersubjectivity challenges QBism We start with the following citation of Fuchs and Schack [2]: “The fundamental primitive of QBism is the concept of experience.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
104
+ page_content=' According to QBism, quantum mechanics is a theory that any agent can use to evaluate her expectations for the content of her personal experience.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
105
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
106
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
107
+ page_content=' In QBism, a measurement is an action an agent takes to elicit an experience.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
108
+ page_content=' The measurement outcome is the experience so elicited.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
109
+ page_content=' The measurement outcome is thus personal to the agent who takes the measurement action.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
110
+ page_content=' In this sense, quantum mechanics, like probabil- ity theory, is a single user theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
111
+ page_content=' A measurement does not reveal a pre-existing value.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
112
+ page_content=' Rather, the measurement outcome is created in the measurement action.” However, OIT implies that, for accurate local observables, mea- surement’s outcome is intersubjective which is the strong objection to QBism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
113
+ page_content=' There is nothing concerning personal experiences and QBists should response to this objection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
114
+ page_content=' My suggestion (see also [7]) is to fol- low Brukner’s work [12] where he proceeds not with individual agents and their personal experiences, but with a universal agent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
115
+ page_content=' I remark that consideration of universal agents is common in general theory of decision making.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
116
+ page_content=' However, for QBists, such solution seems to be un- acceptable, since it would destroy consistency of the QBism’s private agency perspective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
117
+ page_content=' It would move QBism closer to Zeilinger-Brukner information interpretation of quantum mechanics [13, 14, 15].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
118
+ page_content=' This objection to QBism is foundationally interesting and gen- erates the discussion on the notion of quantum observable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
119
+ page_content=' Due to efforts Helstrom, Holevo, and Ozawa [16]–[19], [10], generalized quan- tum observables which are mathematically represented by POVMs became one of the basic tools of quantum information theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
120
+ page_content=' Nowa- days the special role of accurate observables represented by PVMs is not emphasized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
121
+ page_content=' In particular, the notion of observables in QBism is identified with generalized quantum observable given by POVM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
122
+ page_content=' How- ever, the clash between QBism and OIT stimulates highlighting of the accurate PVM- as the genuine quantum observables, and treating the generalized quantum observables which are not accurate POVM as imprecise and noisy ones.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
123
+ page_content=' Of course, it is a well known fact, but the clash between OIT and QBism is good occasion to emphasize this difference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
124
+ page_content=' What does this difference between accurate PVM and noisy POVM observables mean for QBism?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
125
+ page_content=' I have the following picture of the situation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
126
+ page_content=' OIT holds only for the accurate PVM-observables;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
127
+ page_content=' for generalized quantum observables, it 7 can be violated and generally it is impossible to assign the same value for measurements’ outcomes for observers O1 and O2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
128
+ page_content=' Thus, QBism ideology of the personal experiences of observers (agents) can still be kept for such generalizad observables.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
129
+ page_content=' But, where does individuality come from?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
130
+ page_content=' The personal experiences come from noise!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
131
+ page_content=' So, different observers performing inaccurate measurements are coupled to different noisy environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
132
+ page_content=' This is just my personal view on consequences of IOT for QBism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
133
+ page_content=' In conclusion, QBism might response to the OIT-challenge by con- sidering the universal agent who is able to perform accurate measure- ments;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
134
+ page_content=' individuality of agents’ experience is reduced to individuality of noise generated in the process of measurement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
135
+ page_content=' 7 Intersubjectivity and Copenhagen in- terpretation By the Copenhagen interpretation (at least by its Bohr’s version2) measurements’ outcomes cannot be treated as the objective properties of a system S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
136
+ page_content=' They are results of the complex process of interaction of a system and an apparatus, see Bohr [21]: “This crucial point .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
137
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
138
+ page_content=' implies the impossibility of any sharp sep- aration between the behaviour of atomic objects and the interaction with the measuring instruments which serve to define the conditions under which the phenomena appear.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
139
+ page_content=' In fact, the individuality of the typical quantum effects finds its proper expression in the circumstance that any attempt of subdividing the phenomena will demand a change in the experimental arrangement introducing new possibilities of inter- action between objects and measuring instruments which in principle cannot be controlled.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
140
+ page_content=' Consequently, evidence obtained under different experimental conditions cannot be comprehended within a single pic- ture, but must be regarded as complementary in the sense that only the totality of the phenomena exhausts the possible information about the objects.” The indirect measurement scheme matches perfectly with the Copen- hagen interpretation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
141
+ page_content=' Therefore it is surprising that OIT contradicts to it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
142
+ page_content=' The clash between OIT and the the Copenhagen interpretation was highlighted in the conclusion section of OIT-article [8]: 2As was stressed by Plotnitsky [20], one should recognize the diversity of views on the Copenhagen interpretation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
143
+ page_content=' He suggested to speak about interpretations in the spirit of Copenhagen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
144
+ page_content=' Even Bohr changed the views a few times during his life [20].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
145
+ page_content=' 8 “Schr¨odinger [22] argued that a measurement does not ascertain the pre-existing value of the observable and is only required to be re- peatable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
146
+ page_content=' Since the inception of quantum mechanics, this view has long been supported as one of the fundamental tenets of quantum mechan- ics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
147
+ page_content=' In contrast, we have shown that any probability reproducible mea- surement indeed ascertains the value that the observable has, whether the repeatability is satisfied or not.” I disagree with the author of [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
148
+ page_content=' The seed of this misunderstand- ing is in ignoring the two level structure of physical theories, ontic and epistemic [23, 24, 25].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
149
+ page_content=' The former is about reality as it is and the latter is about knowledge about reality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
150
+ page_content=' Bohr and Schr¨odinger wrote about the ontic reality, about impossibility to assign to quan- tum systems preexisting values and here “preexisting” is the synonym for “objective”, “ontic”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
151
+ page_content=' But OIT is not about such values, it is about epistemic reality, reality of knowledge about the possible outcome of measurement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
152
+ page_content=' Hence, in my opinion OIT can peacefully coexist with the Copen- hagen interpretation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
153
+ page_content=' But, as was stressed, OIT is a challenge for QBism which operates at the epistemic level of scientific description of quantum phenom- ena.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtE2T4oBgHgl3EQfoAiM/content/2301.04014v1.pdf'}
154
This is a good place to recall that QBism should be sharply separated from the Copenhagen interpretation; see again Fuchs and Schack [2]: "According to QBism, quantum mechanics can be applied to any physical system. QBism treats all physical systems in the same way, including atoms, beam splitters, Stern-Gerlach magnets, preparation devices, measurement apparatuses, all the way to living beings and other agents. In this, QBism differs crucially from various versions of the Copenhagen interpretation."

Acknowledgments

This paper was written on the basis of long discussions with Masanao Ozawa, and I would like to thank him. Arkady Plotnitsky told me a lot about the Copenhagen interpretation and Bohr's views, and I would like to thank him as well. Christopher Fuchs ignited my interest in QBism at the second Växjö conference (in 2001), and I am sorry if this paper disturbs QBists. I am also thankful to Harald Atmanspacher, who introduced me to the ontic-epistemic approach to the scientific representation of natural phenomena.
References

[1] Fuchs, C. A. and Schack, R. (2011). A quantum-Bayesian route to quantum-state space. Found. Phys. 41, 345.
[2] Fuchs, C. A. and Schack, R. (2014). QBism and the Greeks: why a quantum state does not represent an element of physical reality. Phys. Scr. 90, 015104.
[3] Fuchs, C. A., Mermin, N. D. and Schack, R. (2014). An introduction to QBism with an application to the locality of quantum mechanics. Am. J. Phys. 82, 749.
[4] DeBrota, J. B., Fuchs, C. A., Pienaar, J. L. and Stacey, B. C. (2021). Born's rule as a quantum extension of Bayesian coherence. Phys. Rev. A 104(2), 022207.
[5] Khrennikov, A. (2018). External observer reflections on QBism, its possible modifications, and novel applications. In: Khrennikov, A. and Toni, B. (eds.) Quantum Foundations, STEAM-H: Science, Technology, Engineering, Agriculture, Mathematics & Health. Springer, Cham, pp. 93–118.
[6] Khrennikov, A. (2018). Towards better understanding QBism. Found. Sci. 23(1), 181–195.
[7] Khrennikov, A. (2016). Reflections on the Zeilinger–Brukner information interpretation of quantum mechanics. Found. Phys. 46, 836–844.
[8] Ozawa, M. (2019). Intersubjectivity of outcomes of quantum measurements. https://arxiv.org/abs/1911.10893
[9] von Neumann, J. (1955). Mathematical Foundations of Quantum Mechanics. Princeton Univ. Press, Princeton. [Originally published: Mathematische Grundlagen der Quantenmechanik, Springer, Berlin, 1932.]
[10] Ozawa, M. (1984). Quantum measuring processes for continuous observables. J. Math. Phys. 25, 79–87.
[11] Ozawa, M. (2019). Soundness and completeness of quantum root-mean-square errors. npj Quantum Inf. 5, 1.
[12] Brukner, C. (2015). On the quantum measurement problem. https://arxiv.org/abs/1507.05255
[13] Zeilinger, A. (1999). A foundational principle for quantum mechanics. Found. Phys. 29, 631–643.
[14] Brukner, C. and Zeilinger, A. (1999). Malus' law and quantum information. Acta Phys. Slovaca 49, 647–652.
[15] Brukner, C. and Zeilinger, A. (2009). Information invariance and quantum probabilities. Found. Phys. 39, 677.
[16] Helstrom, C. W. (1976). Quantum Detection and Estimation Theory. Academic, New York.
[17] Holevo, A. S. (1982). Probabilistic and Statistical Aspects of Quantum Theory. North-Holland, Amsterdam.
[18] Ozawa, M. (1980). Optimal measurements for general quantum systems. Rep. Math. Phys. 18, 11–28.
[19] Ozawa, M. (1989). Realization of measurement and the standard quantum limit. In: Tombesi, P. and Pike, E. R. (eds.) Squeezed and Nonclassical Light. Plenum, New York, pp. 263–286. arXiv:1505.01083 [quant-ph].
[20] Plotnitsky, A. (2012). Niels Bohr and Complementarity: An Introduction. Springer, Berlin and New York.
[21] Bohr, N. (1987). The Philosophical Writings of Niels Bohr, 3 vols. Ox Bow Press, Woodbridge, CT.
[22] Schrödinger, E. (1980). The present situation in quantum mechanics: a translation of Schrödinger's "Cat Paradox" paper (trans. J. D. Trimmer). Proc. Am. Philos. Soc. 124, 323–338. [Originally published: Die gegenwärtige Situation in der Quantenmechanik, Naturwissenschaften 23, 807–812, 823–828, 844–849 (1935).]
[23] Atmanspacher, H. (1994). Is the ontic/epistemic distinction sufficient to describe quantum systems exhaustively? In: Symposium on the Foundations of Modern Physics, pp. 15–32.
[24] Atmanspacher, H. and Primas, H. (2003). Epistemic and ontic quantum realities. In: Time, Quantum and Information, pp. 301–321. Springer, Berlin, Heidelberg.
[25] Khrennikov, A. (2017). Quantum epistemology from subquantum ontology: quantum mechanics from theory of classical random fields. Ann. Phys. 377, 147–163.
D9E1T4oBgHgl3EQfqQU2/content/2301.03340v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:967d1f5b0f358d417e5bb982164c4fc3368d9529c4862edff44e7167851c4351
3
+ size 1593177
D9E1T4oBgHgl3EQfqQU2/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa38f2b21ac3be948b681e2150181e7be72771b10d2c5a40b65b0e61ea3523f5
3
+ size 1835053
ENE1T4oBgHgl3EQfqQWR/content/2301.03341v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d13830f074ea08fe920284b26d0e55e275d36556dd1241826be4c52c543968fa
3
+ size 473042
ENE1T4oBgHgl3EQfqQWR/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d05f374fba8930c51ade845eb7cd86e87f05b2ebe4d0eadfc8d0ba47315f686
3
+ size 3145773
ENE1T4oBgHgl3EQfqQWR/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05c57a8365e35acfe7f917794a0d1a5f4d0786844bb60171e39f86bd160b9045
3
+ size 109271