jackkuo commited on
Commit
f6ab0ba
·
verified ·
1 Parent(s): 218b359

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. -9FQT4oBgHgl3EQf7Ta5/content/tmp_files/2301.13442v1.pdf.txt +2837 -0
  2. -9FQT4oBgHgl3EQf7Ta5/content/tmp_files/load_file.txt +0 -0
  3. -NFLT4oBgHgl3EQfuy_h/content/tmp_files/2301.12157v1.pdf.txt +476 -0
  4. -NFLT4oBgHgl3EQfuy_h/content/tmp_files/load_file.txt +410 -0
  5. -dE2T4oBgHgl3EQfQgYx/vector_store/index.faiss +3 -0
  6. .gitattributes +69 -0
  7. 09E3T4oBgHgl3EQfnArs/content/tmp_files/2301.04622v1.pdf.txt +1653 -0
  8. 09E3T4oBgHgl3EQfnArs/content/tmp_files/load_file.txt +0 -0
  9. 0NAyT4oBgHgl3EQfbfc0/content/tmp_files/2301.00262v1.pdf.txt +2919 -0
  10. 0NAyT4oBgHgl3EQfbfc0/content/tmp_files/load_file.txt +0 -0
  11. 19AyT4oBgHgl3EQfbvd_/content/2301.00269v1.pdf +3 -0
  12. 19AyT4oBgHgl3EQfbvd_/vector_store/index.faiss +3 -0
  13. 19AyT4oBgHgl3EQfbvd_/vector_store/index.pkl +3 -0
  14. 29E0T4oBgHgl3EQfuwF7/content/tmp_files/2301.02609v1.pdf.txt +819 -0
  15. 29E0T4oBgHgl3EQfuwF7/content/tmp_files/load_file.txt +0 -0
  16. 49E0T4oBgHgl3EQfvgE1/content/tmp_files/2301.02618v1.pdf.txt +0 -0
  17. 49E0T4oBgHgl3EQfvgE1/content/tmp_files/load_file.txt +0 -0
  18. 4NA0T4oBgHgl3EQfNf9p/content/2301.02147v1.pdf +3 -0
  19. 4NA0T4oBgHgl3EQfNf9p/vector_store/index.faiss +3 -0
  20. 4NA0T4oBgHgl3EQfNf9p/vector_store/index.pkl +3 -0
  21. 59E0T4oBgHgl3EQfewCK/content/tmp_files/2301.02395v1.pdf.txt +0 -0
  22. 59E0T4oBgHgl3EQfewCK/content/tmp_files/load_file.txt +0 -0
  23. 7dE1T4oBgHgl3EQf7QV5/content/2301.03532v1.pdf +3 -0
  24. 7dE1T4oBgHgl3EQf7QV5/vector_store/index.pkl +3 -0
  25. 8dFQT4oBgHgl3EQf4jbF/vector_store/index.pkl +3 -0
  26. 99AyT4oBgHgl3EQfRPYW/content/tmp_files/2301.00060v1.pdf.txt +1119 -0
  27. 99AyT4oBgHgl3EQfRPYW/content/tmp_files/load_file.txt +0 -0
  28. 9NAzT4oBgHgl3EQfSft5/content/tmp_files/2301.01233v1.pdf.txt +1415 -0
  29. 9NAzT4oBgHgl3EQfSft5/content/tmp_files/load_file.txt +0 -0
  30. 9NFRT4oBgHgl3EQfqTc6/content/tmp_files/2301.13616v1.pdf.txt +1725 -0
  31. 9NFRT4oBgHgl3EQfqTc6/content/tmp_files/load_file.txt +0 -0
  32. ANE0T4oBgHgl3EQfPgBb/vector_store/index.faiss +3 -0
  33. ANE0T4oBgHgl3EQfPgBb/vector_store/index.pkl +3 -0
  34. AdAyT4oBgHgl3EQf3_pa/content/2301.00778v1.pdf +3 -0
  35. AdAyT4oBgHgl3EQf3_pa/vector_store/index.pkl +3 -0
  36. B9E5T4oBgHgl3EQfTg8R/content/2301.05536v1.pdf +3 -0
  37. B9E5T4oBgHgl3EQfTg8R/vector_store/index.pkl +3 -0
  38. BdFIT4oBgHgl3EQf_ywO/content/tmp_files/load_file.txt +373 -0
  39. BtAzT4oBgHgl3EQfTfyw/content/2301.01251v1.pdf +3 -0
  40. BtAzT4oBgHgl3EQfTfyw/vector_store/index.faiss +3 -0
  41. C9E2T4oBgHgl3EQfSAeL/content/tmp_files/2301.03788v1.pdf.txt +1595 -0
  42. C9E2T4oBgHgl3EQfSAeL/content/tmp_files/load_file.txt +0 -0
  43. D9A0T4oBgHgl3EQfAv_u/vector_store/index.faiss +3 -0
  44. DdAyT4oBgHgl3EQf4vob/vector_store/index.faiss +3 -0
  45. ENAyT4oBgHgl3EQf4vqx/content/tmp_files/2301.00793v1.pdf.txt +0 -0
  46. ENAyT4oBgHgl3EQf4vqx/content/tmp_files/load_file.txt +0 -0
  47. EdFRT4oBgHgl3EQfyTjG/content/tmp_files/2301.13645v1.pdf.txt +1073 -0
  48. EdFRT4oBgHgl3EQfyTjG/content/tmp_files/load_file.txt +352 -0
  49. FNAzT4oBgHgl3EQfG_ve/content/2301.01039v1.pdf +3 -0
  50. FNAzT4oBgHgl3EQfG_ve/vector_store/index.faiss +3 -0
-9FQT4oBgHgl3EQf7Ta5/content/tmp_files/2301.13442v1.pdf.txt ADDED
@@ -0,0 +1,2837 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Scaling laws for single-agent reinforcement learning
2
+ Jacob Hilton
3
+ OpenAI
4
5
+ Jie Tang
6
+ OpenAI
7
8
+ John Schulman
9
+ OpenAI
10
11
+ Abstract
12
+ Recent work has shown that, in generative modeling, cross-entropy loss improves
13
+ smoothly with model size and training compute, following a power law plus
14
+ constant scaling law. One challenge in extending these results to reinforcement
15
+ learning is that the main performance objective of interest, mean episode return,
16
+ need not vary smoothly. To overcome this, we introduce intrinsic performance,
17
+ a monotonic function of the return defined as the minimum compute required to
18
+ achieve the given return across a family of models of different sizes. We find that,
19
+ across a range of environments, intrinsic performance scales as a power law in
20
+ model size and environment interactions. Consequently, as in generative modeling,
21
+ the optimal model size scales as a power law in the training compute budget.
22
+ Furthermore, we study how this relationship varies with the environment and with
23
+ other properties of the training setup. In particular, using a toy MNIST-based
24
+ environment, we show that varying the “horizon length” of the task mostly changes
25
+ the coefficient but not the exponent of this relationship.
26
+ 1
27
+ Introduction
28
+ Recent studies of how neural network performance varies with model size and training compute have
29
+ found these relationships to be governed by smooth power laws [Kaplan et al., 2020, Henighan et al.,
30
+ 2020, Droppo and Elibol, 2021, Ghorbani et al., 2021]. These studies have focused primarily on
31
+ generative modeling, in which the training objective is cross-entropy loss, and have found test loss to
32
+ scale smoothly. In this work we seek to extend these results to reinforcement learning, in which there
33
+ is generally no cross-entropy loss.
34
+ In some reinforcement learning environments, there is still a performance metric that varies smoothly.
35
+ For example, in competitive games, it is often possible to assign Elo ratings to players such that
36
+ scaled differences in Elo ratings give approximate logit probabilities of victory. Recently it has been
37
+ shown that, in the board games Hex [Jones, 2021], Connect Four and Pentago [Neumann and Gros,
38
+ 2022], the exponentiated Elo rating of a policy trained using AlphaZero [Silver et al., 2018] follows a
39
+ power law in training compute (within a certain Elo range). We call metrics that follow such simple
40
+ relationships natural performance metrics.
41
+ However, in other reinforcement learning environments, there may be no obvious natural performance
42
+ metric. For example, there may be no reason to expect the number of objects collected in a video
43
+ game to vary smoothly, since crossing some threshold may require some challenging new capability.
44
+ To overcome this difficulty, we introduce intrinsic performance, which is defined to be equal to
45
+ training compute on the compute-efficient frontier of the tradeoff between model size and environment
46
+ interactions. This causes the relationship between performance and training compute to follow a
47
+ power law by definition, thereby making it possible to study the remaining relationships between
48
+ performance, model size and environment interactions.
49
+ We study these relationships across a range of environments: the easy and hard modes of environments
50
+ from Procgen Benchmark [Cobbe et al., 2020]; a 1v1 version of Dota 2 [OpenAI et al., 2019]; and a toy
51
+ environment based on MNIST [LeCun, 1998] for which we vary the “horizon length”. Across these
52
+ arXiv:2301.13442v1 [cs.LG] 31 Jan 2023
53
+
54
+ environments, we find intrinsic performance to scale as a power law in model size and environment
55
+ interactions, in much the same way as the analogous quantities in generative modeling.
56
+ One consequence of this scaling law is that, as in generative modeling, the optimal model size for
57
+ a given training compute budget follows a power law. We study in detail how the coefficient and
58
+ exponent of this relationship vary with properties of the training setup, including: the difficulty mode
59
+ of environment, for Procgen; the horizon length of the task, for the MNIST-based environment; the
60
+ period of training used to fit the power law; and whether the width or depth of the model is scaled.
61
+ Contents
62
+ 1
63
+ Introduction
64
+ 1
65
+ 2
66
+ Scaling laws without cross-entropy loss
67
+ 3
68
+ 2.1
69
+ Intrinsic performance . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
70
+ 3
71
+ 2.2
72
+ The power law for intrinsic performance . . . . . . . . . . . . . . . . . . . . . . .
73
+ 4
74
+ 2.3
75
+ Optimal model size vs compute . . . . . . . . . . . . . . . . . . . . . . . . . . . .
76
+ 4
77
+ 3
78
+ Experimental setup
79
+ 5
80
+ 4
81
+ Results
82
+ 7
83
+ 4.1
84
+ Optimal model size vs compute . . . . . . . . . . . . . . . . . . . . . . . . . . . .
85
+ 8
86
+ 4.2
87
+ Effect of task horizon length . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
88
+ 9
89
+ 4.3
90
+ Variability of exponents over training
91
+ . . . . . . . . . . . . . . . . . . . . . . . .
92
+ 10
93
+ 4.4
94
+ Scaling depth . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
95
+ 11
96
+ 4.5
97
+ Natural performance metrics . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
98
+ 12
99
+ 5
100
+ Discussion
101
+ 13
102
+ 5.1
103
+ Extrapolating sample efficiency . . . . . . . . . . . . . . . . . . . . . . . . . . . .
104
+ 13
105
+ 5.2
106
+ Cost-efficient reinforcement learning . . . . . . . . . . . . . . . . . . . . . . . . .
107
+ 14
108
+ 5.3
109
+ Limitations
110
+ . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
111
+ 15
112
+ 5.4
113
+ Forecasting compute requirements . . . . . . . . . . . . . . . . . . . . . . . . . .
114
+ 15
115
+ 6
116
+ Conclusion
117
+ 16
118
+ A Curve-fitting methodology
119
+ 19
120
+ B
121
+ Hyperparameters
122
+ 21
123
+ C Results in full
124
+ 24
125
+ D Parameter and FLOP calculations
126
+ 27
127
+ E
128
+ Fitted constants
129
+ 28
130
+ F
131
+ Proof of the lemma
132
+ 32
133
+ G Proof sketch of the proposition
134
+ 33
135
+ 2
136
+
137
+ 1014
138
+ 1015
139
+ 1016
140
+ 1017
141
+ 1018
142
+ Compute (FLOPs)
143
+ 5
144
+ 10
145
+ 15
146
+ 20
147
+ 25
148
+ 30
149
+ Mean episode return
150
+ StarPilot, hard
151
+ (a) Using the usual metric of mean episode return.
152
+ 1014
153
+ 1015
154
+ 1016
155
+ 1017
156
+ 1018
157
+ Compute (FLOPs)
158
+ 1014
159
+ 1015
160
+ 1016
161
+ 1017
162
+ 1018
163
+ Intrinsic performance (FLOPs)
164
+ Parameters
165
+ 104.3
166
+ 104.6
167
+ 104.9
168
+ 105.2
169
+ 105.5
170
+ 105.8
171
+ 106.1
172
+ 106.4
173
+ 106.7
174
+ 107.0
175
+ StarPilot, hard
176
+ (b) Using intrinsic performance instead.
177
+ Figure 1: Learning curves as a function of total training compute for StarPilot, an environment from
178
+ Procgen Benchmark, using CNNs of different widths. Mean ±1 sample standard deviation over three
179
+ seeds shown.
180
+ 2
181
+ Scaling laws without cross-entropy loss
182
+ 2.1
183
+ Intrinsic performance
184
+ In generative modeling, cross-entropy test loss scales smoothly with training compute, following a
185
+ power law plus constant scaling law [Henighan et al., 2020]. However, in reinforcement learning
186
+ (RL), there is generally no cross-entropy loss, and the usual objective of mean episode return need
187
+ not scale so smoothly.
188
+ For example, consider StarPilot, a side-scrolling shooter from Procgen Benchmark [Cobbe et al.,
189
+ 2020]. The agent receives a reward of 1 for destroying each enemy, and the episode continues until
190
+ either the agent is destroyed, or the agent reaches the end of the level and obtains a bonus reward
191
+ of 10. There is no reason to expect mean episode return in this game to scale smoothly. Indeed, it
192
+ takes some ability with aiming and dodging to reach a mean episode return of 5 or 10, but not much
193
+ additional skill to reach a mean episode return of 15 or 20. This irregular difficulty profile is reflected
194
+ in the uneven shape of learning curves for this environment (see Figure 1(a)).
195
+ It may be tempting to conclude that the scaling law methodology cannot be applied to such an
196
+ environment. However, in generative modeling, there are smooth scaling laws that do not depend on
197
+ test loss per se. For example, the model size that achieves the minimum test loss for a given compute
198
+ budget scales as a power law with compute. In order to study such relationships in the context of
199
+ RL, we would like a performance metric that behaves like test loss, i.e., some monotonic function
200
+ of the return that scales as a power law with compute. We achieve this with our notion of intrinsic
201
+ performance by simply using compute itself as our performance metric.
202
+ Definition. A scalable model family is a collection of models trained in a uniform way, parameterized
203
+ by the model size and the total compute used in training. Given a scalable model family, the intrinsic
204
+ performance of an arbitrary policy is the minimum compute required to train a model of any size in
205
+ the family to reach the same return (averaged over random seeds).
206
+ Another way of explaining this definition is to consider learning curves as a function of compute
207
+ for a family of models of different sizes, as in Figure 1. The maximum performance over all model
208
+ sizes defines the compute-efficient frontier. When using the usual metric of mean episode return (as
209
+ in Figure 1(a)), the compute-efficient frontier need not follow any particular trend. However, when
210
+ using intrinsic performance instead (as in Figure 1(b)), the efficient frontier is mapped onto the line
211
+ 3
212
+
213
+ 1014
214
+ 1015
215
+ 1016
216
+ 1017
217
+ 1018
218
+ Compute (FLOPs)
219
+ 5
220
+ 10
221
+ 15
222
+ 20
223
+ 25
224
+ 30
225
+ Mean episode return
226
+ StarPilot, hard
227
+ (a) Using mean episode return.
228
+ 1014
229
+ 1015
230
+ 1016
231
+ 1017
232
+ 1018
233
+ Compute (FLOPs)
234
+ 1014
235
+ 1015
236
+ 1016
237
+ 1017
238
+ 1018
239
+ Intrinsic performance (FLOPs)
240
+ Parameters
241
+ 104.3
242
+ 104.6
243
+ 104.9
244
+ 105.2
245
+ 105.5
246
+ 105.8
247
+ 106.1
248
+ 106.4
249
+ 106.7
250
+ 107.0
251
+ Learning
252
+ curve
253
+ Power law
254
+ fit
255
+ Power law
256
+ asymptote
257
+ Efficient
258
+ frontier
259
+ Efficient
260
+ points
261
+ StarPilot, hard
262
+ (b) Using intrinsic performance.
263
+ Figure 2: Learning curves as a function of total training compute for StarPilot, together with their
264
+ power law fits. The asymptotes show the E → ∞ limits of the power law fits, representing the
265
+ predicted performance at convergence. The efficient points show where the power law fits are tangent
266
+ to the efficient frontier. Mean over three seeds shown.
267
+ y = x by definition. This reveals the regularity of the learning curves, which, as we shall see next,
268
+ now follow a power law trend.
269
+ We describe in detail how we compute intrinsic performance in Appendix A.
270
+ 2.2
271
+ The power law for intrinsic performance
272
+ Our main empirical result is that intrinsic performance I scales approximately as a power law with
273
+ model parameters N and environment interactions E,
274
+ $I^{-\beta} = \left(\frac{N_c}{N}\right)^{\alpha_N} + \left(\frac{E_c}{E}\right)^{\alpha_E}$,    (1)
284
+ where αN, αE, β, Nc and Ec are positive constants.
285
+ This is essentially the same as the corresponding scaling law for language models [Kaplan et al.,
286
+ 2020, equation (1.6)], but with test loss replaced by I−β. Although it appears that we have introduced
287
+ an additional exponent β, the intrinsic definition of I means that β is actually determined by αN and
288
+ αE (see Lemma 1).
289
+ The intuition behind this equation is that, when the number of interactions is not bottlenecked
290
+ (E → ∞), I scales as a power law in N, and when model size is not bottlenecked (N → ∞), I
291
+ scales as a power law in E.
292
+ 2.3
293
+ Optimal model size vs compute
294
+ An important implication of equation (1) is that the optimal model size for a given compute budget
295
+ scales as a power law in that compute budget.
296
+ More precisely, we assume that total training compute is proportional to NE (ignoring the compute
297
+ required to run the environment, at least for now). Hence, for a given compute budget, there is a
298
+ trade-off between N and E (the optimum of which defines a point on the compute-efficient frontier).
299
+ What we will now show is that, under equation (1), the optimal value of N scales as a power law in
300
+ the compute budget, with an exponent that we will specify.
301
+ 4
302
+
303
+ Since training compute is proportional to NE, for convenience we choose units of compute such
304
+ that training compute equals NE exactly (although in plots we will continue to display compute in
305
+ FLOPs). This implies that I = NE along the compute-efficient frontier.
306
+ Lemma 1. If I satisfies equation (1) and I = NE along the compute-efficient frontier, then the
307
+ compute-efficient frontier is described by the equation
308
+ $\alpha_N \left(\frac{N_c}{N}\right)^{\alpha_N} = \alpha_E \left(\frac{E_c}{E}\right)^{\alpha_E}$.    (2)
318
+ Moreover, once αN and αE are chosen, β and NcEc are determined:
319
+ $\frac{1}{\beta} = \frac{1}{\alpha_N} + \frac{1}{\alpha_E}$ and $\frac{1}{N_c E_c} = \left(1 + \frac{\alpha_N}{\alpha_E}\right)^{\frac{1}{\alpha_N}} \left(1 + \frac{\alpha_E}{\alpha_N}\right)^{\frac{1}{\alpha_E}}$.
340
+ For a proof, see Appendix F.
341
+ Substituting equation (2) into equation (1), it follows that along the compute-efficient frontier,
342
+ $N = N_c \left(1 + \frac{\alpha_N}{\alpha_E}\right)^{\frac{1}{\alpha_N}} C^{\frac{1}{1 + \alpha_N/\alpha_E}}$,
352
+ where C := NE. In other words, for a given compute budget C, the optimal model size N scales as
353
+ $N \propto C^{\frac{1}{1 + \alpha_N/\alpha_E}}$.
357
+ 3
358
+ Experimental setup
359
+ We ran experiments using a variety of RL environments:
360
+ • Procgen Benchmark [Cobbe et al., 2020]: CoinRun, StarPilot and FruitBot in both easy
361
+ and hard modes, separately varying CNN width and depth.
362
+ • Dota 2 [OpenAI et al., 2019]: a 1v1 version of the game, varying LSTM size.
363
+ • MNIST: an RL environment in which the agent has to correctly label a handwritten digit
364
+ from MNIST [LeCun, 1998], using hyperparameters to artificially alter the “horizon length”
365
+ of the task, varying CNN width.
366
+ All our experiments used a variant of either the PPO algorithm [Schulman et al., 2017] or its close
367
+ cousin PPG [Cobbe et al., 2021], along with the Adam optimization algorithm [Kingma and Ba,
368
+ 2014].
369
+ The remainder of this section discusses further details of our experimental setup. Hyperparameters
370
+ for all our experiments are given in Appendix B.
371
+ 3.1
372
+ Procgen Benchmark
373
+ For our Procgen Benchmark experiments, we used CoinRun, StarPilot and FruitBot. We chose these
374
+ environments because they have lower-variance learning curves than other Procgen environments,
375
+ and because CoinRun’s binary reward enabled us to study the scaling of natural performance metrics
376
+ (see Section 4.5). We used both the easy and hard difficulty modes of these environments to see if
377
+ this would have an effect on the scaling constants.
378
+ We used PPG-EWMA [Hilton et al., 2021] with a fixed KL penalty objective [Cobbe et al., 2021],
379
+ and trained for 200 million environment interactions.
380
+ We used the CNN architecture from IMPALA [Espeholt et al., 2018] and conducted both width-
381
+ scaling and depth-scaling experiments. For our width-scaling experiments, we varied the total number
382
+ of parameters from 1/64 of the default to 8 times the default, rounding to integer numbers of channels.
385
+ For our depth-scaling experiments, we varied the number of residual blocks per stack from 1 to 64,
386
+ and used 1/4 of the default width since the default number of residual blocks per stack was only 2.
388
+ 5
389
+
390
+ 3.2
391
+ Dota 2
392
+ For our Dota 2 experiments, we used a 1v1 version of the game to save computational expense.
393
+ Following OpenAI et al. [2019], we used PPO, but we adjusted the asynchronous setup to ensure that
394
+ training used only on-policy data with no data reuse. We used 8 parallel GPU workers and trained for
395
+ between 13.6 billion and 82.6 billion environment interactions.
396
+ We used an LSTM architecture and varied the width of the network, with the sizes of the embedding
397
+ and hidden state varying from 8 to 4096.
398
+ 3.3
399
+ MNIST
400
+ Our MNIST environment samples a handwritten digit from the MNIST training set uniformly and
401
+ independently at random at each timestep, and provides an immediate reward of 1 for a correct label
402
+ and 0 for an incorrect label. There are no episode boundaries, and so we measure mean training
403
+ accuracy instead of mean episode return.
404
+ The use of immediate rewards with no episode boundaries allows the horizon length of the task
405
+ to be artificially controlled by varying the hyperparameters of our method of advantage estimation,
406
+ GAE [Schulman et al., 2015]. First, we set the GAE credit assignment parameter λ to 1, so that the
407
+ algorithm assigns credit for each reward to all previous actions, instead of assigning more immediate
408
+ credit. Then we vary the GAE discount rate γ, so that the algorithm discounts future rewards at this
409
+ rate. In separate experiments, we set $\gamma = 1 - \frac{2}{h+1}$ for different values of the “horizon length” h
412
+ ranging from 1 to 256. (This equation is equivalent to saying that an exponentially-weighted moving
413
+ average with decay parameter γ has the same center of mass as the interval [0, h − 1].)
414
+ We used PPO-EWMA [Hilton et al., 2021] with rollouts of length 512 (twice as long as our maximum
415
+ value of h), and trained for $2^{25}$ environment interactions.
416
+ We used a simple CNN architecture with ReLU activations and the following layers: a 5 × 5
417
+ convolutional layer with 40 channels, 2×2 max pooling, a 3×3 convolutional layer with 80 channels,
418
+ 2 × 2 max pooling, and a dense layer with 1,000 channels. We scaled the width of this network by
419
+ varying the total number of parameters from 1/64 of the default to 8 times the default. We used separate
422
+ policy and value function networks because we did not expect there to be much transfer between the
423
+ two objectives, since the environment samples digits independently.
424
+ 3.4
425
+ Learning rates
426
+ Although we would not expect our qualitative results to change much, our quantitative results
427
+ such as scaling exponents depend crucially on using well-tuned hyperparameters. By far the most
428
+ important hyperparameter to tune in our setup is the Adam learning rate, whose optimal value can
429
+ vary substantially with model size and compute budget.
430
+ When varying model size, we found that a good heuristic is to keep the Adam learning rate propor-
431
+ tional to the initialization scale. For our width-scaling experiments, this means keeping the Adam
432
+ learning rate proportional to $1/\sqrt{\text{width}}$, since we use Kaiming He initialization [He et al., 2015]. For
435
+ our Procgen depth-scaling experiments, which use a residual network, it means keeping the Adam
436
+ learning rate proportional to $1/\text{depth}^{1/L}$, where L is the number of layers per residual block (L = 2
441
+ in our case), since we use an initialization similar to Fixup initialization [Zhang et al., 2019]. For
442
+ Procgen and MNIST, we tuned the learning rate at one model size and followed this heuristic to select
443
+ the learning rate for the other model sizes. For Dota 2, we tuned the learning rate separately for each
444
+ model size, but this amounted to following approximately the same heuristic.
445
+ When varying the compute budget for a given model size, it can actually be necessary to use separate
446
+ training runs for each compute budget, each with its own learning rate schedule, rather than taking
447
+ different snapshots at different points of the same training run [Hoffmann et al., 2022]. Unfortunately,
448
+ due to the challenge of carefully tuning learning rate schedules for RL and the expense of multiplying
449
+ the number of training runs, we took the latter approach. To mitigate the impact of this, we found a
450
+ learning rate schedule that seemed to work well for a variety of compute budgets, which we explain
451
+ in Appendix B.1. Nevertheless, the values of our scaling exponents should be considered uncertain
452
+ because of this.
453
+ 6
454
+
455
+ 0.2
456
+ 0.3
457
+ 0.4
458
+ 0.5
459
+ 0.6
460
+ 0.7
461
+ 0.8
462
+ αN
463
+ 0.3
464
+ 0.4
465
+ 0.5
466
+ 0.6
467
+ 0.7
468
+ 0.8
469
+ 0.9
470
+ 1.0
471
+ αE
472
+ 1
473
+ 1 + αN/αE
474
+ = 0.8
475
+ 1
476
+ 1 + αN/αE
477
+ = 0.7
478
+ 1
479
+ 1 + αN/αE
480
+ = 0.6
481
+ Procgen (width)
482
+ CoinRun
483
+ StarPilot
484
+ FruitBot
485
+ Easy, single seed
486
+ Easy, mean return
487
+ Hard, single seed
488
+ Hard, mean return
489
+ Dota 2
490
+ 1v1
491
+ Reference
492
+ αN/αE = const
493
+ MNIST horizons
494
+ 1
495
+ 2
496
+ 4
497
+ 8
498
+ 16
499
+ 32
500
+ 64
501
+ 128
502
+ 192
503
+ 256
504
+ αN vs αE
505
+ Figure 3: Fitted values of αN and αE. For Procgen, we also show the values fitted using each of
506
+ the 3 random seeds, to show the variation due to the choice of random seed. The dotted lines show
507
+ contours for $\frac{1}{1 + \alpha_N/\alpha_E}$, the exponent for the scaling of optimal model size with compute.
510
+ 4
511
+ Results
512
+ Our main result is that our power law for intrinsic performance, equation (1), holds across envi-
513
+ ronments and model sizes, at least after an initial transient period of training (which we discuss in
514
+ more detail in Section 4.3). This result is supported by the closeness of the power law fit to our
515
+ learning curves, as shown in Figure 2 for StarPilot and in Appendix C for all our environments. Our
516
+ methodology for fitting this power law is described in Appendix A.
517
+ It is interesting to study the sensitivity of the exponents αN and αE, which govern the scaling
518
+ behavior of I with N and E (and determine the other exponents of interest). The fitted values of
519
+ these exponents for the different environments are shown in Figure 3. The numerical values of all of
520
+ the fitted constants may be found in Appendix E.
521
+ Although our measurements of these exponents are uncertain, due to the limitations discussed in
522
+ Section 5.3, we make a number of observations:
523
+ • The primary determinant of αN and αE is the domain (Procgen, Dota 2, or MNIST), which
524
+ we expect is a consequence of the fact that so many experimental details are shared within
525
+ each domain.
526
+ • Within MNIST, increasing the horizon seems to lower αE, but as we explain in Section 4.2,
527
+ this effect is confounded by a measurement problem caused by under-training.
528
+ • Within Procgen, the easy and hard modes of each Procgen game tend to have closer
529
+ exponents to one another than to other Procgen games. We believe that this is because
530
+ identifying visual features is a core part of Procgen, and the two modes of each game have
531
+ very similar observation distributions.
532
+ 7
533
+
534
+ 10−7
535
+ 10−6
536
+ 10−5
537
+ 10−4
538
+ 10−3
539
+ 10−2
540
+ Compute (PF-days)
541
+ 103
542
+ 104
543
+ 105
544
+ 106
545
+ 107
546
+ Parameters
547
+ Procgen (width)
548
+ CoinRun
549
+ StarPilot
550
+ FruitBot
551
+ Easy
552
+ Hard
553
+ Dota 2
554
+ 1v1
555
+ Generative modeling
556
+ Language
557
+ (Hoffmann et al.)
558
+ Language
559
+ (Kaplan et al.)
560
+ Image 32x32
561
+ (Henighan et al.)
562
+ MNIST horizons
563
+ 1
564
+ 2
565
+ 4
566
+ 8
567
+ 16
568
+ 32
569
+ 64
570
+ 128
571
+ 192
572
+ 256
573
+ Optimal model size vs compute
574
+ Figure 4: Optimal model size vs compute for all our environments. Note that the individual points,
575
+ which correspond to the sizes of models that we trained, are themselves obtained from a power law
576
+ best fit. Hence the fact that the lines pass through the points exactly is automatic and does not indicate
577
+ goodness of fit.
578
+ • The Procgen difficulty mode does not obviously have any particular effect on the scaling
579
+ exponents. We hypothesize that humans tend to judge a task as easier when a near-perfect
580
+ score can be achieved with less compute, even if it takes a lot of additional compute to eke
581
+ out the final few points. Conversely, it does not seem to matter to the RL algorithm exactly
582
+ how the score maps on to intrinsic performance (i.e., the compute required).
583
+ 4.1
584
+ Optimal model size vs compute
585
+ As explained in Section 2.3, our power law for intrinsic performance implies that, for a given compute
586
+ budget, the optimal model size scales as a power law with exponent $\frac{1}{1+\alpha_N/\alpha_E}$.
589
+ Figure 4 shows these inferred relationships for our different environments, along with some generative
590
+ modeling relationships taken from the literature. The full equations for these relationships are
591
+ provided in Appendix E.
592
+ The exponent $\frac{1}{1+\alpha_N/\alpha_E}$ varied between around 0.40 and 0.65 for Procgen and 0.66 and 0.80 for
595
+ MNIST, and was around 0.76 for Dota 2. By comparison, the corresponding exponent for language
596
+ modeling, which was carefully measured by Hoffmann et al. [2022], is around 0.50. Previous work
597
+ by Kaplan et al. [2020] and Henighan et al. [2020] measured this exponent less carefully but using a
598
+ methodology that more closely matches our own, and found an exponent of around 0.73 for language
599
+ and 0.65 for 32x32 images.
600
+ An intriguing conjecture, which is also suggested by theoretical considerations [Bahri et al., 2021],
601
+ is that the exponent of this relationship would be around 0.5 in every domain if it were measured
602
+ carefully enough (i.e., with optimal hyperparameters and enough random seeds). Given the limitations
603
+ of our experiments, we consider our results to be inconclusive on this question.
604
+ Nevertheless, it is clear that the scaling coefficient of this relationship varies significantly between
605
+ domains. With the exception of our toy MNIST environment, the optimal model size for RL for
606
+ 8
607
+
608
+ a given compute budget is consistently smaller than for generative modeling, in some cases by
609
+ multiple orders of magnitude. We believe that this is because RL tasks have a longer horizon length
610
+ than generative modeling in some sense, and explore this hypothesis with our MNIST environment
611
+ in Section 4.2. Another possibility is that the arithmetic intensity (i.e., the number of FLOPs per
612
+ parameter in a forward pass) of the architecture is a confounder, which we discuss in more depth in
613
+ Section 4.4.
614
+ 4.2
615
+ Effect of task horizon length
616
+ As explained in Section 3.3, for our MNIST experiments, we artificially altered the “horizon length”
617
+ of the task by setting the GAE credit assignment parameter λ to 1 and varying the GAE discount rate
618
+ γ.
619
+ The expected effect of varying γ in this context is given by the following theoretical result.
620
+ Proposition 1. Consider an MDP with independent timesteps (by which we mean that each st is
621
+ identically distributed and independent of st−1 and at−1, and episodes never terminate). Suppose we
622
+ train a model with parameters θ on this MDP using Vanilla Policy Gradient,1 estimating advantages
623
+ using GAE with $\gamma = 1 - \frac{2}{h+1}$ and $\lambda = 1$, and working with separate policy and value function
626
+ networks. Then the covariance matrix of the policy gradient is approximately
627
+ $\Sigma_\theta + \Pi_\theta \left( h + \frac{1}{h} - 2 \right)$
632
+ for some symmetric positive semi-definite matrices Σθ and Πθ that do not depend on h.
633
+ For a proof sketch, see Appendix G.
634
+ Intuitively, this result says that gradient variance may be decomposed into two pieces: one piece that
635
+ is inherent to the task (the Σθ term), and one piece that comes from imperfect credit assignment (the
636
+ Πθ term). For example, when h = 1 (i.e., γ = 0), credit is correctly assigned to the previous action
637
+ only, and hence the second term vanishes. Ignoring the $\frac{1}{h}$ term (since h ≥ 1), we may stylize this
639
+ result as: gradient variance is an affine function of h (i.e., a linear function with an intercept).
640
+ This can be directly translated into a statement about sample efficiency, since multiplying the gradient
641
+ variance by some factor c can be exactly compensated for by multiplying the batch size by c, which
642
+ multiplies the number of samples used by c. Hence in order to reach a given performance level,
643
+ the number of environment interactions required should be an affine function of h. This affine
644
+ function will come from integrating certain functionals of Σθ and Πθ over the course of training,
645
+ and will therefore depend both on the model architecture and on the choice of performance level.
646
+ To test this prediction, we looked at the number of environment interactions required to reach a
647
+ 1% failure rate (i.e., 99% training accuracy) on MNIST as a function of the horizon length h. Our
648
+ results are shown in Figure 5, along with affine fits. As expected, the number of interactions closely
649
+ follows an affine function of the horizon length, although the fit is less good for shorter horizons and
650
+ larger models. At very short horizons, the number of interactions even decreases with the horizon
651
+ length, suggesting a hyperparameter issue (perhaps a suboptimal learning rate schedule, or reward
652
+ normalization implicitly decreasing the KL penalty and entropy bonus).
653
+ The implication of this for our optimal model size vs compute scaling law is that once h becomes large
654
+ enough, further increasing h should lead to a proportional increase the compute budget corresponding
655
+ to each given optimal model size, without changing the scaling exponent of this relationship. This
656
+ is because the intercept term of the affine function will eventually become dominated by the term
657
+ involving h, and so the number of environment interactions required to reach a given performance
658
+ level will eventually scale approximately proportionally to h. (For small values of h, however, the
659
+ relationship between the two components of the covariance matrix of the policy gradient may have a
660
+ more complex dependence on model size.)
661
+ This effect is visible in Figure 4, where the main impact of increasing the horizon length is to shift
662
+ the optimal model size vs compute curve to the right. The curve also gets shallower as the horizon
663
+ 1Vanilla Policy Gradient is a primitive version of PPO, explained here: https://spinningup.openai.
664
+ com/en/latest/algorithms/vpg.html
665
+ 9
666
+
667
+ 0
668
+ 50
669
+ 100
670
+ 150
671
+ 200
672
+ 250
673
+ Horizon length h
674
+ 1
675
+ 2
676
+ 3
677
+ 4
678
+ 5
679
+ 6
680
+ Interactions
681
+ ×106
682
+ Parameters
683
+ 104.8
684
+ 105.1
685
+ 105.4
686
+ 105.7
687
+ 106.0
688
+ 106.3
689
+ 106.6
690
+ 106.9
691
+ 107.2
692
+ 107.5
693
+ Value
694
+ Affine fit
695
+ Interactions required to reach a 1% failure rate, MNIST
696
+ Figure 5: Sample efficiency for MNIST as a function of the horizon length h, for all our model sizes.
697
+ length is increased, but this effect is confounded by a measurement problem caused by under-training,
698
+ which we explain in more detail in Section 4.3.
699
+ Our MNIST environment is useful because it allows us to vary the task horizon length in a fine-
700
+ grained, quantifiable way by varying γ. But our analysis of this environment relies on the assumption
701
+ of independent timesteps, which does not hold in most environments (and in particular removes the
702
+ need for exploration). Nevertheless, our results are suggestive of a more general explanation for the
703
+ large differences in optimal model size for a given compute budget between different environments:
704
+ that different environments have different task horizon lengths in a more general sense. We speculate
705
+ that, in this more general sense, task horizon length is influenced by how long rewards are delayed
706
+ for relative to the actions the agent is currently learning (which may increase throughout training as
707
+ the agent learns skills with feedback loops that are less and less tight), and that γ determines only an
708
+ upper bound on the task horizon length.
709
+ 4.3
710
+ Variability of exponents over training
711
+ Although our power law for intrinsic performance holds across environments and model sizes, we
712
+ only obtain a good fit by excluding an initial transient period of training. Put another way, the scaling
713
+ constants vary over the course of training.
714
+ This phenomenon is clearest with our MNIST environment, since we were able to use many
715
+ random seeds to reduce variance. Recall that in this environment, the agent observes a randomly
716
+ sampled MNIST training set digit each timestep, and the horizon length of the task is artificially
717
+ controlled using the GAE discount rate γ, as explained in Section 3.3. We fitted our power law to
718
+ three different periods of training for this environment: an early period (216–219 interactions), a
719
+ middle period (219–222 interactions), and a late period (222–225 interactions).
720
+ Figure 6 shows the fitted values of αN and αE for these different periods of training. We found αE
721
+ to be significantly lower during the early and middle periods of training, especially for the shorter
722
+ horizon lengths.
723
+ In order to accurately measure the scaling constants for optimal model size vs compute, it is best to
724
+ use a period of training during which the learning curves reach the compute-efficient frontier, since
725
+ otherwise the measurement is an extrapolation. As shown in Figure 7, this is always in the late period
726
+ 10
727
+
728
+ 0.0
729
+ 0.1
730
+ 0.2
731
+ 0.3
732
+ 0.4
733
+ 0.5
734
+ 0.6
735
+ αN
736
+ 0.1
737
+ 0.2
738
+ 0.3
739
+ 0.4
740
+ 0.5
741
+ 0.6
742
+ 0.7
743
+ 0.8
744
+ 0.9
745
+ 1.0
746
+ αE
747
+ 1
748
+ 1 + αN/αE
749
+ = 0.9
750
+ 1
751
+ 1 + αN/αE
752
+ = 0.8
753
+ 1
754
+ 1 + αN/αE
755
+ = 0.7
756
+ MNIST periods
757
+ Early
758
+ Middle
759
+ Late
760
+ MNIST horizons
761
+ 1
762
+ 2
763
+ 4
764
+ 8
765
+ 16
766
+ 32
767
+ 64
768
+ 128
769
+ 192
770
+ 256
771
+ αN vs αE, MNIST
772
+ Figure 6: Fitted values of αN and αE for MNIST
773
+ with different horizons, using different periods of
774
+ training to fit the power laws. The horizon h is
775
+ defined by $\gamma = 1 - \frac{2}{h+1}$, where γ is the discount
778
+ rate.
779
+ 1013
780
+ 1014
781
+ 1015
782
+ 1016
783
+ 1014
784
+ 1016
785
+ MNIST, horizon 1, late period
786
+ Parameters
787
+ 104.8
788
+ 105.1
789
+ 105.4
790
+ 105.7
791
+ 106.0
792
+ 106.3
793
+ 106.6
794
+ 106.9
795
+ 107.2
796
+ 107.5
797
+ 1013
798
+ 1014
799
+ 1015
800
+ 1016
801
+ 1013
802
+ 1014
803
+ 1015
804
+ Intrinsic performance (FLOPs)
805
+ MNIST, horizon 256, late period
806
+ 1012
807
+ 1013
808
+ 1014
809
+ 1015
810
+ Compute (FLOPs)
811
+ 1012
812
+ 1013
813
+ MNIST, horizon 1, middle period
814
+ Learning
815
+ curve
816
+ Power law
817
+ fit
818
+ Efficient
819
+ frontier
820
+ Efficient
821
+ points
822
+ Figure 7: Learning curves as a function of total
823
+ training compute for MNIST, using different hori-
824
+ zons and different periods of training, together
825
+ with their power law fits. Mean over the middle-
826
+ performing 16 of 20 random seeds shown.
827
+ of training, if at all. For this reason, we use the late period of training for all of our results on MNIST
828
+ outside of this section.
829
+ Figure 7 also shows that, for the longer horizon lengths, the learning curves of the larger models
830
+ did not reach the compute-efficient frontier even during the late period of training. Hence our
831
+ measurements of $\frac{1}{1+\alpha_N/\alpha_E}$, the exponent for the scaling of optimal model size with compute, are
834
+ likely underestimates for these longer horizon lengths.
835
+ For our other environments, we found that it was enough to exclude only the first $\frac{1}{64}$ of training
838
+ in order for our power law for intrinsic performance to be a good fit around the compute-efficient
839
+ frontier. This is similar to what is needed for the corresponding law for language [Kaplan et al., 2020,
840
+ Figure 4, right]. Nevertheless, it is possible that the measurement problem identified in this section
841
+ affects some of our other results.
842
+ 4.4
843
+ Scaling depth
844
+ Most of our experiments involved scaling the width of our networks, but for Procgen, we also tried
845
+ scaling the depth, as explained in Section 3.1. We found that our power law for intrinsic performance
846
+ still held, but with more noise than the width-scaling experiments, as a consequence of using fewer
847
+ model sizes. The fitted values of αN and αE for the depth-scaling experiments lay in a similar region
848
+ to the width-scaling experiments, but there were no clear relationships between the depth-scaling
849
+ exponents for the different environments, nor between the width-scaling and depth-scaling exponents
850
+ 11
851
+
852
+ 10−5
853
+ 10−4
854
+ 10−3
855
+ 10−2
856
+ Compute (PF-days)
857
+ 104
858
+ 105
859
+ 106
860
+ Parameters
861
+ Optimal model size vs compute, Procgen
862
+ (a) Using parameters as the measure of model size.
863
+ 10−5
864
+ 10−4
865
+ 10−3
866
+ 10−2
867
+ Compute (PF-days)
868
+ 106
869
+ 107
870
+ 108
871
+ FLOPs per forward pass
872
+ Generative modeling
873
+ Language
874
+ (Hoffmann et al.)
875
+ Language
876
+ (Kaplan et al.)
877
+ Image 32x32
878
+ (Henighan et al.)
879
+ Procgen
880
+ CoinRun
881
+ StarPilot
882
+ FruitBot
883
+ Easy
884
+ Hard
885
+ Width
886
+ Depth (*)
887
+ Optimal model size vs compute, Procgen,
888
+ arithmetic intensity-adjusted
889
+ (b) Using FLOPs per forward pass instead of parameters.
890
+ Figure 8: Comparison of optimal model size vs compute for our Procgen width- and depth-scaling
891
+ experiments. (*) It is important to understand how parameters and FLOPs were counted to interpret
892
+ the depth-scaling results. This is explained in detail in Appendix D.
893
+ for a given environment. Plots of our results may be found in Appendix C, and the numerical values
894
+ of the fitted constants may be found in Appendix E.
895
+ The main difference between our width-scaling and depth-scaling results is that the optimal model
896
+ size for a given compute budget was significantly smaller for our depth-scaling experiments, but
897
+ this was an artifact of how we counted parameters and FLOPs. As explained in Appendix D, we
898
+ only included the part of the network being scaled in our parameter and FLOP calculations, which
899
+ meant excluding the final dense layer of the network for our depth-scaling experiments, but not our
900
+ width-scaling experiments. If this layer had been included in our depth-scaling calculations, it would
901
+ have accounted for between 16% and 90% of the parameters but only 2% or fewer of the FLOPs,
902
+ depending on the depth.
903
+ Interestingly, as shown in Figure 8, the optimal model size vs compute scaling laws for our width-
904
+ and depth-scaling experiments become much more similar if we measure model size using FLOPs
905
+ per forward pass rather than parameters. This is because excluding the final dense layer from the
906
+ parameter and FLOP calculations significantly increases the arithmetic intensity (i.e., FLOPs per
907
+ parameter in a forward pass) as calculated for the depth-scaling experiments. This suggests that,
908
+ when comparing models with very different arithmetic intensities, FLOPs per forward pass may
909
+ be a better measure of model size than parameters (or perhaps arithmetic intensity should even be
910
+ considered as an additional independent variable).
911
+ 4.5
912
+ Natural performance metrics
913
+ Although in general there may be no obvious performance metric that scales smoothly with model
914
+ parameters and environment interactions, motivating our use of intrinsic performance, there may still
915
+ be such a metric in some environments. We call such metrics natural performance metrics, and we
916
+ were able to find them in a couple of our environments:
917
+ • CoinRun: In the CoinRun environment from Procgen Benchmark, the episode return is
918
+ always either 10 or 0, corresponding to whether or not the agent successfully collects the coin at
919
+ the end of the level. We found the fail-to-success ratio $F := \frac{10 - R}{R}$, where R is the mean
922
+ episode return, to be a natural performance metric for CoinRun. This is similar to the failure
923
+ rate $1 - \frac{R}{10}$, since R is close to 10 for most of training, but provides a slightly better fit
925
+ early in training, since it does not have an upper bound of 1. Note that the logarithm of the
926
+ 12
927
+
928
+ 1014
929
+ 1015
930
+ 1016
931
+ 1017
932
+ 1018
933
+ Compute (FLOPs)
934
+ 10−2
935
+ 10−1
936
+ Fail-to-success ratio
937
+ Easy
938
+ Hard
939
+ Learning
940
+ curves
941
+ Power law
942
+ fitted to
943
+ I−β
944
+ (arbitrary
945
+ function
946
+ of ratio)
947
+ Fail-to-
948
+ success
949
+ ratio
950
+ CoinRun, efficient frontier fits
951
+ Figure 9: Comparison of the efficient frontier fits
952
+ for CoinRun, using intrinsic performance and the
953
+ fail-to-success ratio.
954
+ 1014
955
+ 1016
956
+ 1018
957
+ 1020
958
+ Compute (FLOPs)
959
+ −5
960
+ 0
961
+ 5
962
+ 10
963
+ 15
964
+ 20
965
+ 25
966
+ TrueSkill
967
+ Learning
968
+ curves
969
+ Power law
970
+ fitted to
971
+ I−β
972
+ (arbitrary
973
+ function
974
+ of T)
975
+ e−αT T
976
+ Dota 2, efficient frontier fits
977
+ Figure 10: Comparison of the efficient frontier
978
+ fits for Dota 2, using intrinsic performance and
979
+ exponentiated scaled TrueSkill.
980
+ fail-to-success ratio can also be thought of as the logit function (inverse sigmoid) of the
981
+ failure rate.
982
+ • Dota 2: Dota 2 is a two-player game, and so the performance of a policy must be measured
983
+ by comparing it to other policies. The standard method for this is the TrueSkill rating
984
+ system,2 in which differences in rating between policies correspond to win probabilities
985
+ when the policies are played against one another, similarly to the Elo rating system. We
986
+ found TrueSkill to be a natural performance metric for Dota 2.
987
+ Specifically, we found that our power law for intrinsic performance, equation (1), still roughly held
988
+ with the left-hand side replaced by a suitable function of the natural performance metric. For CoinRun,
989
+ we used the fail-to-success ratio directly, but discarded data from early in training where this ratio
990
+ was above 0.5. For Dota 2, we used e−αT T , where T is TrueSkill and αT is a fitted constant, which
991
+ was needed because the scale of T is arbitrary.
992
+ Figures 9 and 10 compare the efficient frontier fits for intrinsic performance and for the natural
993
+ performance metric, for CoinRun and Dota 2 respectively. The fits match closely, except for Dota 2 at
994
+ higher levels of TrueSkill. We conjecture that Dota 2 has an analog of an irreducible loss [Henighan
995
+ et al., 2020], representing the maximum attainable TrueSkill for the family of models we trained.
996
+ We explored introducing an additional fitted constant T ∗ for this maximum attainable TrueSkill, and
997
+ using either of the functional forms e−αT T − e−αT T ∗ and (T ∗ − T)αT . However, it was unclear
998
+ to us which of these forms made the most theoretical sense, and we were unsure whether we could
999
+ justify the extra degree of freedom given the lack of data at higher levels of TrueSkill.
1000
+ The fitted constants for all of these alternative power laws for both CoinRun and Dota 2 are given in
1001
+ Appendix E. Interestingly, for CoinRun, the values of the scaling exponent for the fail-to-success
1002
+ ratio F in terms of intrinsic performance I, corresponding to the slopes of the lines in Figure 9, are
1003
+ similar between the two difficulty modes: F ∝ I−0.40 in easy mode and F ∝ I−0.48 in hard mode.
1004
+ 5
1005
+ Discussion
1006
+ 5.1
1007
+ Extrapolating sample efficiency
1008
+ We may use our power law for intrinsic performance, equation (1), to extrapolate sample efficiency
1009
+ to unseen model sizes N and environment interactions E. For example, in Figure 11, we show the
1010
+ 2https://en.wikipedia.org/wiki/TrueSkill
1011
+ 13
1012
+
1013
+ 0.0
1014
+ 0.5
1015
+ 1.0
1016
+ 1.5
1017
+ 2.0
1018
+ Interactions
1019
+ ×108
1020
+ 5
1021
+ 10
1022
+ 15
1023
+ 20
1024
+ 25
1025
+ 30
1026
+ Mean episode return
1027
+ Parameters
1028
+ 104.3
1029
+ 104.6
1030
+ 104.9
1031
+ 105.2
1032
+ 105.5
1033
+ 105.8
1034
+ 106.1
1035
+ 106.4
1036
+ 106.7
1037
+ 107.0
1038
+ Learning
1039
+ curve
1040
+ Power law fit
1041
+ Power law
1042
+ N → ∞
1043
+ limit
1044
+ Sample efficiency, StarPilot, hard
1045
+ Figure 11: Learning curves for StarPilot (hard
1046
+ mode, scaling width), together with their power
1047
+ law fits, and the N → ∞ limit of the power law.
1048
+ 10−7
1049
+ 10−6
1050
+ 10−5
1051
+ 10−4
1052
+ 10−3
1053
+ 10−2
1054
+ Compute (PF-days)
1055
+ 103
1056
+ 104
1057
+ 105
1058
+ 106
1059
+ 107
1060
+ Parameters
1061
+ Procgen (width)
1062
+ CoinRun
1063
+ StarPilot
1064
+ FruitBot
1065
+ Easy
1066
+ Hard
1067
+ Dota 2
1068
+ 1v1
1069
+ GM (various)
1070
+ MNIST horizons
1071
+ 1–256
1072
+ Optimal model size vs compute, Ne = 105
1073
+ Figure 12: Optimal model size vs compute, taking
1074
+ into account a hypothetical compute cost per en-
1075
+ vironment interaction equal to that of a model of
1076
+ size Ne = 105. See Figure 4 for the full legend.
1077
+ extrapolated learning curve for StarPilot in the infinite-width limit. This reaches the final performance
1078
+ of our largest model in about half the number of environment interactions. Note, however, that
1079
+ without a natural performance metric, we cannot extrapolate to unseen performance levels.
1080
+ It is natural to ask how this extrapolated infinite-width limit compares to human sample efficiency. On
1081
+ StarPilot (slowed down to 3 frames per second), a human can reach a mean episode return of around
1082
+ 20 after a few episodes, whereas the extrapolated infinitely-wide model takes 18 million interactions,
1083
+ around 10,000 times as many. This is not really a fair comparison though, because much of the
1084
+ challenge in Procgen is to learn to identify basic visual features, which humans are already able to do.
1085
+ For Dota 2, we crudely estimate that it would take a human around 50–500 hours of gameplay to
1086
+ reach the performance of the extrapolated infinitely-wide LSTM after 5 billion interactions, a factor
1087
+ of 100–1,000 in sample efficiency. This comparison may be fairer, because Dota 2 has a structured
1088
+ observation space and is more challenging than StarPilot, although it still draws on many pre-existing
1089
+ human intuitions. Of course, our models were all trained from scratch, and we should expect this
1090
+ factor to be smaller for models that have been pre-trained to learn useful representations.
1091
+ 5.2
1092
+ Cost-efficient reinforcement learning
1093
+ In the reinforcement learning literature, sample efficiency is usually taken to be the primary metric
1094
+ of algorithmic progress. This can be thought of as focusing on the cost of running the environment,
1095
+ but not the algorithm. At the other extreme, we have so far focused on the computational cost of the
1096
+ algorithm, but not on the cost of the environment. However, it is straightforward to now take both
1097
+ into account. To do this, let Ne be the cost of the environment, measured in terms of the number of
1098
+ parameters in a model with the same cost per interaction. Thus the total cost of both the algorithm
1099
+ and the environment is proportional to (N + Ne) E.
1100
+ The cost-efficient frontier is now described by the following generalization of equation (2):
1101
+ $$\left(1 + \frac{N_e}{N}\right) \alpha_N \left(\frac{N_c}{N}\right)^{\alpha_N} = \alpha_E \left(\frac{E_c}{E}\right)^{\alpha_E}.$$
1114
+ Substituting this into our power law given by equation (1), it follows that along the cost-efficient
1115
+ frontier,
1116
+ $$C = \left(1 + \frac{N_e}{N}\right) \left(\frac{1}{1 + \frac{\alpha_N}{\alpha_E}\left(1 + \frac{N_e}{N}\right)}\right)^{\frac{1}{\alpha_N} + \frac{1}{\alpha_E}} \left(\frac{N}{N_c}\right)^{1 + \frac{\alpha_N}{\alpha_E}},$$
1136
+ 14
1137
+
1138
+ where C := (N + Ne) E. Thus for a given budget C, the optimal model size N scales as the same
1139
+ power law in C as before once N ≫ Ne, and it is only efficient to take N ≪ Ne when C is very
1140
+ small. This validates and makes precise the rule-of-thumb that it is usually inefficient to use a model
1141
+ that is much cheaper to run than the environment, at least when training from scratch.
1142
+ To illustrate this relationship, Figure 12 shows the optimal model size vs compute relationship from
1143
+ Figure 4, but incorporating a fixed hypothetical compute cost associated with each environment
1144
+ interaction.
1145
+ 5.3
1146
+ Limitations
1147
+ Our experiments have several limitations:
1148
+ • As explained in Section 3.4, we did not use separate training runs for each compute budget,
1149
+ each with their own learning rate schedule, which can be necessary to accurately measure
1150
+ scaling exponents [Hoffmann et al., 2022]. We tried to mitigate this by using a learning rate
1151
+ schedule that worked well for a variety of compute budgets, as explained in Appendix B.1,
1152
+ but this may not have been enough.
1153
+ • As explained in Section 4.3, the variability of exponents over training gives rise to a
1154
+ measurement problem. We mitigated this to some extent by excluding data from early in
1155
+ training when fitting our power law, but this does not fully correct for the fact that some of
1156
+ our models were under-trained relative to the compute-efficient frontier.
1157
+ • We did not carefully optimize the aspect ratios of our models, instead scaling width and
1158
+ depth separately. More generally, suboptimal hyperparameters or other problems with our
1159
+ training setups could have lead to errors in our measurements of scaling constants.
1160
+ • Learning curves in reinforcement learning are often very high-variance, adding significant
1161
+ noise to power law fits. We mitigated this to some extent by choosing environments with
1162
+ relatively low-variance learning curves and using multiple random seeds, but a lot of variance
1163
+ still remained.
1164
+ As a result of these limitations, we do not think conclusions that depend on the precise fitted values of
1165
+ our scaling constants can be drawn with confidence, although we consider our mitigations sufficient
1166
+ for more qualitative conclusions. We are excited for future work to fix these limitations, explore
1167
+ new domains, and more carefully disentangle the effects of the choice of algorithm, architecture and
1168
+ hyperparameters as well as properties of the environment.
1169
+ 5.4
1170
+ Forecasting compute requirements
1171
+ The scaling of optimal model size with compute is a key input into the biological anchors framework
1172
+ for forecasting transformative artificial intelligence [Cotra, 2020]. In this framework, the human brain
1173
+ is used as a biological anchor for estimating the number of parameters in a transformative model, and
1174
+ optimal model size vs compute scaling laws are used to forecast the total compute required to train
1175
+ such a model. In this section we summarize the main implications of our work for this framework.
1176
+ Scaling exponents for reinforcement learning lie in a similar range to generative modeling. The
1177
+ exponent for the scaling of optimal model size with compute, $\frac{1}{1+\alpha_N/\alpha_E}$, varied between around 0.4
1180
+ and 0.8 for our environments, a range that encompasses previous measurements of this exponent for
1181
+ generative modeling. However, as discussed in Section 5.3, we do not think our measurements of this
1182
+ exponent should be taken literally, due to the limitations of our experiments. The results of Hoffmann
1183
+ et al. [2022] and Bahri et al. [2021] suggest the possibility that this exponent would be around 0.5 in
1184
+ every domain if it were measured carefully enough, and we consider our results to be inconclusive on
1185
+ this question.
1186
+ Scaling coefficients for reinforcement learning vary by multiple orders of magnitude. The
1187
+ coefficient for the scaling of optimal model size with compute, $N_c \left(1 + \frac{\alpha_N}{\alpha_E}\right)^{\frac{1}{\alpha_N}}$, varied substantially,
1194
+ enough that we do not think this variation is attributable only to the limitations of our experiments.
1195
+ For example, the scaling exponents for MNIST (with a horizon length of 1) and Dota 2 are very
1196
+ similar, but a model of the same size needs to be trained for around 2,000 times longer on Dota 2
1197
+ than on MNIST to be compute-efficient. By comparison, Henighan et al. [2020] found generative
1198
+ 15
1199
+
1200
+ modeling to require around 20 times as much training on 32x32 images than on language. Moreover,
1201
+ our analysis of the effect of the task horizon length gives a plausible mechanism for this variation.
1202
+ Arithmetic intensity may confound scaling coefficients. As discussed in Section 4.4, the coefficient
1203
+ for the scaling of optimal model size with compute can be affected by the arithmetic intensity (i.e.,
1204
+ the number of FLOPs per parameter in a forward pass) of the model. This alone does not explain the
1205
+ large variation in this coefficient between MNIST and Dota 2, for example, but it may explain some
1206
+ of the other variation. We hypothesize that, when comparing models with very different arithmetic
1207
+ intensities, due to parameter sharing or methods such as mixture of experts, it may be better to
1208
+ measure model size in FLOPs per forward pass rather than in parameters.
1209
+ Sample efficiency is an affine function of the task horizon length. We study the effect of the
1210
+ task horizon length using a toy MNIST-based environment in Section 4.2. Both theoretically (see
1211
+ Proposition 1) and empirically, the number of samples required to reach a given level of performance
1212
+ grows with the horizon length as an affine function (i.e., a linear function with an intercept) that
1213
+ depends on both the model size and the target performance level. However, our analysis makes a
1214
+ simplifying assumption of independent timesteps, which does not hold in most environments. In
1215
+ particular, we do not analyze the need for curricula and/or exploration to solve tasks for which it is
1216
+ challenging to obtain useful feedback. Instead, we simply assume that the algorithm pays attention to
1217
+ rewards over a longer time horizon, making credit assignment harder.
1218
+ This result validates and refines the analysis of Cotra [2020], who defined the “effective horizon
1219
+ length” as a quantity that scales linearly with training data requirements, incorporating not only the
1220
+ horizon length as we define it, but also reward sparsity, noise and so on. Our result specifically isolates
1221
+ the explicit horizon length, showing that training data requirements are a sum of two components,
1222
+ at least in our toy setting: one corresponding to a version of the task in which the horizon ends
1223
+ immediately, and another that is proportional to the horizon length. This implies that, for a given fixed
1224
+ task, continuing to increase the horizon length will eventually lead to a proportional increase in the
1225
+ compute budget corresponding to a given optimal model size, without changing the exponent of this
1226
+ scaling law. However, this will only happen once the first component has become negligible, and it is
1227
+ unclear whether there are realistic tasks of different horizon lengths for which this first component is
1228
+ negligible in practice.
1229
+ We are excited for future work to study other aspects of the “effective horizon length”, such as
1230
+ reward sparsity and noise, as well as studying the explicit horizon length in environments that are less
1231
+ artificial. It is not entirely clear how to quantify these properties in general, and they could potentially
1232
+ affect scaling exponents as well as scaling coefficients, if for example they change over the course of
1233
+ training.
1234
+ Measuring scaling exponents precisely is challenging. The biological anchors framework uses
1235
+ the scaling of optimal model size with compute to perform a substantial extrapolation, making it
1236
+ particularly sensitive to the exponent of this relationship. This makes it challenging to measure this
1237
+ exponent with sufficient precision. In addition to the challenges raised by Hoffmann et al. [2022]
1238
+ involving learning rate schedules, we hope that others will benefit from learning about the other
1239
+ challenges we faced, which are summarized in Section 5.3.
1240
+ 6
1241
+ Conclusion
1242
+ We have shown how to extend scaling laws to single-agent reinforcement learning using the notion of
1243
+ intrinsic performance. Across a range of environments, intrinsic performance scales as a power law
1244
+ in model size and environment interactions, and hence the optimal model size scales as a power law in
1245
+ the training compute budget. We have studied how this relationship is affected by various properties
1246
+ of the training setup, including the horizon length of the task, and have discussed the implications of
1247
+ this for the biological anchors framework for forecasting transformative artificial intelligence.
1248
+ 7
1249
+ Acknowledgments
1250
+ Thanks to Mira Murati, Karl Cobbe, Chris Hesse, David Farhi, Paul Christiano, Jared Kaplan, Long
1251
+ Ouyang and Ajeya Cotra for discussions, ideas, help, advice, support and inspiration that have greatly
1252
+ benefited this project.
1253
+ 16
1254
+
1255
+ References
1256
+ Y. Bahri, E. Dyer, J. Kaplan, J. Lee, and U. Sharma. Explaining neural scaling laws. arXiv preprint
1257
+ arXiv:2102.06701, 2021.
1258
+ K. Cobbe, C. Hesse, J. Hilton, and J. Schulman. Leveraging procedural generation to benchmark
1259
+ reinforcement learning. In International conference on machine learning, pages 2048–2056.
1260
+ PMLR, 2020.
1261
+ K. W. Cobbe, J. Hilton, O. Klimov, and J. Schulman. Phasic policy gradient. In International
1262
+ Conference on Machine Learning, pages 2020–2027. PMLR, 2021.
1263
+ A. Cotra. Forecasting transformative AI with biological anchors, 2020.
1264
+ J. Droppo and O. Elibol. Scaling laws for acoustic models. arXiv preprint arXiv:2106.09488, 2021.
1265
+ L. Espeholt, H. Soyer, R. Munos, K. Simonyan, V. Mnih, T. Ward, Y. Doron, V. Firoiu, T. Harley,
1266
+ I. Dunning, et al. Impala: Scalable distributed deep-RL with importance weighted actor-learner
1267
+ architectures. In International conference on machine learning, pages 1407–1416. PMLR, 2018.
1268
+ B. Ghorbani, O. Firat, M. Freitag, A. Bapna, M. Krikun, X. Garcia, C. Chelba, and C. Cherry. Scaling
1269
+ laws for neural machine translation. arXiv preprint arXiv:2109.07740, 2021.
1270
+ K. He, X. Zhang, S. Ren, and J. Sun. Delving deep into rectifiers: Surpassing human-level per-
1271
+ formance on imagenet classification. In Proceedings of the IEEE international conference on
1272
+ computer vision, pages 1026–1034, 2015.
1273
+ T. Henighan, J. Kaplan, M. Katz, M. Chen, C. Hesse, J. Jackson, H. Jun, T. B. Brown, P. Dhari-
1274
+ wal, S. Gray, et al.
1275
+ Scaling laws for autoregressive generative modeling.
1276
+ arXiv preprint
1277
+ arXiv:2010.14701, 2020.
1278
+ J. Hilton, K. Cobbe, and J. Schulman. Batch size-invariance for policy optimization. arXiv preprint
1279
+ arXiv:2110.00641, 2021.
1280
+ J. Hoffmann, S. Borgeaud, A. Mensch, E. Buchatskaya, T. Cai, E. Rutherford, D. d. L. Casas, L. A.
1281
+ Hendricks, J. Welbl, A. Clark, et al. Training compute-optimal large language models. arXiv
1282
+ preprint arXiv:2203.15556, 2022.
1283
+ A. L. Jones. Scaling scaling laws with board games. arXiv preprint arXiv:2104.03113, 2021.
1284
+ J. Kaplan, S. McCandlish, T. Henighan, T. B. Brown, B. Chess, R. Child, S. Gray, A. Radford, J. Wu,
1285
+ and D. Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020.
1286
+ D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980,
1287
+ 2014.
1288
+ Y. LeCun. The MNIST database of handwritten digits, 1998.
1289
+ O. Neumann and C. Gros. Scaling laws for a multi-agent reinforcement learning model. arXiv
1290
+ preprint arXiv:2210.00849, 2022.
1291
+ OpenAI, C. Berner, G. Brockman, B. Chan, V. Cheung, P. D˛ebiak, C. Dennison, D. Farhi, Q. Fis-
1292
+ cher, S. Hashme, C. Hesse, R. Józefowicz, S. Gray, C. Olsson, J. Pachocki, M. Petrov, H. P.
1293
+ de Oliveira Pinto, J. Raiman, T. Salimans, J. Schlatter, J. Schneider, S. Sidor, I. Sutskever, J. Tang,
1294
+ F. Wolski, and S. Zhang. Dota 2 with large scale deep reinforcement learning. arXiv preprint
1295
+ arXiv:1912.06680, 2019.
1296
+ J. Schulman, P. Moritz, S. Levine, M. Jordan, and P. Abbeel. High-dimensional continuous control
1297
+ using generalized advantage estimation. arXiv preprint arXiv:1506.02438, 2015.
1298
+ J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization
1299
+ algorithms. arXiv preprint arXiv:1707.06347, 2017.
1300
+ D. Silver, T. Hubert, J. Schrittwieser, I. Antonoglou, M. Lai, A. Guez, M. Lanctot, L. Sifre, D. Ku-
1301
+ maran, T. Graepel, et al. A general reinforcement learning algorithm that masters chess, shogi, and
1302
+ Go through self-play. Science, 362(6419):1140–1144, 2018.
1303
+ 17
1304
+
1305
+ H. Zhang, Y. N. Dauphin, and T. Ma. Fixup initialization: Residual learning without normalization.
1306
+ arXiv preprint arXiv:1901.09321, 2019.
1307
+ 18
1308
+
1309
+ A
1310
+ Curve-fitting methodology
1311
+ In this section we discuss our methodology for computing intrinsic performance and fitting the power
1312
+ law constants, which require some care. Code for our full procedure, along with its application to
1313
+ our experiments, may be found in this Colab notebook: https://colab.research.google.com/
1314
+ drive/1PzwZyXsi9jRdVCj1GJrS8JdOPBQ7LHZV.
1315
+ Recall that the intrinsic performance of a policy is the minimum compute required to train a model of
1316
+ any size in the same family to reach the same return (averaged over random seeds). The naive way to
1317
+ compute this would be to train models of many different sizes, and to take the best-performing model
1318
+ size for each possible compute budget. However, it may not be feasible to train models of enough
1319
+ different sizes to get a reasonable level of granularity, while using enough different random seeds
1320
+ to sufficiently reduce the high variance of learning curves.
1321
+ To cope with this, we compute intrinsic performance and fit the power law constants together. This
1322
+ allows us to make use of all the data from each learning curve, instead of just a single point from
1323
+ each one. We do this by jointly fitting the power law constants and a monotonic function f to
1324
+ $f(R)^{-\beta} = \left(\frac{N_c}{N}\right)^{\alpha_N} + \left(\frac{E_c}{E}\right)^{\alpha_E},$
1333
+ where R is the mean episode return (or another performance metric such as TrueSkill), N is the
1334
+ number of model parameters, and E is the number of environment interactions. By also requiring the
1335
+ relationships between the constants from Lemma 1 to hold, this provides us both with the power law
1336
+ constants, and with the desired function f satisfying f (R) = I, where I is intrinsic performance.
1337
+ We perform this fit by using a black-box optimization algorithm such as CMA-ES to fit αN, αE
1338
+ and Nc, which determine β and Ec, with monotonic regression3 in the inner loop to fit f, using the
1339
+ squared error of the regression as the black-box loss function. We actually fit log (f) rather than f in
1340
+ order to obtain a good fit to I on a logarithmic scale, and we weight the data in proportion to 1
1341
+ E so
1342
+ that each interval is given equal weight on a logarithmic scale. In our Colab notebook, this routine is
1343
+ performed by the function fit_coeffs.
1344
+ This procedure seems to work well off-the-shelf, typically converging to a unique local minimum.
1345
+ However:
1346
+ • When there is a lack of data or the data is very noisy, the local minimum may not be a global
1347
+ minimum, and the procedure can diverge to a degenerate solution.
1348
+ • It is necessary to first smooth learning curves so that they are mostly monotonic, to prevent
1349
+ the monotonic regression from overfitting. In our Colab notebook, we use the function
1350
+ smooth, which uses standard errors to automatically choose smoothing parameters (although
1351
+ note that we used slightly different smoothing parameters for MNIST).
1352
+ • As discussed in Section 4.3, it is important to exclude data from early in training.
1353
+ Our full procedure is therefore as follows.
1354
+ • Smooth learning curves. Plot the smoothed curves on a logarithmic scale to check the
1355
+ monotonicity and fit, and adjust the smoothing parameters if necessary.
1356
+ • Exclude data from early in training, balancing the need for data against how much the early
1357
+ data skews the fit. Typically at least the first $\frac{1}{64}$ of training should be excluded.
1360
+ • Fit the power law constants and f using the black-box optimization with monotonic regres-
1361
+ sion routine.
1362
+ • Plot the fit to check the routine did not diverge. If it did, re-run routine, or constrain the
1363
+ constants and re-run, or include more data in step 2. If none of these fixes the divergence,
1364
+ then it may be necessary to collect more data.
1365
+ • Check the fit is not overly skewed by data from early in training. If it is, exclude more data
1366
+ in step 2.
1367
+ 3https://en.wikipedia.org/wiki/Isotonic_regression
1368
+ 19
1369
+
1370
+ This procedure led us to exclude the first 3 million environment interactions for Procgen, the first 2
1371
+ billion environment interactions for Dota 2, and the first 216, 219 or 222 environment interactions for
1372
+ MNIST depending on the period of training being considered, as discussed in Section 4.3.
1373
+ A.1
1374
+ Fitting to natural performance metrics
1375
+ As discussed in Section 4.5, as well as fitting our power law with I−β on the left-hand side, as in
1376
+ equation (1), we also fit it using various other expressions, such as e−αT T , where T is TrueSkill and
1377
+ αT is a fitted constant. When doing this, we adopt the convention that the constraints on β and Ec
1378
+ from Lemma 1 should continue to hold. This necessitates introducing an additional multiplier, and
1379
+ instead fitting
1380
+ $T_c e^{-\alpha_T T} = \left(\frac{N_c}{N}\right)^{\alpha_N} + \left(\frac{E_c}{E}\right)^{\alpha_E}$
1388
+ for example, where Tc is a fitted constant. Doing this allows us to continue to interpret the left-hand
1389
+ side of this equation as I−β.
1390
+ To fit equations of this form, we continue to use the same black-box optimization method, and simply
1391
+ replace the monotonic regression by another method of fitting log (f). For example, we may fit
1392
+ $f(T)^{-\beta} = T_c e^{-\alpha_T T}$
1393
+ by using linear regression to fit log (f). (Recall that β is already determined by αN and αE.)
1394
+ The function from our Colab notebook, fit_coeffs, provides options for fitting various functional
1395
+ forms for f, although it can sometimes be slow. (This is because it sometimes uses black-box
1396
+ optimization again in the inner loop for ease of implementation, even though this could be collapsed
1397
+ into the outer loop if speed were important.)
1398
+ 20
1399
+
1400
+ B
1401
+ Hyperparameters
1402
+ Our default hyperparameters for Procgen, Dota 2 and MNIST are given in Tables 1, 2 and 3
1403
+ respectively. We modified these defaults in two ways:
1404
+ • We adjusted the Adam step size as the model was scaled, as explained in Section 3.4.
1405
+ • For Procgen and MNIST, we incorporated a batch ramp and learning rate schedule, as
1406
+ explained in Section B.1.
1407
+ Table 1: Default PPG-EWMA hyperparameters for Procgen.
1408
+ Hyperparameter
1409
+ Value
1410
+ PPO
1411
+ Parallel environments
1412
+ 1024
1413
+ Timesteps per rollout (T)
1414
+ 256
1415
+ Minibatches per epoch
1416
+ 8
1417
+ Adam step size (α)
1418
+ 5 × 10−4
1419
+ Value function coefficient
1420
+ 0.5
1421
+ Entropy coefficient
1422
+ 0.01
1423
+ PPO clipping parameter (ϵ)
1424
+ Not used
1425
+ PPO KL penalty coefficient (β)
1426
+ 1
1427
+ GAE discount rate (γ)
1428
+ 0.999
1429
+ GAE bootstrapping parameter (λ)
1430
+ 0.95
1431
+ Reward normalization?
1432
+ Yes
1433
+ Advantage normalization?
1434
+ Yes
1435
+ Total environment interactions
1436
+ 200 million
1437
+ PPG
1438
+ Policy iterations per phase (Nπ)
1439
+ 32
1440
+ Policy phase policy epochs (Eπ)
1441
+ 1
1442
+ Policy phase value function epochs (EV )
1443
+ 1
1444
+ Auxiliary phase epochs (Eaux)
1445
+ 6
1446
+ Auxiliary phase minibatches per epoch
1447
+ 16Nπ
1448
+ Auxiliary phase cloning coefficient (βclone)
1449
+ 1
1450
+ PPG-EWMA
1451
+ Proximal policy EWMA decay rate (βprox)
1452
+ 8/9
1454
+ Batch ramp
1455
+ Initial batch size multiplier
1456
+ 1/32
1458
+ Table 2: PPO hyperparameters for Dota 2.
1459
+ Hyperparameter
1460
+ Value
1461
+ Parallel environments
1462
+ 6144
1463
+ Timesteps per rollout (T)
1464
+ 512
1465
+ Minibatches per epoch
1466
+ 32
1467
+ Epochs (E)
1468
+ 1
1469
+ Adam step size (α)
1470
+ 10−4 to 10−3
1471
+ PPO clipping parameter (ϵ)
1472
+ 0.2
1473
+ PPO KL penalty coefficient (β)
1474
+ Not used
1475
+ GAE bootstrapping parameter (λ)
1476
+ 0.95
1477
+ Total environment interactions
1478
+ 13.6–82.6 billion
1479
+ 21
1480
+
1481
+ Table 3: Default PPO-EWMA hyperparameters for MNIST in terms of the horizon length h, which
1482
+ varied from 1 to 256.
1483
+ Hyperparameter
1484
+ Value
1485
+ PPO
1486
+ Parallel environments
1487
+ 16
1488
+ Timesteps per rollout (T)
1489
+ 512
1490
+ Minibatches per epoch
1491
+ 8
1492
+ Epochs (E)
1493
+ 1
1494
+ Adam step size (α)
1495
+ 1 × 10−3
1496
+ Value function coefficient
1497
+ 0.5
1498
+ Entropy coefficient
1499
+ 0.01
1500
+ PPO clipping parameter (ϵ)
1501
+ Not used
1502
+ PPO KL penalty coefficient (β)
1503
+ 1
1504
+ GAE discount rate (γ)
1505
+ $1 - \frac{2}{h+1}$
1508
+ GAE bootstrapping parameter (λ)
1509
+ 1
1510
+ Reward normalization?
1511
+ Yes
1512
+ Advantage normalization?
1513
+ Yes
1514
+ Total environment interactions
1515
+ 225
1516
+ PPO-EWMA
1517
+ Proximal policy EWMA decay rate (βprox)
1518
+ 8/9
1520
+ Batch ramp
1521
+ Initial batch size multiplier
1522
+ $\sqrt{h}/64$
1525
+ B.1
1526
+ Batch ramp and learning rate schedule
1527
+ As explained in Section 3.4, it was important to use a well-tuned learning rate schedule, and to use
1528
+ a schedule that works well for a variety of compute budgets. It was also important to use a batch
1529
+ ramp, i.e., to start with a small batch size and increase it over the course of training, because the
1530
+ critical batch size is smaller at the start of training, and we needed training to still be sample-efficient
1531
+ for small compute budgets. Without a batch ramp, we would have needed to adjust our power law,
1532
+ equation (1), in much the same way as the corresponding law for language [Kaplan et al., 2020,
1533
+ equation (1.6)], which uses Smin (S), the minimum number of optimization steps as estimated using
1534
+ a power law fit to the gradient noise scale.
1535
+ Note, however, that increasing the batch size has a very similar effect to lowering the learning rate.
1536
+ To simplify matters, we used PPO-EWMA and PPG-EWMA, which are batch size-invariant [Hilton
1537
+ et al., 2021], allowing us to have almost the same effect as increasing the batch size by instead
1538
+ lowering the learning rate and increasing the center of mass of the proximal policy EWMA. We then
1539
+ considered only the batch size schedule, whether implemented explicitly or implicitly via these other
1540
+ hyperparameters.
1541
+ To explore promising schedules, we implemented a greedy adaptive batch size algorithm, which tries
1542
+ doubling the batch size and switches if that performs better, or else backtracks and stays with the
1543
+ current batch size. We experimented with this on StarPilot’s easy difficulty setting, using model sizes
1544
+ spanning a factor of around 2048. We found our algorithm to fairly consistently choose a schedule
1545
+ that can be well-approximated by the power law
1546
+ $B = \max\left(B_{\min}, \frac{E^{0.84}}{80}\right),$
1552
+ where B is the batch size in interactions, E is the total number of interactions so far, and Bmin = 256
1553
+ was our initial batch size.
1554
+ Having fit this power law schedule on one Procgen environment, we tested it on several different
1555
+ Procgen environments, and found it to consistently outperform our usual fixed batch size both at the
1556
+ start and end of training. (Curiously, our schedule sometimes underperformed the fixed batch size in
1557
+ the middle of training. We believe this may be explained by the smaller initial batch size causing the
1558
+ entropy to fall too quickly at the start of training, highlighting a pitfall of the greedy approach.) In
1559
+ particular, we were able to use the same schedule on both the easy and hard difficulty settings. Our
1560
+ 22
1561
+
1562
+ usual fixed batch size, on the other hand, was larger for the hard setting, corresponding to the fact
1563
+ that it was tuned to longer training runs.
1564
+ The same schedule also worked well on our MNIST environment at every horizon length, although it
1565
+ was necessary to tune Bmin. Using too small a value for Bmin seemed to result in an instability which
1566
+ could not always be recovered from. We found the optimal Bmin to vary based on the horizon length
1567
+ h, and we took $B_{\min} = 16\sqrt{h}$ (though taking $B_{\min}$ to have the form $A_0 + A_1 h$ would probably have
1570
+ made more theoretical sense in hindsight, given the results of Section 4.2). If trying our schedule
1571
+ on other environments, we suggest tuning Bmin to ensure stability at the start of training, but it is
1572
+ probably less important to tune the power law constants.
1573
+ We used this batch size schedule for both our Procgen and MNIST experiments (although it would
1574
+ probably have been better to fully re-fit the schedule for MNIST). We implemented this using a batch
1575
+ size multiplier, explicitly reducing the batch size when the multiplier was less than 1, and changing
1576
+ the learning rate and center of mass of the proximal policy EWMA instead when the multiplier was
1577
+ greater than 1. With Procgen, for which we used PPG-EWMA, we also changed the number of policy
1578
+ iterations per phase, Nπ, in proportion to the batch size, since we thought the number of optimization
1579
+ steps per phase should remain constant, and we rounded the batch size multiplier to the nearest power
1580
+ of two, with minimum and maximum multipliers of
1581
+ $\frac{1}{32}$ and 4 (corresponding to batch sizes of 1024
1583
+ and 131072 respectively).
1584
+ For Dota 2, we did not use a batch size schedule, since those experiments were carried out before we
1585
+ investigated batch size schedules.
1586
+ 23
1587
+
1588
+ C
1589
+ Results in full
1590
+ All the data from our experiments may be accessed using this Colab notebook: https://colab.
1591
+ research.google.com/drive/1PzwZyXsi9jRdVCj1GJrS8JdOPBQ7LHZV.
1592
+ This also includes
1593
+ code for analyzing this data, including model size and compute calculations, intrinsic performance
1594
+ and power law fitting, and generating all the plots in this paper.
1595
+ Figures 13, 14, 15 and 16 show learning curves as a function of total training compute, together with
1596
+ their power law fits, for all of our experiments. On the left of each figure we show mean episode
1597
+ return (or failure rate for CoinRun and MNIST, or TrueSkill for Dota 2), with error bars showing
1598
+ mean ±1 sample standard deviation over the random seeds. On the right of each figure, we show
1599
+ intrinsic performance, with error bars hidden for clarity.
1600
+ 1015
1601
+ 1017
1602
+ Compute (FLOPs)
1603
+ 10−2
1604
+ 10−1
1605
+ Failure rate
1606
+ CoinRun, easy
1607
+ 1015
1608
+ 1017
1609
+ Compute (FLOPs)
1610
+ 10−1
1611
+ Failure rate
1612
+ CoinRun, hard
1613
+ 1015
1614
+ 1017
1615
+ Compute (FLOPs)
1616
+ 1014
1617
+ 1015
1618
+ 1016
1619
+ 1017
1620
+ Intrinsic performance (FLOPs)
1621
+ CoinRun, easy
1622
+ 1015
1623
+ 1017
1624
+ Compute (FLOPs)
1625
+ 1014
1626
+ 1015
1627
+ 1016
1628
+ 1017
1629
+ 1018
1630
+ Intrinsic performance (FLOPs)
1631
+ CoinRun, hard
1632
+ Parameters
1633
+ 104.3
1634
+ 104.6
1635
+ 104.9
1636
+ 105.2
1637
+ 105.5
1638
+ 105.8
1639
+ 106.1
1640
+ 106.4
1641
+ 106.7
1642
+ 107.0
1643
+ 1015
1644
+ 1017
1645
+ Compute (FLOPs)
1646
+ 10
1647
+ 20
1648
+ 30
1649
+ 40
1650
+ 50
1651
+ 60
1652
+ Mean episode return
1653
+ StarPilot, easy
1654
+ 1015
1655
+ 1017
1656
+ Compute (FLOPs)
1657
+ 5
1658
+ 10
1659
+ 15
1660
+ 20
1661
+ 25
1662
+ 30
1663
+ Mean episode return
1664
+ StarPilot, hard
1665
+ 1015
1666
+ 1017
1667
+ Compute (FLOPs)
1668
+ 1014
1669
+ 1015
1670
+ 1016
1671
+ 1017
1672
+ 1018
1673
+ Intrinsic performance (FLOPs)
1674
+ StarPilot, easy
1675
+ 1015
1676
+ 1017
1677
+ Compute (FLOPs)
1678
+ 1014
1679
+ 1015
1680
+ 1016
1681
+ 1017
1682
+ 1018
1683
+ Intrinsic performance (FLOPs)
1684
+ StarPilot, hard
1685
+ Learning
1686
+ curve
1687
+ Power law
1688
+ fit
1689
+ Power law
1690
+ asymptote
1691
+ Efficient
1692
+ frontier
1693
+ Efficient
1694
+ points
1695
+ 1015
1696
+ 1017
1697
+ Compute (FLOPs)
1698
+ 5
1699
+ 10
1700
+ 15
1701
+ 20
1702
+ 25
1703
+ 30
1704
+ Mean episode return
1705
+ FruitBot, easy
1706
+ 1015
1707
+ 1017
1708
+ Compute (FLOPs)
1709
+ 0
1710
+ 5
1711
+ 10
1712
+ 15
1713
+ 20
1714
+ 25
1715
+ Mean episode return
1716
+ FruitBot, hard
1717
+ 1015
1718
+ 1017
1719
+ Compute (FLOPs)
1720
+ 1014
1721
+ 1015
1722
+ 1016
1723
+ 1017
1724
+ Intrinsic performance (FLOPs)
1725
+ FruitBot, easy
1726
+ 1015
1727
+ 1017
1728
+ Compute (FLOPs)
1729
+ 1014
1730
+ 1015
1731
+ 1016
1732
+ 1017
1733
+ 1018
1734
+ Intrinsic performance (FLOPs)
1735
+ FruitBot, hard
1736
+ Procgen, width
1737
+ Figure 13: Learning curves as a function of total training compute for our Procgen width-scaling
1738
+ experiments, together with their power law fits. Left half: mean episode return or failure rate, mean
1739
+ ±1 sample standard deviation over three seeds shown. Right half: intrinsic performance, mean only
1740
+ shown.
1741
+ 24
1742
+
1743
+ 1015
1744
+ 1017
1745
+ Compute (FLOPs)
1746
+ 10−2
1747
+ 10−1
1748
+ Failure rate
1749
+ CoinRun, easy
1750
+ 1015
1751
+ 1017
1752
+ Compute (FLOPs)
1753
+ 10−1
1754
+ Failure rate
1755
+ CoinRun, hard
1756
+ 1015
1757
+ 1017
1758
+ Compute (FLOPs)
1759
+ 1014
1760
+ 1015
1761
+ 1016
1762
+ 1017
1763
+ Intrinsic performance (FLOPs)
1764
+ CoinRun, easy
1765
+ 1015
1766
+ 1017
1767
+ Compute (FLOPs)
1768
+ 1014
1769
+ 1015
1770
+ 1016
1771
+ 1017
1772
+ 1018
1773
+ Intrinsic performance (FLOPs)
1774
+ CoinRun, hard
1775
+ Parameters
1776
+ 103.9
1777
+ 104.1
1778
+ 104.4
1779
+ 104.6
1780
+ 104.9
1781
+ 105.2
1782
+ 105.5
1783
+ 1015
1784
+ 1016
1785
+ 1017
1786
+ 1018
1787
+ Compute (FLOPs)
1788
+ 20
1789
+ 30
1790
+ 40
1791
+ 50
1792
+ 60
1793
+ Mean episode return
1794
+ StarPilot, easy
1795
+ 1015
1796
+ 1016
1797
+ 1017
1798
+ 1018
1799
+ Compute (FLOPs)
1800
+ 5
1801
+ 10
1802
+ 15
1803
+ 20
1804
+ 25
1805
+ Mean episode return
1806
+ StarPilot, hard
1807
+ 1015
1808
+ 1016
1809
+ 1017
1810
+ 1018
1811
+ Compute (FLOPs)
1812
+ 1015
1813
+ 1016
1814
+ 1017
1815
+ 1018
1816
+ Intrinsic performance (FLOPs)
1817
+ StarPilot, easy
1818
+ 1015
1819
+ 1016
1820
+ 1017
1821
+ 1018
1822
+ Compute (FLOPs)
1823
+ 1015
1824
+ 1016
1825
+ 1017
1826
+ 1018
1827
+ Intrinsic performance (FLOPs)
1828
+ StarPilot, hard
1829
+ Learning
1830
+ curve
1831
+ Power law
1832
+ fit
1833
+ Power law
1834
+ asymptote
1835
+ Efficient
1836
+ frontier
1837
+ Efficient
1838
+ points
1839
+ 1015
1840
+ 1017
1841
+ Compute (FLOPs)
1842
+ 5
1843
+ 10
1844
+ 15
1845
+ 20
1846
+ 25
1847
+ 30
1848
+ Mean episode return
1849
+ FruitBot, easy
1850
+ 1015
1851
+ 1016
1852
+ 1017
1853
+ 1018
1854
+ Compute (FLOPs)
1855
+ 0
1856
+ 5
1857
+ 10
1858
+ 15
1859
+ 20
1860
+ 25
1861
+ Mean episode return
1862
+ FruitBot, hard
1863
+ 1015
1864
+ 1017
1865
+ Compute (FLOPs)
1866
+ 1014
1867
+ 1015
1868
+ 1016
1869
+ 1017
1870
+ Intrinsic performance (FLOPs)
1871
+ FruitBot, easy
1872
+ 1015
1873
+ 1016
1874
+ 1017
1875
+ 1018
1876
+ Compute (FLOPs)
1877
+ 1015
1878
+ 1016
1879
+ 1017
1880
+ 1018
1881
+ Intrinsic performance (FLOPs)
1882
+ FruitBot, hard
1883
+ Procgen, depth
1884
+ Figure 14: Learning curves as a function of total training compute for our Procgen depth-scaling
1885
+ experiments, together with their power law fits. Left half: mean episode return or failure rate, mean
1886
+ ±1 sample standard deviation over three seeds shown. Right half: intrinsic performance, mean only
1887
+ shown.
1888
+ 1014
1889
+ 1016
1890
+ 1018
1891
+ 1020
1892
+ Compute (FLOPs)
1893
+ −5
1894
+ 0
1895
+ 5
1896
+ 10
1897
+ 15
1898
+ 20
1899
+ 25
1900
+ TrueSkill
1901
+ 1014
1902
+ 1016
1903
+ 1018
1904
+ 1020
1905
+ Compute (FLOPs)
1906
+ 1013
1907
+ 1014
1908
+ 1015
1909
+ 1016
1910
+ 1017
1911
+ 1018
1912
+ 1019
1913
+ Intrinsic performance (FLOPs)
1914
+ Parameters
1915
+ 102.7
1916
+ 104.5
1917
+ 105.1
1918
+ 105.7
1919
+ 106.3
1920
+ 106.9
1921
+ 108.1
1922
+ Learning
1923
+ curve
1924
+ Power law
1925
+ fit
1926
+ Power law
1927
+ asymptote
1928
+ Efficient
1929
+ frontier
1930
+ Efficient
1931
+ points
1932
+ Dota 2
1933
+ Figure 15: Learning curves as a function of total training compute for Dota 2, together with their
1934
+ power law fits. Only one random seed was used. Left: TrueSkill. Right: intrinsic performance.
1935
+ 25
1936
+
1937
+ 1013
1938
+ 1014
1939
+ 1015
1940
+ 1016
1941
+ Compute (FLOPs)
1942
+ 10−3
1943
+ Failure rate
1944
+ Horizon 1
1945
+ 1013
1946
+ 1014
1947
+ 1015
1948
+ 1016
1949
+ Compute (FLOPs)
1950
+ 10−3
1951
+ Failure rate
1952
+ Horizon 2
1953
+ 1013
1954
+ 1014
1955
+ 1015
1956
+ 1016
1957
+ Compute (FLOPs)
1958
+ 1013
1959
+ 1014
1960
+ 1015
1961
+ 1016
1962
+ Intrinsic performance (FLOPs)
1963
+ Horizon 1
1964
+ 1013
1965
+ 1014
1966
+ 1015
1967
+ 1016
1968
+ Compute (FLOPs)
1969
+ 1013
1970
+ 1014
1971
+ 1015
1972
+ 1016
1973
+ Intrinsic performance (FLOPs)
1974
+ Horizon 2
1975
+ Parameters
1976
+ 104.8
1977
+ 105.1
1978
+ 105.4
1979
+ 105.7
1980
+ 106.0
1981
+ 106.3
1982
+ 106.6
1983
+ 106.9
1984
+ 107.2
1985
+ 107.5
1986
+ 1013
1987
+ 1014
1988
+ 1015
1989
+ 1016
1990
+ Compute (FLOPs)
1991
+ 10−3
1992
+ Failure rate
1993
+ Horizon 4
1994
+ 1013
1995
+ 1014
1996
+ 1015
1997
+ 1016
1998
+ Compute (FLOPs)
1999
+ 10−3
2000
+ Failure rate
2001
+ Horizon 8
2002
+ 1013
2003
+ 1014
2004
+ 1015
2005
+ 1016
2006
+ Compute (FLOPs)
2007
+ 1013
2008
+ 1014
2009
+ 1015
2010
+ 1016
2011
+ Intrinsic performance (FLOPs)
2012
+ Horizon 4
2013
+ 1013
2014
+ 1014
2015
+ 1015
2016
+ 1016
2017
+ Compute (FLOPs)
2018
+ 1013
2019
+ 1014
2020
+ 1015
2021
+ 1016
2022
+ Intrinsic performance (FLOPs)
2023
+ Horizon 8
2024
+ Learning
2025
+ curve
2026
+ Power law
2027
+ fit
2028
+ Power law
2029
+ asymptote
2030
+ Efficient
2031
+ frontier
2032
+ Efficient
2033
+ points
2034
+ 1013
2035
+ 1014
2036
+ 1015
2037
+ 1016
2038
+ Compute (FLOPs)
2039
+ 10−3
2040
+ Failure rate
2041
+ Horizon 16
2042
+ 1013
2043
+ 1014
2044
+ 1015
2045
+ 1016
2046
+ Compute (FLOPs)
2047
+ 10−3
2048
+ Failure rate
2049
+ Horizon 32
2050
+ 1013
2051
+ 1014
2052
+ 1015
2053
+ 1016
2054
+ Compute (FLOPs)
2055
+ 1013
2056
+ 1014
2057
+ 1015
2058
+ 1016
2059
+ Intrinsic performance (FLOPs)
2060
+ Horizon 16
2061
+ 1013
2062
+ 1014
2063
+ 1015
2064
+ 1016
2065
+ Compute (FLOPs)
2066
+ 1013
2067
+ 1014
2068
+ 1015
2069
+ 1016
2070
+ Intrinsic performance (FLOPs)
2071
+ Horizon 32
2072
+ 1013
2073
+ 1014
2074
+ 1015
2075
+ 1016
2076
+ Compute (FLOPs)
2077
+ 10−3
2078
+ Failure rate
2079
+ Horizon 64
2080
+ 1013
2081
+ 1014
2082
+ 1015
2083
+ 1016
2084
+ Compute (FLOPs)
2085
+ 10−3
2086
+ 10−2
2087
+ Failure rate
2088
+ Horizon 128
2089
+ 1013
2090
+ 1014
2091
+ 1015
2092
+ 1016
2093
+ Compute (FLOPs)
2094
+ 1013
2095
+ 1014
2096
+ 1015
2097
+ 1016
2098
+ Intrinsic performance (FLOPs)
2099
+ Horizon 64
2100
+ 1013
2101
+ 1014
2102
+ 1015
2103
+ 1016
2104
+ Compute (FLOPs)
2105
+ 1013
2106
+ 1014
2107
+ 1015
2108
+ 1016
2109
+ Intrinsic performance (FLOPs)
2110
+ Horizon 128
2111
+ 1013
2112
+ 1014
2113
+ 1015
2114
+ 1016
2115
+ Compute (FLOPs)
2116
+ 10−3
2117
+ 10−2
2118
+ Failure rate
2119
+ Horizon 192
2120
+ 1013
2121
+ 1014
2122
+ 1015
2123
+ 1016
2124
+ Compute (FLOPs)
2125
+ 10−3
2126
+ 10−2
2127
+ Failure rate
2128
+ Horizon 256
2129
+ 1013
2130
+ 1014
2131
+ 1015
2132
+ 1016
2133
+ Compute (FLOPs)
2134
+ 1013
2135
+ 1014
2136
+ 1015
2137
+ Intrinsic performance (FLOPs)
2138
+ Horizon 192
2139
+ 1013
2140
+ 1014
2141
+ 1015
2142
+ 1016
2143
+ Compute (FLOPs)
2144
+ 1013
2145
+ 1014
2146
+ 1015
2147
+ Intrinsic performance (FLOPs)
2148
+ Horizon 256
2149
+ MNIST, late period
2150
+ Figure 16: Learning curves as a function of total training compute for MNIST, together with their
2151
+ power law fits, for the late period of training (222–225 environment interactions). Left half: failure
2152
+ rate, mean ±1 sample standard deviation over the middle-performing 16 of 20 random seeds shown.
2153
+ Right: intrinsic performance, mean only shown.
2154
+ 26
2155
+
2156
+ D
2157
+ Parameter and FLOP calculations
2158
+ In counting parameters and FLOPs, we apply the following principles:
2159
+ • We only include the part of the network that is being scaled (ignoring things like embedding
2160
+ parameters), since we consider that to be the bottleneck.
2161
+ We use round numbers (ignoring negligible contributions such as biases and activations),
2162
+ for simplicity.
2163
+ • We include both rollout and optimization FLOPs (including any additional overhead of
2164
+ PPO-EWMA).
2165
+ • We treat an add-multiply as 2 FLOPs.
2166
+ For example, we treat the forward pass of a dense layer as taking 2 FLOPs per batch item per
2167
+ parameter, and a convolutional layer as taking 2houtwout FLOPs per batch item per parameter. We
2168
+ treat a backward pass as taking 2× the FLOPs of a forward pass.
2169
+ For the Procgen width-scaling experiments, we ignore the first convolution, since it scales as width
2170
+ (instead of as width squared), and has few parameters. Similarly, for the depth-scaling experiments,
2171
+ we ignore the final dense layer, since we only vary the number of convolutional layers. Unfortunately,
2172
+ as discussed in Section 4.4, the final dense layer contains many parameters, which skews our constants.
2173
+ In both cases, we include both the policy and value networks, which are separate with identical
2174
+ architectures. We use PPG-EWMA with 1 policy epoch and 6 auxiliary epochs, totaling 9 forward
2175
+ and 7 backward passes per interaction.
2176
+ For the Dota experiments, we ignore the embedding layer, considering only the LSTM. Since each
2177
+ interaction was used only once, we count 2 forward passes and 1 backward pass per interaction (1
2178
+ forward pass for the rollout, and 1 forward-backward pass for optimization).
2179
+ For the MNIST experiments, we ignore the first convolution, as for the Procgen width-scaling
2180
+ experiments. However, we only include the policy network, since the task of the value network is
2181
+ trivial (due to timesteps being independent). We use PPO-EWMA with 1 epoch, totaling 3 forward
2182
+ passes and 1 backward pass per interaction.
2183
+ The numerical results of these calculations are as follows.
2184
+ • Procgen, scaling width: for the width multiplier w = 2−3, 2−2.5, 2−2, . . . , 22.5, we count
2185
+ 1242112w2 parameters and 2652897280w2 FLOPs per interaction.
2186
+ • Procgen, scaling depth: for the number of residual blocks b = 1, 2, 4, . . . , 64, we count
2187
+ 5184b + 1944 parameters and 61046784b + 81395712 FLOPs per interaction.
2188
+ • Dota 2: for the LSTM size s = 8, 64, 128, 256, 512, 1024, 4096, we count 8s2 parameters
2189
+ and 64s2 FLOPs per interaction.
2190
+ • MNIST: for the width multiplier w = 2−3, 2−2.5, 2−2, . . . , 22.5, we count 3948800w2
2191
+ parameters and 95648000w2 FLOPs per interaction.
2192
+ Note that one of our modeling assumptions is that the number of FLOPs per interaction is proportional
2193
+ to the number of parameters, but this is not true for our Procgen depth-scaling experiments. In other
2194
+ words, the number of FLOPs per param-interact, which is used to convert compute from units of
2195
+ parameters × interactions to units of FLOPs, is not constant. However, this number differs by at most
2196
+ 40% from the mean of this number over the different depths, and so we simply used the mean when
2197
+ doing this conversion.
2198
+ 27
2199
+
2200
+ E
2201
+ Fitted constants
2202
+ In this section we provide the constants αN, αE and Nc, together with the values of β and Ec derived
2203
+ using Lemma 1, for our fitted power laws for intrinsic performance I as given by equation (1). We
2204
+ also provide Imin and Imax, the minimum and maximum intrinsic performance obtained during the
2205
+ span of interaction counts considered; our model is not able to predict mean episode return outside
2206
+ this range. Recall that the units of I are parameters × interactions; the conversion to FLOPs may be
2207
+ performed using the values given in Appendix D.
2208
+ We also provide the derived equations for optimal model size N vs compute C in PF-days. By
2209
+ substituting equation (2) for the compute-efficient frontier into equation (1), these are given by
2210
+ N = Nc
2211
+
2212
+ 1 + αN
2213
+ αE
2214
+
2215
+ 1
2216
+ αN � C × 1015 × 24 × 3600
2217
+ FLOPs per param-interact
2218
+
2219
+ 1
2220
+ 1+ αN
2221
+ αE
2222
+ for
2223
+ Nmin ≤ N ≤ Nmax.
2224
+ We take Nmin and Nmax to be the minimum and maximum model sizes we tested whose power law
2225
+ fit intersects the compute-efficient frontier somewhere between Imin and Imax.
2226
+ For our comparison to generative modeling, we use these equations for optimal model size N vs
2227
+ compute C in PF-days:
2228
+ • Language [Hoffmann et al., 2022]: N = (
2229
+ C
2230
+ 1.4×10−18 )0.5
2231
+ • Language [Kaplan et al., 2020]: N = (
2232
+ C
2233
+ 3.3×10−13 )0.73
2234
+ • Image 32x32 [Henighan et al., 2020]: N = (
2235
+ C
2236
+ 1.6×10−13 )0.65
2237
+ Further fitted constants, such as for single seeds, for different spans of interaction counts (see Section
2238
+ 4.2), and fitted to natural performance metrics (see Section 4.5), may be found in this Colab notebook:
2239
+ https://colab.research.google.com/drive/1PzwZyXsi9jRdVCj1GJrS8JdOPBQ7LHZV.
2240
+ E.1
2241
+ Procgen, scaling width
2242
+ The fitted constants for our Procgen width-scaling experiments are as follows.
2243
+ Environment
2244
+ αN
2245
+ αE
2246
+ β
2247
+ Nc
2248
+ Ec
2249
+ Imin
2250
+ Imax
2251
+ CoinRun, easy
2252
+ 0.542
2253
+ 0.462
2254
+ 0.249
2255
+ 2.53 × 10−2
2256
+ 2.49 × 100
2257
+ 4.83 × 1010
2258
+ 2.55 × 1014
2259
+ CoinRun, hard
2260
+ 0.759
2261
+ 0.576
2262
+ 0.328
2263
+ 1.55 × 10−1
2264
+ 8.00 × 10−1
2265
+ 6.07 × 1010
2266
+ 3.45 × 1014
2267
+ StarPilot, easy
2268
+ 0.318
2269
+ 0.604
2270
+ 0.208
2271
+ 2.25 × 10−4
2272
+ 2.02 × 102
2273
+ 4.88 × 1010
2274
+ 1.95 × 1015
2275
+ StarPilot, hard
2276
+ 0.453
2277
+ 0.533
2278
+ 0.245
2279
+ 4.55 × 10−3
2280
+ 1.31 × 101
2281
+ 5.43 × 1010
2282
+ 1.09 × 1015
2283
+ FruitBot, easy
2284
+ 0.527
2285
+ 0.350
2286
+ 0.210
2287
+ 9.17 × 10−2
2288
+ 4.46 × 10−1
2289
+ 5.24 × 1010
2290
+ 1.67 × 1014
2291
+ FruitBot, hard
2292
+ 0.478
2293
+ 0.346
2294
+ 0.201
2295
+ 1.14 × 10−1
2296
+ 2.96 × 10−1
2297
+ 6.00 × 1010
2298
+ 7.26 × 1014
2299
+ These imply the following equations for optimal model size N vs compute C in PF-days.
2300
+ • CoinRun, easy: N = 4.615 × 106 × C0.4600 for 19408 ≤ N ≤ 310528
2301
+ • CoinRun, hard: N = 6.881 × 106 × C0.4315 for 43668 ≤ N ≤ 587092
2302
+ • StarPilot, easy: N = 6.383 × 107 × C0.6549 for 19408 ≤ N ≤ 4968448
2303
+ • StarPilot, hard: N = 1.668 × 107 × C0.5404 for 19408 ≤ N ≤ 1242112
2304
+ • FruitBot, easy: N = 2.243 × 106 × C0.3994 for 19408 ≤ N ≤ 174672
2305
+ • FruitBot, hard: N = 6.631 × 106 × C0.4201 for 43668 ≤ N ≤ 587092
2306
+ As discussed in Section 4.5, for CoinRun, we also fit power laws using the fail-to-success ratio F,
2307
+ excluding data for which F > 0.5. As explained in Section A.1, we replaced I−β with F
2308
+ Fc , where Fc
2309
+ is a fitted constant. The fitted constants for these power laws are as follows.
2310
+ 28
2311
+
2312
+ Difficulty
2313
+ αN
2314
+ αE
2315
+ β
2316
+ Nc
2317
+ Ec
2318
+ Imin
2319
+ Imax
2320
+ Easy
2321
+ 0.899
2322
+ 1.007
2323
+ 0.475
2324
+ 1.00 × 10−2
2325
+ 2.33 × 101
2326
+ 2.55 × 1010
2327
+ 2.60 × 1014
2328
+ Hard
2329
+ 0.833
2330
+ 0.776
2331
+ 0.402
2332
+ 4.69 × 10−2
2333
+ 3.80 × 100
2334
+ 5.14 × 1011
2335
+ 7.38 × 1014
2336
+ Difficulty
2337
+ Fc
2338
+ Easy
2339
+ 3.88 × 104
2340
+ Hard
2341
+ 2.52 × 104
2342
+ These imply the following relationships between I and F.
2343
+ • Easy: I = 4.57 × 109 × F −
2344
+ 1
2345
+ 0.475
2346
+ • Hard: I = 9.15 × 1010 × F −
2347
+ 1
2348
+ 0.402
2349
+ They also imply the following equations for optimal model size N vs compute C in PF-days.
2350
+ • Easy: N = 1.216 × 107 × C0.5285 for 19408 ≤ N ≤ 587092
2351
+ • Hard: N = 1.148 × 107 × C0.4822 for 77632 ≤ N ≤ 1242112
2352
+ E.2
2353
+ Procgen, scaling depth
2354
+ The fitted constants for our Procgen depth-scaling experiments are as follows.
2355
+ Environment
2356
+ αN
2357
+ αE
2358
+ β
2359
+ Nc
2360
+ Ec
2361
+ Imin
2362
+ Imax
2363
+ CoinRun, easy
2364
+ 0.351
2365
+ 0.469
2366
+ 0.201
2367
+ 2.64 × 10−4
2368
+ 1.26 × 102
2369
+ 5.43 × 109
2370
+ 3.72 × 1013
2371
+ CoinRun, hard
2372
+ 0.336
2373
+ 0.581
2374
+ 0.213
2375
+ 1.02 × 10−4
2376
+ 4.47 × 102
2377
+ 6.58 × 109
2378
+ 6.24 × 1013
2379
+ StarPilot, easy
2380
+ 0.800
2381
+ 0.821
2382
+ 0.405
2383
+ 9.65 × 10−3
2384
+ 1.87 × 101
2385
+ 1.70 × 1010
2386
+ 5.52 × 1013
2387
+ StarPilot, hard
2388
+ 0.380
2389
+ 0.381
2390
+ 0.190
2391
+ 2.87 × 10−3
2392
+ 9.11 × 100
2393
+ 1.58 × 1010
2394
+ 5.21 × 1013
2395
+ FruitBot, easy
2396
+ 0.539
2397
+ 0.564
2398
+ 0.276
2399
+ 2.92 × 10−3
2400
+ 2.77 × 101
2401
+ 9.58 × 109
2402
+ 3.76 × 1013
2403
+ FruitBot, hard
2404
+ 0.401
2405
+ 0.463
2406
+ 0.215
2407
+ 1.23 × 10−3
2408
+ 3.26 × 101
2409
+ 1.34 × 1010
2410
+ 4.64 × 1013
2411
+ These imply the following equations for optimal model size N vs compute C in PF-days. Note,
2412
+ however, that:
2413
+ • As discussed in Section 4.4, we exclude the final dense layer, which would have accounted
2414
+ for between 16% and 90% of the parameters, depending on the depth. This skews the
2415
+ leading constants here.
2416
+ • As discussed in Appendix D, we also ignored the variation in the number of FLOPs per
2417
+ param-interact between models of different depths, leading to errors of up to 40%.
2418
+ • CoinRun, easy: N = 1.390 × 106 × C0.5723 for 7128 ≤ N ≤ 43416
2419
+ • CoinRun, hard: N = 3.962 × 106 × C0.6337 for 7128 ≤ N ≤ 167832
2420
+ • StarPilot, easy: N = 2.202 × 106 × C0.5063 for 7128 ≤ N ≤ 167832
2421
+ • StarPilot, hard: N = 1.410 × 106 × C0.5007 for 7128 ≤ N ≤ 84888
2422
+ • FruitBot, easy: N = 1.172 × 106 × C0.5110 for 7128 ≤ N ≤ 84888
2423
+ • FruitBot, hard: N = 1.671 × 106 × C0.5359 for 7128 ≤ N ≤ 84888
2424
+ 29
2425
+
2426
+ E.3
2427
+ Dota 2
2428
+ As explained in Sections 4.5 and A.1, we fit power laws to I−β, Tce−αT T , Tc
2429
+
2430
+ e−αT T − eαT T ∗�
2431
+ and Tc (T ∗ − T)αT , where I is intrinsic performance, T is TrueSkill, and αT , Tc and T ∗ are fitted
2432
+ constants. The fitted constants for these different functional forms are as follows.
2433
+ Fit to
2434
+ αN
2435
+ αE
2436
+ β
2437
+ Nc
2438
+ Ec
2439
+ Imin
2440
+ Imax
2441
+ I−β
2442
+ 0.186
2443
+ 0.593
2444
+ 0.141
2445
+ 1.98 × 10−8
2446
+ 1.04 × 106
2447
+ 6.83 × 1011
2448
+ 1.79 × 1018
2449
+ Tce−αT T
2450
+ 0.180
2451
+ 0.486
2452
+ 0.131
2453
+ 3.53 × 10−8
2454
+ 3.33 × 105
2455
+ 4.62 × 1011
2456
+ 2.24 × 1017
2457
+ Tc(e−αT T − eαT T ∗)
2458
+ 0.181
2459
+ 0.560
2460
+ 0.137
2461
+ 2.07 × 10−8
2462
+ 8.32 × 105
2463
+ 6.31 × 1011
2464
+ 1.77 × 1018
2465
+ Tc(T ∗ − T)αT
2466
+ 0.183
2467
+ 0.569
2468
+ 0.138
2469
+ 2.06 × 10−8
2470
+ 8.82 × 105
2471
+ 6.71 × 1011
2472
+ 1.23 × 1018
2473
+ Fit to
2474
+ αT
2475
+ Tc
2476
+ T ∗
2477
+ I−β
2478
+ -
2479
+ -
2480
+ -
2481
+ Tce−αT T
2482
+ 0.0572
2483
+ 2.16 × 10−2
2484
+ -
2485
+ Tc(e−αT T − eαT T ∗)
2486
+ 0.0402
2487
+ 2.40 × 10−2
2488
+ 35.43
2489
+ Tc(T ∗ − T)αT
2490
+ 2.84
2491
+ 2.14 × 10−7
2492
+ 54.01
2493
+ As discussed in Section 4.5, we have less confidence in the last two functional forms, which is
2494
+ reflected in the very different estimates for T ∗, which represents the maximum attainable TrueSkill
2495
+ for the family of models we trained.
2496
+ These imply the following relationships between I and T for the last three fits.
2497
+ • Tce−αT T :
2498
+ I = 4.93 × 1012 × 1.5462T
2499
+ • Tc(e−αT T − eαT T ∗):
2500
+ I = 6.49 × 1011 ×
2501
+
2502
+ 1.0410−T − 1.0410−35.43�−
2503
+ 1
2504
+ 0.137
2505
+ • Tc(T ∗ − T)αT :
2506
+ I = 1.48 × 1048 × (54.01 − T)− 2.84
2507
+ 0.138
2508
+ They also imply the following equations for optimal model size N vs compute C in PF-days.
2509
+ • I−β:
2510
+ N = 2.703 × 107 × C0.7617 for 512 ≤ N ≤ 2097152
2511
+ • Tce−αT T :
2512
+ N = 1.607 × 107 × C0.7302 for 512 ≤ N ≤ 524288
2513
+ • Tc(e−αT T − eαT T ∗):
2514
+ N = 2.305 × 107 × C0.7552 for 512 ≤ N ≤ 2097152
2515
+ • Tc(T ∗ − T)αT :
2516
+ N = 2.385 × 107 × C0.7567 for 512 ≤ N ≤ 2097152
2517
+ E.4
2518
+ MNIST
2519
+ The fitted constants for our MNIST experiments are as follows. As discussed in Section 4.3, these
2520
+ constants are for the late period of training (222–225 environment interactions). Recall also that the
2521
+ horizon h is such that the interval [0, h − 1] has the same center of mass as an exponentially-weighted
2522
+ moving average with decay parameter γ, i.e., γ = 1 − 2/(h + 1).
2525
+ 30
2526
+
2527
+ Horizon
2528
+ αN
2529
+ αE
2530
+ β
2531
+ Nc
2532
+ Ec
2533
+ Imin
2534
+ Imax
2535
+ 1
2536
+ 0.263
2537
+ 1.050
2538
+ 0.210
2539
+ 9.79 × 10−6
2540
+ 9.43 × 103
2541
+ 1.79 × 1011
2542
+ 1.00 × 1015
2543
+ 2
2544
+ 0.265
2545
+ 0.979
2546
+ 0.208
2547
+ 1.32 × 10−5
2548
+ 6.30 × 103
2549
+ 1.87 × 1011
2550
+ 9.66 × 1014
2551
+ 4
2552
+ 0.284
2553
+ 0.791
2554
+ 0.209
2555
+ 4.21 × 10−5
2556
+ 1.50 × 103
2557
+ 1.94 × 1011
2558
+ 4.19 × 1014
2559
+ 8
2560
+ 0.276
2561
+ 0.826
2562
+ 0.207
2563
+ 2.83 × 10−5
2564
+ 2.33 × 103
2565
+ 1.80 × 1011
2566
+ 6.24 × 1014
2567
+ 16
2568
+ 0.252
2569
+ 0.830
2570
+ 0.193
2571
+ 1.59 × 10−5
2572
+ 3.78 × 103
2573
+ 1.62 × 1011
2574
+ 7.69 × 1014
2575
+ 32
2576
+ 0.263
2577
+ 0.856
2578
+ 0.201
2579
+ 1.73 × 10−5
2580
+ 3.83 × 103
2581
+ 1.59 × 1011
2582
+ 7.47 × 1014
2583
+ 64
2584
+ 0.307
2585
+ 0.736
2586
+ 0.217
2587
+ 7.27 × 10−5
2588
+ 8.40 × 102
2589
+ 1.64 × 1011
2590
+ 4.16 × 1014
2591
+ 128
2592
+ 0.315
2593
+ 0.769
2594
+ 0.224
2595
+ 6.27 × 10−5
2596
+ 1.08 × 103
2597
+ 1.45 × 1011
2598
+ 3.64 × 1014
2599
+ 192
2600
+ 0.330
2601
+ 0.688
2602
+ 0.223
2603
+ 1.22 × 10−4
2604
+ 4.86 × 102
2605
+ 1.33 × 1011
2606
+ 2.08 × 1014
2607
+ 256
2608
+ 0.358
2609
+ 0.681
2610
+ 0.235
2611
+ 2.11 × 10−4
2612
+ 3.04 × 102
2613
+ 1.33 × 1011
2614
+ 1.53 × 1014
2615
+ These imply the following equations for optimal model size N vs compute C in PF-days.
2616
+ • Horizon 1:
2617
+ N = 1.586 × 1010 × C0.7999 for 61700 ≤ N ≤ 15795200
2618
+ • Horizon 2:
2619
+ N = 1.309 × 1010 × C0.7871 for 61700 ≤ N ≤ 15795200
2620
+ • Horizon 4:
2621
+ N = 5.507 × 109 × C0.7357 for 61700 ≤ N ≤ 3948800
2622
+ • Horizon 8:
2623
+ N = 6.406 × 109 × C0.7493 for 61700 ≤ N ≤ 7739648
2624
+ • Horizon 16: N = 7.787 × 109 × C0.7671 for 61700 ≤ N ≤ 7739648
2625
+ • Horizon 32: N = 7.535 × 109 × C0.7652 for 61700 ≤ N ≤ 7739648
2626
+ • Horizon 64: N = 2.746 × 109 × C0.7053 for 61700 ≤ N ≤ 3948800
2627
+ • Horizon 128: N = 2.681 × 109 × C0.7092 for 61700 ≤ N ≤ 3948800
2628
+ • Horizon 192: N = 1.376 × 109 × C0.6757 for 61700 ≤ N ≤ 987200
2629
+ • Horizon 256: N = 9.876 × 108 × C0.6553 for 61700 ≤ N ≤ 987200
2630
+ 31
2631
+
2632
+ F
2633
+ Proof of the lemma
2634
+ Proof of Lemma 1. We may write I (N, E) as a function of N and compute C := NE:
2635
+ I (N, C)−β =
2636
+ �Nc
2637
+ N
2638
+ �αN
2639
+ +
2640
+ �EcN
2641
+ C
2642
+ �αE
2643
+ .
2644
+ The compute-efficient frontier is defined by the value of N that maximizes I (N, C) for each C.
2645
+ Equivalently, since β > 0, this value of N minimizes I (N, C)−β, and so it satisfies
2646
+
2647
+ ∂N
2648
+
2649
+ I (N, C)−β�
2650
+ = 0.
2651
+ Differentiating and multiplying through by N, this equation becomes
2652
+ −αN
2653
+ �Nc
2654
+ N
2655
+ �αN
2656
+ + αE
2657
+ �EcN
2658
+ C
2659
+ �αE
2660
+ = 0.
2661
+ Eliminating C, this is exactly equation (2), as required.
2662
+ By assumption, we also have I (N, E) = NE along the compute-efficient frontier. Substituting (2)
2663
+ into I (N, E), this equation becomes
2664
+
2665
+ 1 + αN
2666
+ αE
2667
+ � �Nc
2668
+ N
2669
+ �αN
2670
+ = (NE)−β .
2671
+ (3)
2672
+ Thus both equations (2) and (3) are power law relationships between N and E that hold along the
2673
+ compute-efficient frontier, so we may simply equate exponents and constants. Equating exponents,
2674
+ αN
2675
+ αE
2676
+ = αN
2677
+ β − 1
2678
+ and hence
2679
+ 1
2680
+ β =
2681
+ 1
2682
+ αN
2683
+ + 1
2684
+ αE
2685
+ ,
2686
+ as required. Equating constants,
2687
+ �αN
2688
+ αE
2689
+
2690
+ 1
2691
+ αE N
2692
+ αN
2693
+ αE
2694
+ c
2695
+ E−1
2696
+ c
2697
+ =
2698
+
2699
+ 1 + αN
2700
+ αE
2701
+ � 1
2702
+ β
2703
+ N
2704
+ αN
2705
+ β
2706
+ c
2707
+ ,
2708
+ and hence
2709
+ 1
2710
+ NcEc
2711
+ =
2712
+
2713
+ 1 + αN
2714
+ αE
2715
+
2716
+ 1
2717
+ αN +
2718
+ 1
2719
+ αE � αE
2720
+ αN
2721
+
2722
+ 1
2723
+ αE =
2724
+
2725
+ 1 + αN
2726
+ αE
2727
+
2728
+ 1
2729
+ αN �
2730
+ 1 + αE
2731
+ αN
2732
+
2733
+ 1
2734
+ αE ,
2735
+ as required.
2736
+ 32
2737
+
2738
+ G
2739
+ Proof sketch of the proposition
2740
+ A formal statement and proof of Proposition 1 would require a formal analysis of Vanilla Policy
2741
+ Gradient, which is beyond the scope of this work. Instead, we provide a proof sketch in which we
2742
+ make approximations informally.
2743
+ Proof sketch of Proposition 1. The horizon length h only affects the algorithm via GAE, which in
2744
+ the case λ = 1 produces the value function targets and advantage estimates
2745
+ ˆVt := rt + γrt+1 + · · · + γT −trT = rt + γ ˆVt+1
2746
+ and
2747
+ ˆAt := ˆVt − V (st) = rt − V (st) + γ ˆVt+1,
2748
+ where V is the value function. Since timesteps are independent, γ ˆVt+1 is independent of st and at,
2749
+ and so should be thought of as noise. The value function will quickly learn to incorporate the mean
2750
+ of this noise, and so
2751
+ V (st) ≈ V 0 (st) + E
2752
+
2753
+ γ ˆVt+1
2754
+
2755
+ ,
2756
+ where V 0 (st) is the “immediate reward value function” that would have been obtained had we
2757
+ used the value function targets ˆV 0
2758
+ t := ˆVt − E
2759
+
2760
+ γ ˆVt+1
2761
+
2762
+ . Writing ϵ := γ ˆVt+1 − E
2763
+
2764
+ γ ˆVt+1
2765
+
2766
+ for the
2767
+ zero-mean component of γ ˆVt+1, we obtain
2768
+ ˆV 0
2769
+ t = rt + ϵ
2770
+ and
2771
+ ˆAt ≈ rt − V 0 (st) + ϵ.
2772
+ In other words, the entire impact of varying h is that it changes the variance of the noise term ϵ added
2773
+ to the value function targets and advantage estimates.
2774
+ Let us now analyze the policy gradient, which equals
2775
+ ˆEt
2776
+
2777
+ ∇θρt (θ) ˆAt
2778
+
2779
+ ≈ ˆEt
2780
+
2781
+ ∇θρt (θ)
2782
+
2783
+ rt − V 0 (st) + ϵ
2784
+ ��
2785
+ ,
2786
+ where ρt (θ) :=
2787
+ πθ(at|st)
2788
+ πθold(at|st). Since ϵ is independent of st and at and E [ϵ] = 0, the covariance matrix
2789
+ of this decomposes as
2790
+ Σθ + ΦθVar [ϵ] ,
2791
+ where Σθ is the covariance matrix of ∇θρt (θ)
2792
+
2793
+ rt − V 0 (st)
2794
+
2795
+ , and Φθ := E
2796
+
2797
+ ∇θρt (θ) ∇T
2798
+ θ ρt (θ)
2799
+
2800
+ is
2801
+ the uncentered covariance matrix of ∇θρt (θ).
2802
+ Note that V 0 (st) simply estimates E [rt], which does not depend on h. The variance of V 0 (st) does
2803
+ depend on h via the addition of ϵ to the value function targets, but this additional variance is small
2804
+ compared to the variance of ϵ itself. We may therefore treat Σθ as approximately independent of h.
2805
+ It remains to express Var [ϵ] in terms of h. We assume that T is large enough compared to h that we
2806
+ may take T → ∞. (In our experiments, we use rollouts of length 512 and h ≤ 256.) Thus
2807
+ Var [ϵ] = Var[γ ˆVt+1] = (γ2 + γ4 + γ6 + · · ·) Var [rt] = (γ2/(1 − γ2)) Var [rt] = (1/4)(h + 1/h − 2) Var [rt] .
2826
+ Hence the covariance matrix of the policy gradient is approximately
2827
+ Σθ + Πθ (h + 1/h − 2),
2833
+ where Σθ and Πθ := (1/4) Var [rt] Φθ are symmetric positive semi-definite matrices that do not depend
2835
+ on h, as required.
2836
+ 33
2837
+
-9FQT4oBgHgl3EQf7Ta5/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
-NFLT4oBgHgl3EQfuy_h/content/tmp_files/2301.12157v1.pdf.txt ADDED
@@ -0,0 +1,476 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Simple Realistic Model of Spin Reorientation in 4f-3d
2
+ Compounds
3
+ Alexander Moskvin*, Evgenii Vasinovich, Anton Shadrin
4
+ Ural Federal University, Ekaterinburg, Russia
5
+ Abstract: Spin reorientation is an important phenomenon of rare-earth perovskites, orthoferrites
6
+ and orthochromites. In this study, we consider a simple but realistic microscopic theory of the
7
+ spontaneous spin-reorientation transitions induced by the 4f-3d interaction, more specifically, the
8
+ interaction of the main Kramers doublet or non-Kramers quasi-doublet of the 4f ion with an effective
9
+ magnetic field induced by the 3d sublattice. The obtained results indicate that the cause of both the
10
+ temperature and the character of the spin-reorientation transition is a competition between the second
11
+ and fourth order spin anisotropy of the 3d sublattice, the crystal field for 4f ions, and the 4f-3d
12
+ interaction.
13
+ Keywords: 4f-3d interaction; (quasi)doublets; spin reorientation
14
+ 1 Introduction
15
+ Rare-earth orthorhombic perovskites, orthoferrites RFeO3 and orthochromites RCrO3 (where
16
+ R is a rare-earth ion and yttrium), exhibit many important features such as weak ferro- and
17
+ antiferromagnetism, magnetization reversal, anomalous circular magnetooptics, and the phenomenon
18
+ of the spontaneous spin reorientation. The spin reorientation (SR) is one of their unique properties
19
+ that have attracted a lot of attention back in the 70s of the last century [1, 2], though their exact
20
+ microscopic origin is still a challenge to theorists and experimentalists.
21
+ The revival of interest in the mechanism of the spontaneous spin reorientation and
22
+ magnetic compensation in rare-earth perovskites in recent years is related with the discovery
23
+ of the magnetoelectric and the exchange bias effect, which can have a direct application in
24
+ magnetoelectronics. Along with the emergence of new experimental studies (see, e.g., Refs. [3, 4]),
25
+ there also appeared theoretical works claiming to modify the mean-field theory of the spontaneous
26
+ spin-reorientation transitions [5] or to scrutinize the microscopic mechanism responsible for
27
+ spin reorientations and magnetization reversal [6]. In fact, these results are not directly related
28
+ to the microscopic theory of the spontaneous spin reorientation in rare-earth orthoferrites and
29
+ orthochromites. For instance, the authors of the most recent paper [6] did not take into account
30
31
+ 1
32
+ arXiv:2301.12157v1 [cond-mat.str-el] 28 Jan 2023
33
+
34
+ a number of interactions, such as the fourth-order anisotropy for the 3𝑑 sublattice of orthoferrites
35
+ and the crystal field for 𝑅-ions, which play a fundamental role in determining the spontaneous
36
+ spin reorientation. The spin anisotropy of the second order in the 3𝑑 sublattice of orthorhombic
37
+ orthoferrites and orthochromites is generally not reduced to an effective uniaxial form as adopted in
38
+ Ref. [6]. Furthermore, the density functional theory does not allow in principle to give an adequate
39
+ description of such effects of higher orders of perturbation theory as spin anisotropy or antisymmetric
40
+ exchange [7].
41
+ In this paper, we present the results of a simple but realistic microscopic model of the spontaneous
42
+ spin reorientation in rare-earth orthoferrites and orthochromites, which takes into account all the main
43
+ relevant interactions. This model was developed back in the 80s of the last century [8], but has not
44
+ been published until now.
45
+ 2 Model formulation
46
+ The most popular examples of systems with the spontaneous SR transitions are magnets based
47
+ on 3𝑑 and 4𝑓 elements such as rare-earth orthoferrites RFeO3, orthochromites RCrO3, intermetallic
48
+ compounds RCo5, RFe2 etc. In all cases, an important cause of the spontaneous SR is the 4𝑓 − 3𝑑
49
+ interaction. Usually this interaction is taken into account by introducing an effective field of the
50
+ magnetically ordered 3𝑑 sublattice acting on the 4𝑓 ions.
51
+ To consider the contribution of the rare-earth sublattice to the free energy at low temperatures, we
52
+ are developing a model which takes into account either the well isolated lower Kramers doublet of the
53
+ 4𝑓 ions (with an odd number of the 4𝑓 electrons) or the well isolated two lower Stark sublevels with
54
+ close energies that form a quasi-doublet.
55
+ Within the framework of such “single-doublet” approximation we consider the spontaneous SR
56
+ transition in orthorhombic weak ferromagnets RFeO3 and RCrO3, where the free energy per ion can
57
+ be represented as follows
58
+ Φ(𝜃) = 𝐾1 cos 2𝜃 + 𝐾2 cos 4𝜃 − 𝑘𝑇 ln 2 cosh ∆(𝜃)
59
+ 2𝑘𝑇 ,
60
+ (1)
61
+ where 𝐾1, 𝐾2 are the first and second anisotropy constants of the 3𝑑 sublattice, which are temperature
62
+ independent (at least in the SR region), 𝜃 is the orientation angle of the antiferromagnetic, or N´eel
63
+ vector G of the 3𝑑 sublattice (e.g. in the 𝑎𝑐 plane), and ∆(𝜃) is the lower doublet (quasi-doublet)
64
+ splitting of the 4𝑓 ion in a magnetic field induced by the 3𝑑 sublattice.
65
+ Theoretical estimations [8–10] of the different contributions to the first constants of the magnetic
66
+ anisotropy for orthoferrites RFeO3 point to a competition of several main mechanisms with relatively
67
+ regular (Dzyaloshinskii-Moriya (DM) coupling, magnetodipole interaction) or irregular (single-ion
68
+ anisotropy, SIA) dependence on the type of R-ion. For instance, the microscopic theory predicts an
69
+ unexpectedly strong increase in values of the constant 𝐾1(𝑎𝑐) for LuFeO3 as compared with YFeO3.
70
+ The SIA contribution to 𝐾1(𝑎𝑐) partially compensates for the large contribution of the DM interaction
71
+ in YFeO3, whereas in LuFeO3, they add up. This result is confirmed by experimental data on the
72
+ 2
73
+
74
+ measurement of the threshold field 𝐻𝑆𝑅 of spin reorientation Γ4 → Γ2 (𝐺𝑥 → 𝐺𝑧) in the orthoferrite
75
+ Lu0.5Y0.5FeO3, in which 𝐻𝑆𝑅 = 15 T as compared to 𝐻𝑆𝑅 = 7.5 T in YFeO3 [10]. Thus, one can
76
+ estimate 𝐾1(𝑎𝑐) in LuFeO3 as around three times as much as 𝐾1(𝑎𝑐) in YFeO3.
77
+ Let us pay attention to recent works on the determination of the parameters of the spin Hamiltonian
78
+ in YFeO3 from measurements of the spin-wave spectrum by the inelastic neutron scattering [11,
79
+ 12] and terahertz absorption spectroscopy [13]. However, these authors started with a simplified
80
+ spin-Hamiltonian that took into account only Heisenberg exchange, DM interaction, and single-
81
+ ion anisotropy. Obviously, disregarding the magnetic dipole and exchange-relativistic anisotropy, the
82
+ “single-ion anisotropy” constants found by the authors are some effective quantities that are not directly
83
+ related to the SIA.
84
+ Unfortunately, despite numerous, including fairly recent, studies of the magnetic anisotropy of
85
+ orthoferrites, we do not have reliable experimental data on the magnitude of the contributions of
86
+ various anisotropy mechanisms.
87
+ As shown by theoretical calculations [8,9,14] the constants 𝐾2 of the fourth order spin anisotropy
88
+ rather smoothly decrease in absolute value, changing by no more than two times on going from La to
89
+ Lu. But the most interesting was the conclusion about the different signs of these constants, positive
90
+ for the 𝑎𝑐 and 𝑏𝑐 planes and negative for the 𝑎𝑏 plane, thus indicating a different character of spin-
91
+ reorientation transitions in the corresponding planes, i.e., second-order transitions in the 𝑎𝑐 and 𝑏𝑐
92
+ planes and first-order transitions in the 𝑎𝑏 plane [2]. Indeed, all currently known spin-reorientation
93
+ transitions of the Γ4 −Γ2 (𝐺𝑥 −𝐺𝑧) type in orthoferrites RFeO3 (R = Pr, Nd, Sm, Tb, Ho, Er, Tm, Yb)
94
+ are smooth, with two characteristic temperatures of the second-order phase transitions to be a start
95
+ and finish of the spin reorientation, and the only known jump-like first order SR transition for these
96
+ crystals is the SR transition Γ4 − Γ1 (𝐺𝑥 − 𝐺𝑦) in the 𝑎𝑏 plane in DyFeO3 [2]. A unique example
97
+ that confirms the conclusions about the sign of the second anisotropy constant is a mixed orthoferrite
98
+ Ho0.5Dy0.5FeO3 [2] in which two spin-reorientation transitions 𝐺𝑥 − 𝐺𝑦 (𝑇 = 46 K) and 𝐺𝑦 − 𝐺𝑧
99
+ (18 ÷ 24 K) are realized through one phase transition of the first order in the 𝑎𝑏 plane and two phase
100
+ transitions of the second order in the 𝑏𝑐 plane, respectively.
101
+ The splitting value ∆(𝜃) for the Kramers doublet in a magnetic field H has the well-known form
102
+ ∆(𝜃) = 𝜇𝐵
103
+ [︀
104
+ (𝑔𝑥𝑥𝐻𝑥 + 𝑔𝑥𝑦𝐻𝑦)2 + (𝑔𝑥𝑦𝐻𝑥 + 𝑔𝑦𝑦𝐻𝑦)2 + 𝑔2
105
+ 𝑧𝑧𝐻2
106
+ 𝑧
107
+ ]︀1/2 ,
108
+ (2)
109
+ where it is taken into account that for the 4𝑓 ions in RFeO3 the ˆ𝑔-tensor (with the local symmetry 𝐶𝑠)
110
+ has the form
111
+ ˆ𝑔 =
112
+
113
+
114
+
115
+ 𝑔𝑥𝑥
116
+ 𝑔𝑥𝑦
117
+ 0
118
+ 𝑔𝑥𝑦
119
+ 𝑔𝑦𝑦
120
+ 0
121
+ 0
122
+ 0
123
+ 𝑔𝑧𝑧
124
+
125
+
126
+ ⎠ .
127
+ (3)
128
+ The effective field H for the SR transition 𝐺𝑥 → 𝐺𝑧 in the 𝑎𝑐 plane can be represented as follows
129
+ 𝐻𝑥 = 𝐻(0)
130
+ 𝑥 cos 𝜃, 𝐻𝑦 = 𝐻(0)
131
+ 𝑦
132
+ cos 𝜃, 𝐻𝑧 = 𝐻(0)
133
+ 𝑧
134
+ sin 𝜃,
135
+ (4)
136
+ 3
137
+
138
+ so in the absence of an external magnetic field, for ∆(𝜃) we have the rather simple expression:
139
+ ∆(𝜃) =
140
+ (︂∆2
141
+ 𝑎 − ∆2
142
+ 𝑐
143
+ 2
144
+ cos 2𝜃 + ∆2
145
+ 𝑎 + ∆2
146
+ 𝑐
147
+ 2
148
+ )︂1/2
149
+ ,
150
+ (5)
151
+ where ∆𝑎,𝑐 are the doublet splitting for the cases of 𝜃 = 0 (𝐺𝑧-phase) and 𝜃 = 𝜋/2 (𝐺𝑥-phase)
152
+ respectively. The dependence ∆(𝜃) from (5) is also valid in the case of quasi-doublet.
153
+ A contribution of splitting ∆ to the free energy Φ(𝜃) for the rare-earth sublattice is usually
154
+ considered in the “high-temperature” approximation, when 𝑘𝑇 ≫ ∆ and the influence of the 4𝑓
155
+ sublattice are reduced only to renormalization of the first anisotropy constant 𝐾1:
156
+ 𝐾*
157
+ 1 = 𝐾1
158
+ (︂
159
+ 1 − 1
160
+ 𝜏
161
+ )︂
162
+ ,
163
+ (6)
164
+ where 𝜏 = 𝑇/𝑇𝑆𝑅 is the reduced temperature and 𝑇𝑆𝑅 = (∆2
165
+ 𝑎 − ∆2
166
+ 𝑐)/16𝑘𝐾1 is the characteristic
167
+ transition temperature.
168
+ Below we will consider a specific situation when 𝐾1 > 0 and ∆𝑎 > ∆𝑐, i.e. when the configuration
169
+ 𝐺𝑥 (𝜃 = 𝜋/2) is realized at high temperatures and a decrease in temperature can lead to the spin
170
+ reorientation 𝐺𝑥 → 𝐺𝑧 or 𝐺𝑥 → 𝐺𝑥𝑧 (transition to an angular spin structure). The type of the phase
171
+ transition of the spin reorientation in the “high-temperature” approximation is determined by the sign
172
+ of the second constant 𝐾2: at 𝐾2 < 0 it will be realized by one first-order phase transition at 𝑇 = 𝑇𝑆𝑅,
173
+ i.e. 𝜏 = 1, or at 𝐾2 > 0 by two second-order phase transitions at 𝜏𝑠 = (1 + 𝛾)−1 and 𝜏𝑓 = (1 − 𝛾)−1,
174
+ where 𝜏𝑠 and 𝜏𝑓 are the reduced temperatures of the beginning and end of the SR phase transition and
175
+ 𝛾 = 4𝐾2/𝐾1.
176
+ 3 Analysis of the “single-doublet” model
177
+ A behavior of a system described by the free energy (1) can be analyzed rigorously. The condition
178
+ 𝜕Φ/𝜕𝜃 = 0 reduces in this case to two equations:
179
+ $\sin 2\theta = 0,$ (7)
+ $\alpha\mu + \beta\mu^3 = \tanh\frac{\mu}{\tau};$ (8)
184
+ where the following notations are introduced:
185
+ $\alpha = 1 - \gamma\,\frac{\Delta_a^2 + \Delta_c^2}{\Delta_a^2 - \Delta_c^2}, \quad \beta = \frac{2\gamma}{\mu_f^2 - \mu_s^2}, \quad \mu = \frac{\Delta(\theta)}{2kT_{SR}}, \quad \mu_s = \frac{\Delta_c}{2kT_{SR}}, \quad \mu_f = \frac{\Delta_a}{2kT_{SR}}.$ (9)
206
+ This corresponds to three possible magnetic configurations:
207
+ • The configuration $G_x$: $\theta = \pm\pi/2$, stable at $\tanh(\mu_s/\tau) \le \alpha\mu_s + \beta\mu_s^3$.
+ • The configuration $G_z$: $\theta = 0, \pi$, stable at $\tanh(\mu_f/\tau) \ge \alpha\mu_f + \beta\mu_f^3$.
211
+ 4
212
+
213
+ • The angular configuration 𝐺𝑥𝑧: the temperature dependence of 𝜃(𝜏) is determined by solving
214
+ the equation (8) (see Figure 1), the state is stable at 𝜕𝜇/𝜕𝜏 ≤ 0.
215
+ The peculiar 𝜇-𝜏 phase diagram which represents solutions of the master equation (8) given a fixed
216
+ value of the 𝛼 parameter and different value of the 𝛽 parameter is shown in Figure 1, where areas
217
+ with different character of the SR transition are highlighted in different colors. For the solutions in
218
+ the FO region, the SR goes through one first-order phase transition, in the SO region we arrive at one
219
+ or two second-order phase transitions, in the MO1,2 regions we arrive at a “mixture” of the first and
220
+ second-order phase transitions. All the lines $\mu(\tau)$ on the right side converge to $\sqrt{|\alpha/\beta|}$ at $\tau \to \infty$; on
+ the left side, when $\tau \to 0$ the branch point $\mu = \frac{3}{2\alpha}$ is obtained at $\beta = -\frac{4}{27}\alpha^3$, and the point $\mu = 1/\alpha$
+ at $\beta = 0$; all the solutions, where $\mu$ can reach zero, converge to $\tau = 1/\alpha$.
228
+ 0
229
+ 1/α
230
+ τ
231
+ 1/α
232
+ 3
233
+ 2 α
234
+ μ
235
+ α / β1
236
+ α / β2
237
+ α / β3
238
+ FO
239
+ MO1
240
+ MO2
241
+ SO
242
+ Fig. 1: (Color online) The peculiar 𝜇-𝜏 phase diagram which represents solutions of the master
243
+ equation (8) given a fixed value of the 𝛼 parameter and different value of the 𝛽 parameter (see text for
244
+ detail).
245
+ The character of the SR transition will be determined by the form of the solution of the equation
246
+ (8) in the region 𝜇𝑠 ≤ 𝜇 ≤ 𝜇𝑓. Let us analyze this equation starting with the simplest case 𝐾2 = 0,
247
+ i.e. 𝛼 = 1, 𝛽 = 0. In this case, the main equation transforms into the molecular field equation well
248
+ known in the basic theory of ferromagnetism:
249
+ $\mu = \tanh\frac{\mu}{\tau} = B_{1/2}\!\left(\frac{\mu}{\tau}\right),$ (10)
257
+ where 𝐵1/2(𝑥) is the Brillouin function. The equation has only one non-trivial solution at 0 ≤ 𝜏 ≤ 1,
258
+ 0 ≤ 𝜇 ≤ 1, and the function 𝜇(𝜏) has the usual “Weiss” form. Thus, with the absence of the
259
+ cubic anisotropy (𝐾2 = 0) in the “single-doublet” model the SR will be realized either through two
260
+ second-order phase transitions at 𝜇𝑓 ≤ 1 (the complete spin-reorientation 𝐺𝑥 → 𝐺𝑧), or through one
261
+ second-order phase transition at 𝜇𝑓 > 1, but in this case the SR will be incomplete, i.e. it will end
262
+ with a transition to the angular spin structure 𝐺𝑥𝑧. The spin reorientation will begin at a temperature
263
+ 5
264
+
265
+ 𝑇𝑠 ≤ 𝑇𝑆𝑅 and 𝑇𝑠 is equal to 𝑇𝑆𝑅 only in the case 𝜇𝑠 = 0 (∆𝑐 = 0), which can be realized in the general
266
+ case only for Ising ions (e.g. Dy3+ in DyFeO3). For this type of ions, the temperature dependence of
267
+ the “order parameter” 𝜇 (in fact the splitting ∆(𝜃) of the doublet) in a close range of 𝑇𝑆𝑅 will be very
268
+ sharp: 𝜇(𝑇) ∼ (𝑇 − 𝑇𝑆𝑅)−1/2. Nevertheless, the SR will be continuous and the temperature range of
269
+ the SR ∆𝑇 = 𝑇𝑠 − 𝑇𝑓 at 𝜇 ≪ 1 can theoretically reach arbitrarily small values.
270
+ Thus, the results of the rigorous analysis of the “single-doublet” model are fundamentally different
271
+ from the conclusions of the simplified model (the “high-temperature” approximation), according to
272
+ which for 𝐾2 = 0 the spin reorientation always occurs as the first-order phase transition at 𝑇 = 𝑇𝑆𝑅.
273
+ For a positive second anisotropy constant (𝐾2 > 0, 𝛽 > 0), the main equation (8) has one non-
274
+ trivial solution in the region $0 \le \tau \le 1/\alpha$, $0 \le \mu \le \mu_0$ at $\alpha > 0$, and one in the region $0 \le \tau \le \infty$,
+ $\sqrt{|\alpha/\beta|} \le \mu \le \mu_0$ at $\alpha \le 0$, where $\mu_0$ is determined from the solution of the equation $\alpha\mu_0 + \beta\mu_0^3 = 1$.
278
+ The situation in this case is very similar to the previous one, i.e. the beginning of the SR will always
279
+ be a second-order phase transition, and the reorientation will be complete (𝐺𝑥 → 𝐺𝑧) or incomplete
280
+ ($G_x \to G_{xz}$). Note that under the condition $(\mu_f^2 - \mu_s^2)/(\mu_f^2 + \mu_s^2) \ge \gamma$, i.e. $\alpha \le 0$, the width of the
285
+ reorientation region becomes very large, even if 𝜇𝑠 differs slightly from 𝜇𝑓.
286
+ For Ising ions at ∆𝑐 = 0, the SR beginning temperature is determined in exactly the same way as
287
+ in the “high-temperature” approximation 𝑇𝑠 = 𝑇𝑆𝑅/(1 + 𝛾).
288
+ For a negative second anisotropy constant (𝐾2 < 0, 𝛽 < 0), the several fundamentally different
289
+ solutions of the main equation (8) are possible. For $K_2^{*} \ge K_2$, where $K_2^{*}$ is determined from the
+ condition $\beta = -\frac{1}{3}\alpha^3$, i.e.
+ $\frac{2\gamma}{\mu_f^2 - \mu_s^2} = -\frac{1}{3}\left( 1 - \gamma\,\frac{\mu_f^2 + \mu_s^2}{\mu_f^2 - \mu_s^2} \right)^{3},$ (11)
310
+ there is one non-trivial solution of the equation (8) in the region $1/\alpha \le \tau < \infty$, $\mu \le \sqrt{|\alpha/\beta|}$, but
313
+ here 𝜇(𝑇) decreases with decreasing temperature, i.e. 𝜕𝜇/𝜕𝜏 > 0. This solution is unstable and there
314
+ is no fundamental possibility for a smooth rotation of spins, the SR is always realized through the
315
+ first-order phase transition.
316
+ In the intermediate range of values $K_2$ ($K_2^{*} < K_2 < 0$ or $-\frac{1}{3}\alpha^3 < \beta < 0$) the main equation
319
+ has two non-trivial solutions, and for one of them 𝜕𝜇/𝜕𝜏 > 0 (corresponding to bigger values of 𝜇),
320
+ and for the second 𝜕𝜇/𝜕𝜏 < 0 (corresponding to smaller values of 𝜇). It is convenient to consider
321
+ separately three areas of variation 𝛽.
322
+ 1. $-\frac{4}{27}\alpha^3 < \beta < 0$:
+ a) the first solution: $0 \le \tau < \infty$, $\mu_{>} \le \mu < \sqrt{|\alpha/\beta|}$,
+ b) the second solution: $0 \le \tau \le 1/\alpha$, $0 \le \mu \le \mu_{<}$,
+ where $\mu_{>}$, $\mu_{<}$ are the bigger and smaller positive solutions of the equation $\alpha\mu + \beta\mu^3 = 1$.
329
+ 2. $\beta = -\frac{4}{27}\alpha^3$:
+ a) the first solution: $0 \le \tau < \infty$, $3/(2\alpha) \le \mu < \sqrt{|\alpha/\beta|}$,
+ b) the second solution: $0 \le \tau \le 1/\alpha$, $0 \le \mu \le 3/(2\alpha)$,
+ moreover, in this case we have a branch point of the main equation solution at $\tau = 0$, $\mu = 1$.
336
+ 3. $-\frac{1}{3}\alpha^3 < \beta < -\frac{4}{27}\alpha^3$:
+ a) the first solution: $\tau_0 \le \tau < \infty$, $\mu_0 \le \mu < \sqrt{|\alpha/\beta|}$,
342
+ 6
343
+
344
+ b) the second solution: 𝜏0 ≤ 𝜏 ≤ 1/𝛼, 0 ≤ 𝜇 ≤ 𝜇0,
345
+ where the quantities 𝜇0, 𝜏0 correspond to the branch points of the main equation solutions.
346
+ Illustrations of typical (a,b) and unconventional (c,d) SR transitions predicted by simple
347
+ (quasi)doublet model are shown in Figure 2. The Figure 2a, built with 𝐾1 = 1, 𝛾 = 0.05, ∆𝑎 =
348
+ 30.84, ∆𝑐 = 14.82, which corresponds to 𝑇𝑆𝑅 = 45.73, 𝜇𝑠 = 0.162, 𝜇𝑓 = 0.337, 𝜏𝑠 = 1.04, 𝜏𝑓 =
349
+ 0.91, describes a typical smooth SR transition with two second-order phase transitions 𝐺𝑥 − 𝐺𝑥𝑧 at
350
+ the beginning (𝜏𝑠) and 𝐺𝑥𝑧 − 𝐺𝑧 at the end (𝜏𝑓) of the spin reorientation.
351
+ The Figure 2b, built with 𝐾1 = 1, 𝛾 = −0.1, ∆𝑎 = 33.19, ∆𝑐 = 27.1, which corresponds to
352
+ 𝑇𝑆𝑅 = 22.95, 𝜇𝑠 = 0.59, 𝜇𝑓 = 0.72, 𝜏𝑠 = 0.762, 𝜏𝑓 = 0.93, describes an abrupt first-order SR
353
+ transition. For 𝜏 > 𝜏𝑓 there is the 𝐺𝑥-phase, which can remain stable up to 𝜏𝑠 when cooled. For 𝜏 < 𝜏𝑠
354
+ there is the 𝐺𝑧-phase, which can remain stable up to 𝜏𝑓 when heated. The point 𝐴 marks a phase
355
+ transition point when the phases 𝐺𝑥 and 𝐺𝑧 have equal energies.
356
+ The Figure 2c, built with 𝐾1 = 1, 𝛾 = −0.222, ∆𝑎 = 6.72, ∆𝑐 = 1.63, which corresponds
357
+ to 𝑇𝑆𝑅 = 2.65, 𝜇𝑠 = 0.307, 𝜇𝑓 = 1.266, 𝜏𝑠 = 0.778, 𝜏𝑓 = 0.523 and the Figure 2d, built with
358
+ 𝐾1 = 1, 𝛾 = −0.25, ∆𝑎 = 6.71, ∆𝑐 = 2.02, which corresponds to 𝑇𝑆𝑅 = 2.56, 𝜇𝑠 = 0.396, 𝜇𝑓 =
359
+ 1.31, 𝜏𝑠 = 0.73, 𝜏𝑓 = 0.545 describe unconventional "mixed" SR transitions. At 𝜏𝑠 there is the smooth
360
+ second-order phase transition 𝐺𝑥 − 𝐺𝑥𝑧. At 𝜏 ≤ 𝜏𝑓 we have two stable phases 𝐺𝑧 and 𝐺𝑥𝑧: at those
361
+ temperatures the sharp first-order phase transition 𝐺𝑥𝑧 − 𝐺𝑧 can happen, or the system could stay in
362
+ the angular 𝐺𝑥𝑧-phase.
363
+ (a)
364
+ τ
365
+ τs
366
+ τf
367
+ μ
368
+ μs
369
+ μf
370
+ (c)
371
+ τ
372
+ τs
373
+ τf
374
+ μ
375
+ μs
376
+ μf
377
+ (d)
378
+ τ
379
+ τs
380
+ τf
381
+ μ
382
+ μs
383
+ μf
384
+ θ
385
+ Φ
386
+ θ
387
+ Φ
388
+ θ
389
+ Φ
390
+ τ > τf
391
+ τ < τs
392
+ A
393
+ (b)
394
+ τ
395
+ τs
396
+ τf
397
+ μ
398
+ μs
399
+ μf
400
+ A
401
+ Fig. 2: Illustrations of typical (a,b) and unconventional (c,d) SR transitions predicted by simple
402
+ (quasi)doublet model (see text for detail). The arrows indicate the direction of the antiferromagnetic
403
+ vector G in the 𝑎𝑐 plane. The insets in panel (b) show the 𝜃-dependence of the free energy.
404
+ 7
405
+
406
+ Thus, there are not only the smooth and abrupt SR transitions, a characteristic feature of the range
407
+ of intermediate values 𝐾2 is the fundamental possibility of the existence of “mixed” SR transitions, in
408
+ which the spins first smoothly rotate through a certain angle and then jump to the position with 𝜃 = 0.
409
+ For this, it is sufficient that 𝜇𝑓 corresponds to a point on the upper branch of solutions, and 𝜇𝑠 to a point
410
+ on the lower branch of solutions at 𝜏𝑓 < 𝜏𝑠. In this case, the spin reorientation begins with the single
411
+ second-order transition 𝐺𝑥 → 𝐺𝑥𝑧 and then ends with the first-order phase transition 𝐺𝑥𝑧 → 𝐺𝑧.
412
+ In contrast to the “high-temperature” approximation, the “single-doublet” model claims the nature
413
+ of the phase transition is determined not simply by the sign of the second anisotropy constant, but
414
+ also it depends on the ratio between 𝐾1, 𝐾2 and the doublet splitting in both phases. Nevertheless,
415
+ if we apply the simplified model to describe the SR transition, we have to renormalize both the first
416
+ and the second anisotropy constant, giving the last one sometimes a rather complicated temperature
417
+ dependence, in particular with a change in sign when considering transitions of the “mixed” type.
418
+ Of course, in this case Fe sublattice alone is not enough to provide the value of the effective second
419
+ constant.
420
+ 4 Conclusion
421
+ The model of the spin-reorientation transitions induced by the 4𝑓 − 3𝑑 interaction in rare-earth
422
+ orthoferrites and orthochromites has been investigated. It is shown that both the temperature and
423
+ the character of the spin-reorientation transition following from the solution of the transcendental
424
+ equation (8) are the result of competition between the second and fourth order spin anisotropy of
425
+ the 3𝑑 sublattice, the crystal field for 4f ions, and the 4𝑓 − 3𝑑 interaction. At variance with the
426
+ “high-temperature” approximation, the “single-doublet” model, along with typical smooth and abrupt
427
+ SR transitions, predicts the appearance of mixed-type SR transitions, with an initial second-order
428
+ transition and a final abrupt first-order transition.
429
+ Funding: The research was supported by the Ministry of Education and Science of the Russian
430
+ Federation, project № FEUZ-2020-0054, and by Russian Science Foundation, project № 22-22-00682.
431
+ References
432
+ [1] Belov, K.P.; Zvezdin, A.K.; Kadomtseva, A.M.; Levitin, R.Z. Spin-reorientation transitions in
433
+ rare-earth magnets. Sov. Phys. Usp. 1976, 19, 574.
434
+ [2] Belov, K.P.; Zvezdin, A.K.; Kadomtseva, A.M.; Levitin, R.Z. Orientational Transitions in Rare-
435
+ Earth Magnetics; Nauka: Moscow, Russia, 1979. (In Russian)
436
+ [3] Singh, A.; Rajput, S.; Padmanabhan, B.; Anas, M.; Damay, F.; Kumar, C.M.N.; Eguchi, G.; Jain,
437
+ A.; Yusuf, S.M.; Maitra, T.; Malik V.K. Successive spin reorientations and rare earth ordering
438
+ in Nd0.5Dy0.5FeO3: Experimental and ab initio investigations. Phys. Rev. B 2020, 102, 144432.
439
+ 8
440
+
441
+ [4] Hoogeboom, G.R.; Kuschel, T.; Bauer, G.E.W.; Mostovoy, M.V.; Kimel, A.V.; van Wees, B.J.
442
+ Magnetic order of Dy3+ and Fe3+ moments in antiferromagnetic DyFeO3 probed by spin Hall
443
+ magnetoresistance and spin Seebeck effect. Phys. Rev. B 2021, 103, 134406.
444
+ [5] Tsymbal, L.T.; Bazaliy, Y.B.; Derkachenko, V.N.; Kamenev, V.I.; Kakazei, G.N.; Palomares, F.J.;
445
+ Wigen, P.E. Magnetic and structural properties of spin-reorientation transitions in orthoferrites.
446
+ J. Appl. Phys. 2007, 101, 123919–123926.
447
+ [6] Sasani, A.; I˜niguez, J.; Bousquet, E. Magnetic phase diagram of rare-earth orthorhombic
448
+ perovskite oxides. Phys. Rev. B 2021, 104, 064431.
449
+ [7] Moskvin, A.S. Dzyaloshinskii–Moriya Coupling in 3d Insulators. Condens. Matter 2019, 4, 84.
450
+ [8] Moskvin, A.S. Antisymmetric Exchange and Magnetic Anisotropy in Weak Ferromagnets. D.
451
+ Sc. Thesis, Lomonosov Moscow State University, Moscow, Russia, 1984. (In Russian)
452
+ [9] Moskvin,
453
+ A.
454
+ Structure–Property
455
+ Relationships
456
+ for
457
+ Weak
458
+ Ferromagnetic
459
+ Perovskites.
460
+ Magnetochemistry 2021, 7, 111.
461
+ [10] Kadomtseva, A.M.; Agafonov, A.P.; Lukina, M.M.; Milov, V.N.; Moskvin, A.S.; Semenov, V.A.;
462
+ Sinitsyn, E.V. Nature of the Magnetic Anisotropy and Magnetostriction of Orthoferrites and
463
+ Orthochromites. JETP 1981, 81, 700–706.
464
+ [11] Hahn, S.E.; Podlesnyak, A.A.; Ehlers, G.; Granroth, G.E.; Fishman, R.S.; Kolesnikov, A.I.;
465
+ Pomjakushina, E.; Conder, K. Inelastic neutron scattering studies of YFeO3. Phys. Rev. B 2014,
466
+ 89, 014420.
467
+ [12] Park, K.; Sim, H.; Leiner, J.C.; Yoshida, Y.; Jeong, J.; Yano, S.; Gardner, J.; Bourges, P.; Klicpera,
468
+ M.; Sechovsk´y, V.; Boehm, M.; Park, J.-G. Low-energy spin dynamics of orthoferrites AFeO3
469
+ (A = Y, La, Bi). J. Phys. Condens. Matter 2018, 30, 235802.
470
+ [13] Amelin, K.; Nagel, U.; Fishman, R.S.; Yoshida, Y.; Sim, H.; Park, K.; Park, J.-G.; R˜o˜om, T.
471
+ Terahertz absorption spectroscopy study of spin waves in orthoferrite YFeO3 in a magnetic field.
472
+ Phys. Rev. B 2018, 98, 174417.
473
+ [14] Moskvin, A.S.; Bostrem, I.G. Cubic Anisotropy of Rare-Earth Orthoferrites. Sov. Phys. Solid St.
474
+ 1979, 21, 628.
475
+ 9
476
+
-NFLT4oBgHgl3EQfuy_h/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,410 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf,len=409
2
+ page_content='Simple Realistic Model of Spin Reorientation in 4f-3d Compounds Alexander Moskvin*, Evgenii Vasinovich, Anton Shadrin Ural Federal University, Ekaterinburg, Russia Abstract: Spin reorientation is an important phenomenon of rare-earth perovskites, orthoferrites and orthochromites.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
3
+ page_content=' In this study, we consider a simple but realistic microscopic theory of the spontaneous spin-reorientation transitions induced by the 4f-3d interaction, more specifically, the interaction of the main Kramers doublet or non-Kramers quasi-doublet of the 4f ion with an effective magnetic field induced by the 3d sublattice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
4
+ page_content=' The obtained results indicate that the cause of both the temperature and the character of the spin-reorientation transition is a competition between the second and fourth order spin anisotropy of the 3d sublattice, the crystal field for 4f ions, and the 4f-3d interaction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
5
+ page_content=' Keywords: 4f-3d interaction;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
6
+ page_content=' (quasi)doublets;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
7
+ page_content=' spin reorientation 1 Introduction Rare-earth orthorhombic perovskites, orthoferrites RFeO3 and orthochromites RCrO3 (where R is a rare-earth ion and yttrium), exhibit many important features such as weak ferro- and antiferromagnetism, magnetization reversal, anomalous circular magnetooptics, and the phenomenon of the spontaneous spin reorientation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
8
+ page_content=' The spin reorientation (SR) is one of their unique properties that have attracted a lot of attention back in the 70s of the last century [1, 2], though their exact microscopic origin is still a challenge to theorists and experimentalists.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
9
+ page_content=' The revival of interest in the mechanism of the spontaneous spin reorientation and magnetic compensation in rare-earth perovskites in recent years is related with the discovery of the magnetoelectric and the exchange bias effect, which can have a direct application in magnetoelectronics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
10
+ page_content=' Along with the emergence of new experimental studies (see, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
11
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
12
+ page_content=', Refs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
13
+ page_content=' [3, 4]), there also appeared theoretical works claiming to modify the mean-field theory of the spontaneous spin-reorientation transitions [5] or to scrutinize the microscopic mechanism responsible for spin reorientations and magnetization reversal [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
14
+ page_content=' In fact, these results are not directly related to the microscopic theory of the spontaneous spin reorientation in rare-earth orthoferrites and orthochromites.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
15
+ page_content=' For instance, the authors of the most recent paper [6] did not take into account alexander.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
16
+ page_content='moskvin@urfu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
17
+ page_content='ru 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
18
+ page_content='12157v1 [cond-mat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
19
+ page_content='str-el] 28 Jan 2023 a number of interactions, such as the fourth-order anisotropy for the 3𝑑 sublattice of orthoferrites and the crystal field for 𝑅-ions, which play a fundamental role in determining the spontaneous spin reorientation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
20
+ page_content=' The spin anisotropy of the second order in the 3𝑑 sublattice of orthorhombic orthoferrites and orthochromites is generally not reduced to an effective uniaxial form as adopted in Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
21
+ page_content=' [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
22
+ page_content=' Furthermore, the density functional theory does not allow in principle to give an adequate description of such effects of higher orders of perturbation theory as spin anisotropy or antisymmetric exchange [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
23
+ page_content=' In this paper, we present the results of a simple but realistic microscopic model of the spontaneous spin reorientation in rare-earth orthoferrites and orthochromites, which takes into account all the main relevant interactions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
24
+ page_content=' This model was developed back in the 80s of the last century [8], but has not been published until now.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
25
+ page_content=' 2 Model formulation The most popular examples of systems with the spontaneous SR transitions are magnets based on 3𝑑 and 4𝑓 elements such as rare-earth orthoferrites RFeO3, orthochromites RCrO3, intermetallic compounds RCo5, RFe2 etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
26
+ page_content=' In all cases, an important cause of the spontaneous SR is the 4𝑓 − 3𝑑 interaction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
27
+ page_content=' Usually this interaction is taken into account by introducing an effective field of the magnetically ordered 3𝑑 sublattice acting on the 4𝑓 ions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
28
+ page_content=' To consider the contribution of the rare-earth sublattice to the free energy at low temperatures, we are developing a model which takes into account either the well isolated lower Kramers doublet of the 4𝑓 ions (with an odd number of the 4𝑓 electrons) or the well isolated two lower Stark sublevels with close energies that form a quasi-doublet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
29
+ page_content=' Within the framework of such “single-doublet” approximation we consider the spontaneous SR transition in orthorhombic weak ferromagnets RFeO3 and RCrO3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
30
+ page_content=' where the free energy per ion can be represented as follows Φ(𝜃) = 𝐾1 cos 2𝜃 + 𝐾2 cos 4𝜃 − 𝑘𝑇 ln 2 cosh ∆(𝜃) 2𝑘𝑇 ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
31
+ page_content=' (1) where 𝐾1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
32
+ page_content=' 𝐾2 are the first and second anisotropy constants of the 3𝑑 sublattice,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
33
+ page_content=' which are temperature independent (at least in the SR region),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
34
+ page_content=' 𝜃 is the orientation angle of the antiferromagnetic,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
35
+ page_content=' or N´eel vector G of the 3𝑑 sublattice (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
36
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
37
+ page_content=' in the 𝑎𝑐 plane), and ∆(𝜃) is the lower doublet (quasi-doublet) splitting of the 4𝑓 ion in a magnetic field induced by the 3𝑑 sublattice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
38
+ page_content=' Theoretical estimations [8–10] of the different contributions to the first constants of the magnetic anisotropy for orthoferrites RFeO3 point to a competition of several main mechanisms with relatively regular (Dzyaloshinskii-Moriya (DM) coupling, magnetodipole interaction) or irregular (single-ion anisotropy, SIA) dependence on the type of R-ion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
39
+ page_content=' For instance, the microscopic theory predicts an unexpectedly strong increase in values of the constant 𝐾1(𝑎𝑐) for LuFeO3 as compared with YFeO3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
40
+ page_content=' The SIA contribution to 𝐾1(𝑎𝑐) partially compensates for the large contribution of the DM interaction in YFeO3, whereas in LuFeO3, they add up.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
41
+ page_content=' This result is confirmed by experimental data on the 2 measurement of the threshold field 𝐻𝑆𝑅 of spin reorientation Γ4 → Γ2 (𝐺𝑥 → 𝐺𝑧) in the orthoferrite Lu0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
42
+ page_content='5Y0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
43
+ page_content='5FeO3, in which 𝐻𝑆𝑅 = 15 T as compared to 𝐻𝑆𝑅 = 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
44
+ page_content='5 T in YFeO3 [10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
45
+ page_content=' Thus, one can estimate 𝐾1(𝑎𝑐) in LuFeO3 as around three times as much as 𝐾1(𝑎𝑐) in YFeO3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
46
+ page_content=' Let us pay attention to recent works on the determination of the parameters of the spin Hamiltonian in YFeO3 from measurements of the spin-wave spectrum by the inelastic neutron scattering [11, 12] and terahertz absorption spectroscopy [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
47
+ page_content=' However, these authors started with a simplified spin-Hamiltonian that took into account only Heisenberg exchange, DM interaction, and single- ion anisotropy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
48
+ page_content=' Obviously, disregarding the magnetic dipole and exchange-relativistic anisotropy, the “single-ion anisotropy” constants found by the authors are some effective quantities that are not directly related to the SIA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
49
+ page_content=' Unfortunately, despite numerous, including fairly recent, studies of the magnetic anisotropy of orthoferrites, we do not have reliable experimental data on the magnitude of the contributions of various anisotropy mechanisms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
50
+ page_content=' As shown by theoretical calculations [8,9,14] the constants 𝐾2 of the fourth order spin anisotropy rather smoothly decrease in absolute value, changing by no more than two times on going from La to Lu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
51
+ page_content=' But the most interesting was the conclusion about the different signs of these constants, positive for the 𝑎𝑐 and 𝑏𝑐 planes and negative for the 𝑎𝑏 plane, thus indicating a different character of spin- reorientation transitions in the corresponding planes, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
52
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
53
+ page_content=', second-order transitions in the 𝑎𝑐 and 𝑏𝑐 planes and first-order transitions in the 𝑎𝑏 plane [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
54
+ page_content=' Indeed, all currently known spin-reorientation transitions of the Γ4 −Γ2 (𝐺𝑥 −𝐺𝑧) type in orthoferrites RFeO3 (R = Pr, Nd, Sm, Tb, Ho, Er, Tm, Yb) are smooth, with two characteristic temperatures of the second-order phase transitions to be a start and finish of the spin reorientation, and the only known jump-like first order SR transition for these crystals is the SR transition Γ4 − Γ1 (𝐺𝑥 − 𝐺𝑦) in the 𝑎𝑏 plane in DyFeO3 [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
55
+ page_content=' A unique example that confirms the conclusions about the sign of the second anisotropy constant is a mixed orthoferrite Ho0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
56
+ page_content='5Dy0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
57
+ page_content='5FeO3 [2] in which two spin-reorientation transitions 𝐺𝑥 − 𝐺𝑦 (𝑇 = 46 K) and 𝐺𝑦 − 𝐺𝑧 (18 ÷ 24 K) are realized through one phase transition of the first order in the 𝑎𝑏 plane and two phase transitions of the second order in the 𝑏𝑐 plane, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
58
+ page_content=' The splitting value ∆(𝜃) for the Kramers doublet in a magnetic field H has the well-known form ∆(𝜃) = 𝜇𝐵 [︀ (𝑔𝑥𝑥𝐻𝑥 + 𝑔𝑥𝑦𝐻𝑦)2 + (𝑔𝑥𝑦𝐻𝑥 + 𝑔𝑦𝑦𝐻𝑦)2 + 𝑔2 𝑧𝑧𝐻2 𝑧 ]︀1/2 , (2) where it is taken into account that for the 4𝑓 ions in RFeO3 the ˆ𝑔-tensor (with the local symmetry 𝐶𝑠) has the form ˆ𝑔 = ⎛ ⎜ ⎝ 𝑔𝑥𝑥 𝑔𝑥𝑦 0 𝑔𝑥𝑦 𝑔𝑦𝑦 0 0 0 𝑔𝑧𝑧 ⎞ ⎟ ⎠ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
59
+ page_content=' (3) The effective field H for the SR transition 𝐺𝑥 → 𝐺𝑧 in the 𝑎𝑐 plane can be represented as follows 𝐻𝑥 = 𝐻(0) 𝑥 cos 𝜃, 𝐻𝑦 = 𝐻(0) 𝑦 cos 𝜃, 𝐻𝑧 = 𝐻(0) 𝑧 sin 𝜃, (4) 3 so in the absence of an external magnetic field, for ∆(𝜃) we have the rather simple expression: ∆(𝜃) = (︂∆2 𝑎 − ∆2 𝑐 2 cos 2𝜃 + ∆2 𝑎 + ∆2 𝑐 2 )︂1/2 , (5) where ∆𝑎,𝑐 are the doublet splitting for the cases of 𝜃 = 0 (𝐺𝑧-phase) and 𝜃 = 𝜋/2 (𝐺𝑥-phase) respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
60
+ page_content=' The dependence ∆(𝜃) from (5) is also valid in the case of quasi-doublet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
61
+ page_content=' A contribution of splitting ∆ to the free energy Φ(𝜃) for the rare-earth sublattice is usually considered in the “high-temperature” approximation, when 𝑘𝑇 ≫ ∆ and the influence of the 4𝑓 sublattice are reduced only to renormalization of the first anisotropy constant 𝐾1: 𝐾* 1 = 𝐾1 (︂ 1 − 1 𝜏 )︂ , (6) where 𝜏 = 𝑇/𝑇𝑆𝑅 is the reduced temperature and 𝑇𝑆𝑅 = (∆2 𝑎 − ∆2 𝑐)/16𝑘𝐾1 is the characteristic transition temperature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
62
+ page_content=' Below we will consider a specific situation when 𝐾1 > 0 and ∆𝑎 > ∆𝑐, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
63
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
64
+ page_content=' when the configuration 𝐺𝑥 (𝜃 = 𝜋/2) is realized at high temperatures and a decrease in temperature can lead to the spin reorientation 𝐺𝑥 → 𝐺𝑧 or 𝐺𝑥 → 𝐺𝑥𝑧 (transition to an angular spin structure).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
65
+ page_content=' The type of the phase transition of the spin reorientation in the “high-temperature” approximation is determined by the sign of the second constant 𝐾2: at 𝐾2 < 0 it will be realized by one first-order phase transition at 𝑇 = 𝑇𝑆𝑅, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
66
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
67
+ page_content=' 𝜏 = 1, or at 𝐾2 > 0 by two second-order phase transitions at 𝜏𝑠 = (1 + 𝛾)−1 and 𝜏𝑓 = (1 − 𝛾)−1, where 𝜏𝑠 and 𝜏𝑓 are the reduced temperatures of the beginning and end of the SR phase transition and 𝛾 = 4𝐾2/𝐾1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
68
+ page_content=' 3 Analysis of the “single-doublet” model A behavior of a system described by the free energy (1) can be analyzed rigorously.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
69
+ page_content=' The condition 𝜕Φ/𝜕𝜃 = 0 reduces in this case to two equations: sin 2𝜃 = 0, (7) 𝛼𝜇 + 𝛽𝜇3 = tanh 𝜇 𝜏 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
70
+ page_content=' (8) where the following notations are introduced: 𝛼 = 1 − 𝛾 ∆2 𝑎 + ∆2 𝑐 ∆2 𝑎 − ∆2 𝑐 , 𝛽 = 2𝛾 𝜇2 𝑓 − 𝜇2 𝑠 , 𝜇 = ∆(𝜃) 2𝑘𝑇𝑆𝑅 , 𝜇𝑠 = ∆𝑐 2𝑘𝑇𝑆𝑅 , 𝜇𝑓 = ∆𝑎 2𝑘𝑇𝑆𝑅 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
71
+ page_content=' (9) This corresponds to three possible magnetic configurations: The configuration 𝐺𝑥: 𝜃 = ±𝜋/2, stable at tanh 𝜇𝑠/𝜏 ≤ 𝛼𝜇𝑠 + 𝛽𝜇3 𝑠 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
72
+ page_content=' The configuration 𝐺𝑧: 𝜃 = 0, 𝜋, stable at tanh 𝜇𝑓/𝜏 ≥ 𝛼𝜇𝑓 + 𝛽𝜇3 𝑓 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
73
+ page_content=' 4 The angular configuration 𝐺𝑥𝑧: the temperature dependence of 𝜃(𝜏) is determined by solving the equation (8) (see Figure 1), the state is stable at 𝜕𝜇/𝜕𝜏 ≤ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
74
+ page_content=' The peculiar 𝜇-𝜏 phase diagram which represents solutions of the master equation (8) given a fixed value of the 𝛼 parameter and different value of the 𝛽 parameter is shown in Figure 1, where areas with different character of the SR transition are highlighted in different colors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
75
+ page_content=' For the solutions in the FO region, the SR goes through one first-order phase transition, in the SO region we arrive at one or two second-order phase transitions, in the MO1,2 regions we arrive at a “mixture” of the first and second-order phase transitions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
76
+ page_content=' All the lines 𝜇(𝜏) on the right side converge to √︀ |𝛼/𝛽| at 𝜏 → ∞;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
77
+ page_content=' on the left side, when 𝜏 → 0 the branch point 𝜇 = 3 2𝛼 is obtained at 𝛽 = − 4 27𝛼3, and the point 𝜇 = 1/𝛼 at 𝛽 = 0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
78
+ page_content=' all the solutions, where 𝜇 can reach zero, converge to 𝜏 = 1/𝛼.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
79
+ page_content=' 0 1/α τ 1/α 3 2 α μ \uf603α / β1\uf604 \uf603α / β2\uf604 \uf603α / β3\uf604 FO MO1 MO2 SO Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
80
+ page_content=' 1: (Color online) The peculiar 𝜇-𝜏 phase diagram which represents solutions of the master equation (8) given a fixed value of the 𝛼 parameter and different value of the 𝛽 parameter (see text for detail).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
81
+ page_content=' The character of the SR transition will be determined by the form of the solution of the equation (8) in the region 𝜇𝑠 ≤ 𝜇 ≤ 𝜇𝑓.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
82
+ page_content=' Let us analyze this equation starting with the simplest case 𝐾2 = 0, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
83
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
84
+ page_content=' 𝛼 = 1, 𝛽 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
85
+ page_content=' In this case, the main equation transforms into the molecular field equation well known in the basic theory of ferromagnetism: 𝜇 = tanh 𝜇 𝜏 = 𝐵 1 2 (︁𝜇 𝜏 )︁ , (10) where 𝐵1/2(𝑥) is the Brillouin function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
86
+ page_content=' The equation has only one non-trivial solution at 0 ≤ 𝜏 ≤ 1, 0 ≤ 𝜇 ≤ 1, and the function 𝜇(𝜏) has the usual “Weiss” form.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
87
+ page_content=' Thus, with the absence of the cubic anisotropy (𝐾2 = 0) in the “single-doublet” model the SR will be realized either through two second-order phase transitions at 𝜇𝑓 ≤ 1 (the complete spin-reorientation 𝐺𝑥 → 𝐺𝑧), or through one second-order phase transition at 𝜇𝑓 > 1, but in this case the SR will be incomplete, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
88
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
89
+ page_content=' it will end with a transition to the angular spin structure 𝐺𝑥𝑧.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
90
+ page_content=' The spin reorientation will begin at a temperature 5 𝑇𝑠 ≤ 𝑇𝑆𝑅 and 𝑇𝑠 is equal to 𝑇𝑆𝑅 only in the case 𝜇𝑠 = 0 (∆𝑐 = 0), which can be realized in the general case only for Ising ions (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
91
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
92
+ page_content=' Dy3+ in DyFeO3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
93
+ page_content=' For this type of ions, the temperature dependence of the “order parameter” 𝜇 (in fact the splitting ∆(𝜃) of the doublet) in a close range of 𝑇𝑆𝑅 will be very sharp: 𝜇(𝑇) ∼ (𝑇 − 𝑇𝑆𝑅)−1/2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
94
+ page_content=' Nevertheless, the SR will be continuous and the temperature range of the SR ∆𝑇 = 𝑇𝑠 − 𝑇𝑓 at 𝜇 ≪ 1 can theoretically reach arbitrarily small values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
95
+ page_content=' Thus, the results of the rigorous analysis of the “single-doublet” model are fundamentally different from the conclusions of the simplified model (the “high-temperature” approximation), according to which for 𝐾2 = 0 the spin reorientation always occurs as the first-order phase transition at 𝑇 = 𝑇𝑆𝑅.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
96
+ page_content=' For a positive second anisotropy constant (𝐾2 > 0, 𝛽 > 0), the main equation (8) has one non- trivial solution in the region 0 ≤ 𝜏 ≤ 1/𝛼, 0 ≤ 𝜇 ≤ 𝜇0 at 𝛼 > 0, and one in the region 0 ≤ 𝜏 ≤ ∞, √︀ |𝛼/𝛽| ≤ 𝜇 ≤ 𝜇0 at 𝛼 ≤ 0, where 𝜇0 is determined from the solution of the equation 𝛼𝜇0 +𝛽𝜇3 0 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
97
+ page_content=' The situation in this case is very similar to the previous one, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
98
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
99
+ page_content=' the beginning of the SR will always be a second-order phase transition, and the reorientation will be complete (𝐺𝑥 → 𝐺𝑧) or incomplete (𝐺𝑥 → 𝐺𝑥𝑧).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
100
+ page_content=' Note that under the condition (𝜇2 𝑓 − 𝜇2 𝑠)/(𝜇2 𝑓 + 𝜇2 𝑠) ≥ 𝛾, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
101
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
102
+ page_content=' 𝛼 ≤ 0, the width of the reorientation region becomes very large, even if 𝜇𝑠 differs slightly from 𝜇𝑓.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
103
+ page_content=' For Ising ions at ∆𝑐 = 0, the SR beginning temperature is determined in exactly the same way as in the “high-temperature” approximation 𝑇𝑠 = 𝑇𝑆𝑅/(1 + 𝛾).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
104
+ page_content=' For a negative second anisotropy constant (𝐾2 < 0, 𝛽 < 0), the several fundamentally different solutions of the main equation (8) are possible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
105
+ page_content=' For 𝐾* 2 ≥ 𝐾2, where 𝐾* 2 is determined from the condition 𝛽 = − 1 3𝛼3, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
106
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
107
+ page_content=' 2𝛾 𝜇2 𝑓 − 𝜇2 𝑠 = −1 3 (︃ 1 − 𝛾 𝜇2 𝑓 + 𝜇2 𝑠 𝜇2 𝑓 − 𝜇2 𝑠 )︃3 , (11) there is one non-trivial solution of the equation (8) in the region 1/𝛼 ≤ 𝜏 < ∞, 𝜇 ≤ √︀ 𝛼/𝛽, but here 𝜇(𝑇) decreases with decreasing temperature, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
108
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
109
+ page_content=' 𝜕𝜇/𝜕𝜏 > 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
110
+ page_content=' This solution is unstable and there is no fundamental possibility for a smooth rotation of spins, the SR is always realized through the first-order phase transition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
111
+ page_content=' In the intermediate range of values 𝐾2 (𝐾* 2 < 𝐾2 < 0 or − 1 3𝛼3 < 𝛽 < 0) the main equation has two non-trivial solutions, and for one of them 𝜕𝜇/𝜕𝜏 > 0 (corresponding to bigger values of 𝜇), and for the second 𝜕𝜇/𝜕𝜏 < 0 (corresponding to smaller values of 𝜇).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
112
+ page_content=' It is convenient to consider separately three areas of variation 𝛽.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
113
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
114
+ page_content=' − 4 27𝛼3 < 𝛽 < 0: a) the first solution: 0 ≤ 𝜏 < ∞, 𝜇> ≤ 𝜇 < √︀ |𝛼/𝛽|, b) the second solution: 0 ≤ 𝜏 ≤ 1/𝛼, 0 ≤ 𝜇 ≤ 𝜇<, where 𝜇>, 𝜇< are the bigger and smaller positive solution of the equation 𝛼𝜇 + 𝛽𝜇3 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
115
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
116
+ page_content=' 𝛽 = − 4 27𝛼3: a) the first solution: 0 ≤ 𝜏 < ∞, 3/(2𝛼) ≤ 𝜇 < √︀ |𝛼/𝛽|, b) the second solution: 0 ≤ 𝜏 ≤ 1/𝛼, 0 ≤ 𝜇 ≤ 3/(2𝛼), moreover, in this case we have a branch point of the main equation solution at 𝜏 = 0, 𝜇 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
117
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
118
+ page_content=' − 1 3𝛼3 < 𝛽 < − 4 27𝛼3: a) the first solution: 𝜏0 ≤ 𝜏 < ∞, 𝜇0 ≤ 𝜇 < √︀ |𝛼/𝛽|, 6 b) the second solution: 𝜏0 ≤ 𝜏 ≤ 1/𝛼, 0 ≤ 𝜇 ≤ 𝜇0, where the quantities 𝜇0, 𝜏0 correspond to the branch points of the main equation solutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
119
+ page_content=' Illustrations of typical (a,b) and unconventional (c,d) SR transitions predicted by simple (quasi)doublet model are shown in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
120
+ page_content=' The Figure 2a, built with 𝐾1 = 1, 𝛾 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
121
+ page_content='05, ∆𝑎 = 30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
122
+ page_content='84, ∆𝑐 = 14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
123
+ page_content='82, which corresponds to 𝑇𝑆𝑅 = 45.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
124
+ page_content='73, 𝜇𝑠 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
125
+ page_content='162, 𝜇𝑓 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
126
+ page_content='337, 𝜏𝑠 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
127
+ page_content='04, 𝜏𝑓 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
128
+ page_content='91, describes a typical smooth SR transition with two second-order phase transitions 𝐺𝑥 − 𝐺���𝑧 at the beginning (𝜏𝑠) and 𝐺𝑥𝑧 − 𝐺𝑧 at the end (𝜏𝑓) of the spin reorientation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
129
+ page_content=' The Figure 2b, built with 𝐾1 = 1, 𝛾 = −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
130
+ page_content='1, ∆𝑎 = 33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
131
+ page_content='19, ∆𝑐 = 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
132
+ page_content='1, which corresponds to 𝑇𝑆𝑅 = 22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
133
+ page_content='95, 𝜇𝑠 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
134
+ page_content='59, 𝜇𝑓 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
135
+ page_content='72, 𝜏𝑠 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
136
+ page_content='762, 𝜏𝑓 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
137
+ page_content='93, describes an abrupt first-order SR transition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
138
+ page_content=' For 𝜏 > 𝜏𝑓 there is the 𝐺𝑥-phase, which can remain stable up to 𝜏𝑠 when cooled.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
139
+ page_content=' For 𝜏 < 𝜏𝑠 there is the 𝐺𝑧-phase, which can remain stable up to 𝜏𝑓 when heated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
140
+ page_content=' The point 𝐴 marks a phase transition point when the phases 𝐺𝑥 and 𝐺𝑧 have equal energies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
141
+ page_content=' The Figure 2c, built with 𝐾1 = 1, 𝛾 = −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
142
+ page_content='222, ∆𝑎 = 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
143
+ page_content='72, ∆𝑐 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
144
+ page_content='63, which corresponds to 𝑇𝑆𝑅 = 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
145
+ page_content='65, 𝜇𝑠 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
146
+ page_content='307, 𝜇𝑓 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
147
+ page_content='266, 𝜏𝑠 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
148
+ page_content='778, 𝜏𝑓 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
149
+ page_content='523 and the Figure 2d, built with 𝐾1 = 1, 𝛾 = −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
150
+ page_content='25, ∆𝑎 = 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
151
+ page_content='71, ∆𝑐 = 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
152
+ page_content='02, which corresponds to 𝑇𝑆𝑅 = 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
153
+ page_content='56, 𝜇𝑠 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
154
+ page_content='396, 𝜇𝑓 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
155
+ page_content='31, 𝜏𝑠 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
156
+ page_content='73, 𝜏𝑓 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
157
+ page_content='545 describe unconventional "mixed"SR transitions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
158
+ page_content=' At 𝜏𝑠 there is the smooth second-order phase transition 𝐺𝑥 − 𝐺𝑥𝑧.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
159
+ page_content=' At 𝜏 ≤ 𝜏𝑓 we have two stable phases 𝐺𝑧 and 𝐺𝑥𝑧: at those temperatures the sharp first-order phase transition 𝐺𝑥𝑧 − 𝐺𝑧 can happen, or the system could stay in the angular 𝐺𝑥𝑧-phase.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
160
+ page_content=' (a) τ τs τf μ μs μf (c) τ τs τf μ μs μf (d) τ τs τf μ μs μf θ Φ θ Φ θ Φ τ > τf τ < τs A (b) τ τs τf μ μs μf A Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
161
+ page_content=' 2: Illustrations of typical (a,b) and unconventional (c,d) SR transitions predicted by simple (quasi)doublet model (see text for detail).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
162
+ page_content=' The arrows indicate the direction of the antiferromagnetic vector G in the 𝑎𝑐 plane.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
163
+ page_content=' The insets in panel (b) show the 𝜃-dependence of the free energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
164
+ page_content=' 7 Thus, there are not only the smooth and abrupt SR transitions, a characteristic feature of the range of intermediate values 𝐾2 is the fundamental possibility of the existence of “mixed” SR transitions, in which the spins first smoothly rotate through a certain angle and then jump to the position with 𝜃 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
165
+ page_content=' For this, it is sufficient that 𝜇𝑓 corresponds to a point on the upper branch of solutions, and 𝜇𝑠 to a point on the lower branch of solutions at 𝜏𝑓 < 𝜏𝑠.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
166
+ page_content=' In this case, the spin reorientation begins with the single second-order transition 𝐺𝑥 → 𝐺𝑥𝑧 and then ends with the first-order phase transition 𝐺𝑥𝑧 → 𝐺𝑧.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
167
+ page_content=' In contrast to the “high-temperature” approximation, the “single-doublet” model claims the nature of the phase transition is determined not simply by the sign of the second anisotropy constant, but also it depends on the ratio between 𝐾1, 𝐾2 and the doublet splitting in both phases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
168
+ page_content=' Nevertheless, if we apply the simplified model to describe the SR transition, we have to renormalize both the first and the second anisotropy constant, giving the last one sometimes a rather complicated temperature dependence, in particular with a change in sign when considering transitions of the “mixed” type.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
169
+ page_content=' Of course, in this case Fe sublattice alone is not enough to provide the value of the effective second constant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
170
+ page_content=' 4 Conclusion The model of the spin-reorientation transitions induced by the 4𝑓 − 3𝑑 interaction in rare-earth orthoferrites and orthochromites has been investigated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
171
+ page_content=' It is shown that both the temperature and the character of the spin-reorientation transition following from the solution of the transcendental equation (8) are the result of competition between the second and fourth order spin anisotropy of the 3𝑑 sublattice, the crystal field for 4f ions, and the 4𝑓 − 3𝑑 interaction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
172
+ page_content=' At variance with the “high-temperature” approximation, the “single-doublet” model, along with typical smooth and abrupt SR transitions, predicts the appearance of mixed-type SR transitions, with an initial second-order transition and a final abrupt first-order transition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
173
+ page_content=' Funding: The research was supported by the Ministry of Education and Science of the Russian Federation, project № FEUZ-2020-0054, and by Russian Science Foundation, project № 22-22-00682.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
174
+ page_content=' References [1] Belov, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
175
+ page_content='P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
176
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
177
+ page_content=' Zvezdin, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
178
+ page_content='K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
179
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
180
+ page_content=' Kadomtseva, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
181
+ page_content='M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
182
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
183
+ page_content=' Levitin, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
184
+ page_content='Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
185
+ page_content=' Spin-reorientation transitions in rare-earth magnets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
186
+ page_content=' Sov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
187
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
188
+ page_content=' Usp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
189
+ page_content=' 1976, 19, 574.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
190
+ page_content=' [2] Belov, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
191
+ page_content='P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
192
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
193
+ page_content=' Zvezdin, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
194
+ page_content='K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
195
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
196
+ page_content=' Kadomtseva, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
197
+ page_content='M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
198
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
199
+ page_content=' Levitin, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
200
+ page_content='Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
201
+ page_content=' Orientational Transitions in Rare- Earth Magnetics;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
202
+ page_content=' Nauka: Moscow, Russia, 1979.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
203
+ page_content=' (In Russian) [3] Singh, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
204
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
205
+ page_content=' Rajput, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
206
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
207
+ page_content=' Padmanabhan, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
208
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
209
+ page_content=' Anas, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
210
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
211
+ page_content=' Damay, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
212
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
213
+ page_content=' Kumar, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
214
+ page_content='M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
215
+ page_content='N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
216
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
217
+ page_content=' Eguchi, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
218
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
219
+ page_content=' Jain, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
220
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
221
+ page_content=' Yusuf, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
222
+ page_content='M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
223
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
224
+ page_content=' Maitra, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
225
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
226
+ page_content=' Malik V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
227
+ page_content='K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
228
+ page_content=' Successive spin reorientations and rare earth ordering in Nd0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
229
+ page_content='5Dy0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
230
+ page_content='5FeO3: Experimental and ab initio investigations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
231
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
232
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
233
+ page_content=' B 2020, 102, 144432.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
234
+ page_content=' 8 [4] Hoogeboom, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
235
+ page_content='R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
236
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
237
+ page_content=' Kuschel, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
238
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
239
+ page_content=' Bauer, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
240
+ page_content='E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
241
+ page_content='W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
242
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
243
+ page_content=' Mostovoy, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
244
+ page_content='V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
245
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
246
+ page_content=' Kimel, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
247
+ page_content='V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
248
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
249
+ page_content=' van Wees, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
250
+ page_content='J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
251
+ page_content=' Magnetic order of Dy3+ and Fe3+ moments in antiferromagnetic DyFeO3 probed by spin Hall magnetoresistance and spin Seebeck effect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
252
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
253
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
254
+ page_content=' B 2021, 103, 134406.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
255
+ page_content=' [5] Tsymbal, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
256
+ page_content='T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
257
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
258
+ page_content=' Bazaliy, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
259
+ page_content='B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
260
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
261
+ page_content=' Derkachenko, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
262
+ page_content='N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
263
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
264
+ page_content=' Kamenev, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
265
+ page_content='I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
266
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
267
+ page_content=' Kakazei, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
268
+ page_content='N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
269
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
270
+ page_content=' Palomares, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
271
+ page_content='J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
272
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
273
+ page_content=' Wigen, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
274
+ page_content='E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
275
+ page_content=' Magnetic and structural properties of spin-reorientation transitions in orthoferrites.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
276
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
277
+ page_content=' Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
278
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
279
+ page_content=' 2007, 101, 123919–123926.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
280
+ page_content=' [6] Sasani, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
281
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
282
+ page_content=' I˜niguez, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
283
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
284
+ page_content=' Bousquet, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
285
+ page_content=' Magnetic phase diagram of rare-earth orthorhombic perovskite oxides.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
286
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
287
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
288
+ page_content=' B 2021, 104, 064431.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
289
+ page_content=' [7] Moskvin, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
290
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
291
+ page_content=' Dzyaloshinskii–Moriya Coupling in 3d Insulators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
292
+ page_content=' Condens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
293
+ page_content=' Matter 2019, 4, 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
294
+ page_content=' [8] Moskvin, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
295
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
296
+ page_content=' Antisymmetric Exchange and Magnetic Anisotropy in Weak Ferromagnets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
297
+ page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
298
+ page_content=' Sc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
299
+ page_content=' Thesis, Lomonosov Moscow State University, Moscow, Russia, 1984.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
300
+ page_content=' (In Russian) [9] Moskvin, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
301
+ page_content=' Structure–Property Relationships for Weak Ferromagnetic Perovskites.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
302
+ page_content=' Magnetochemistry 2021, 7, 111.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
303
+ page_content=' [10] Kadomtseva, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
304
+ page_content='M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
305
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
306
+ page_content=' Agafonov, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
307
+ page_content='P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
308
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
309
+ page_content=' Lukina, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
310
+ page_content='M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
311
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
312
+ page_content=' Milov, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
313
+ page_content='N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
314
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
315
+ page_content=' Moskvin, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
316
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
317
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
318
+ page_content=' Semenov, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
319
+ page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
320
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
321
+ page_content=' Sinitsyn, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
322
+ page_content='V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
323
+ page_content=' Nature of the Magnetic Anisotropy and Magnetostriction of Orthoferrites and Orthochromites.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
324
+ page_content=' JETP 1981, 81, 700–706.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
325
+ page_content=' [11] Hahn, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
326
+ page_content='E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
327
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
328
+ page_content=' Podlesnyak, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
329
+ page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
330
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
331
+ page_content=' Ehlers, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
332
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
333
+ page_content=' Granroth, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
334
+ page_content='E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
335
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
336
+ page_content=' Fishman, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
337
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
338
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
339
+ page_content=' Kolesnikov, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
340
+ page_content='I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
341
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
342
+ page_content=' Pomjakushina, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
343
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
344
+ page_content=' Conder, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
345
+ page_content=' Inelastic neutron scattering studies of YFeO3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
346
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
347
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
348
+ page_content=' B 2014, 89, 014420.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
349
+ page_content=' [12] Park, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
350
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
351
+ page_content=' Sim, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
352
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
353
+ page_content=' Leiner, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
354
+ page_content='C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
355
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
356
+ page_content=' Yoshida, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
357
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
358
+ page_content=' Jeong, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
359
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
360
+ page_content=' Yano, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
361
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
362
+ page_content=' Gardner, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
363
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
364
+ page_content=' Bourges, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
365
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
366
+ page_content=' Klicpera, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
367
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
368
+ page_content=' Sechovsk´y, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
369
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
370
+ page_content=' Boehm, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
371
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
372
+ page_content=' Park, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
373
+ page_content='-G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
374
+ page_content=' Low-energy spin dynamics of orthoferrites AFeO3 (A = Y, La, Bi).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
375
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
376
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
377
+ page_content=' Condens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
378
+ page_content=' Matter 2018, 30, 235802.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
379
+ page_content=' [13] Amelin, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
380
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
381
+ page_content=' Nagel, U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
382
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
383
+ page_content=' Fishman, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
384
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
385
+ page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
386
+ page_content=' Yoshida, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
387
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
388
+ page_content=' Sim, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
389
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
390
+ page_content=' Park, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
391
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
392
+ page_content=' Park, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
393
+ page_content='-G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
394
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
395
+ page_content=' R˜o˜om, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
396
+ page_content=' Terahertz absorption spectroscopy study of spin waves in orthoferrite YFeO3 in a magnetic field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
397
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
398
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
399
+ page_content=' B 2018, 98, 174417.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
400
+ page_content=' [14] Moskvin, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
401
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
402
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
403
+ page_content=' Bostrem, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
404
+ page_content='G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
405
+ page_content=' Cubic Anisotropy of Rare-Earth Orthoferrites.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
406
+ page_content=' Sov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
407
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
408
+ page_content=' Solid St.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
409
+ page_content=' 1979, 21, 628.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
410
+ page_content=' 9' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/-NFLT4oBgHgl3EQfuy_h/content/2301.12157v1.pdf'}
-dE2T4oBgHgl3EQfQgYx/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6dea370554a09950ce45115c13c9b7807f225dbbeae513b07021c0aa7361ebd7
3
+ size 6553645
.gitattributes CHANGED
@@ -3662,3 +3662,72 @@ vdFQT4oBgHgl3EQfuzZ_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -tex
3662
  2NE1T4oBgHgl3EQf5QXz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3663
  JNFOT4oBgHgl3EQfxjTe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3664
  i9FRT4oBgHgl3EQfVze1/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3662
  2NE1T4oBgHgl3EQf5QXz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3663
  JNFOT4oBgHgl3EQfxjTe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3664
  i9FRT4oBgHgl3EQfVze1/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3665
+ ZNAzT4oBgHgl3EQfKvv0/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3666
+ sdE0T4oBgHgl3EQfbAAV/content/2301.02341v1.pdf filter=lfs diff=lfs merge=lfs -text
3667
+ -dE2T4oBgHgl3EQfQgYx/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3668
+ BtAzT4oBgHgl3EQfTfyw/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3669
+ gdAyT4oBgHgl3EQfj_hk/content/2301.00424v1.pdf filter=lfs diff=lfs merge=lfs -text
3670
+ WtFJT4oBgHgl3EQf4y1Z/content/2301.11667v1.pdf filter=lfs diff=lfs merge=lfs -text
3671
+ x9E0T4oBgHgl3EQf-gLJ/content/2301.02816v1.pdf filter=lfs diff=lfs merge=lfs -text
3672
+ OtFAT4oBgHgl3EQfyx73/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3673
+ INAyT4oBgHgl3EQf5vp6/content/2301.00810v1.pdf filter=lfs diff=lfs merge=lfs -text
3674
+ wtFJT4oBgHgl3EQfgSwn/content/2301.11560v1.pdf filter=lfs diff=lfs merge=lfs -text
3675
+ gdAyT4oBgHgl3EQfj_hk/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3676
+ VdAzT4oBgHgl3EQfl_1k/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3677
+ OtAyT4oBgHgl3EQftfkF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3678
+ wtAyT4oBgHgl3EQfnfhR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3679
+ wtFJT4oBgHgl3EQfgSwn/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3680
+ OtAyT4oBgHgl3EQftfkF/content/2301.00594v1.pdf filter=lfs diff=lfs merge=lfs -text
3681
+ DdAyT4oBgHgl3EQf4vob/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3682
+ YtE1T4oBgHgl3EQfwQVD/content/2301.03408v1.pdf filter=lfs diff=lfs merge=lfs -text
3683
+ p9E5T4oBgHgl3EQfkw_7/content/2301.05666v1.pdf filter=lfs diff=lfs merge=lfs -text
3684
+ gtAzT4oBgHgl3EQfMfvB/content/2301.01134v1.pdf filter=lfs diff=lfs merge=lfs -text
3685
+ rtAyT4oBgHgl3EQfZvdA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3686
+ zdFLT4oBgHgl3EQfny-J/content/2301.12129v1.pdf filter=lfs diff=lfs merge=lfs -text
3687
+ TNE3T4oBgHgl3EQfaApj/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3688
+ rtFIT4oBgHgl3EQfyCvK/content/2301.11359v1.pdf filter=lfs diff=lfs merge=lfs -text
3689
+ 19AyT4oBgHgl3EQfbvd_/content/2301.00269v1.pdf filter=lfs diff=lfs merge=lfs -text
3690
+ YdAzT4oBgHgl3EQfmv1k/content/2301.01568v1.pdf filter=lfs diff=lfs merge=lfs -text
3691
+ nNAyT4oBgHgl3EQfYveM/content/2301.00210v1.pdf filter=lfs diff=lfs merge=lfs -text
3692
+ g9AzT4oBgHgl3EQfMvsC/content/2301.01135v1.pdf filter=lfs diff=lfs merge=lfs -text
3693
+ vtE3T4oBgHgl3EQfOQkJ/content/2301.04390v1.pdf filter=lfs diff=lfs merge=lfs -text
3694
+ FtE1T4oBgHgl3EQfXARM/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3695
+ M9FOT4oBgHgl3EQf1zQJ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3696
+ nNAyT4oBgHgl3EQfYveM/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3697
+ t9FAT4oBgHgl3EQfhR3i/content/2301.08593v1.pdf filter=lfs diff=lfs merge=lfs -text
3698
+ ANE0T4oBgHgl3EQfPgBb/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3699
+ WtE1T4oBgHgl3EQfbgSn/content/2301.03174v1.pdf filter=lfs diff=lfs merge=lfs -text
3700
+ ndFLT4oBgHgl3EQffi-_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3701
+ GdE2T4oBgHgl3EQfTQfv/content/2301.03802v1.pdf filter=lfs diff=lfs merge=lfs -text
3702
+ D9A0T4oBgHgl3EQfAv_u/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3703
+ rtFIT4oBgHgl3EQfyCvK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3704
+ 19AyT4oBgHgl3EQfbvd_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3705
+ BtAzT4oBgHgl3EQfTfyw/content/2301.01251v1.pdf filter=lfs diff=lfs merge=lfs -text
3706
+ GdE2T4oBgHgl3EQfTQfv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3707
+ FNAzT4oBgHgl3EQfG_ve/content/2301.01039v1.pdf filter=lfs diff=lfs merge=lfs -text
3708
+ idAyT4oBgHgl3EQf-_pB/content/2301.00900v1.pdf filter=lfs diff=lfs merge=lfs -text
3709
+ WtE1T4oBgHgl3EQfbgSn/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3710
+ 4NA0T4oBgHgl3EQfNf9p/content/2301.02147v1.pdf filter=lfs diff=lfs merge=lfs -text
3711
+ 7dE1T4oBgHgl3EQf7QV5/content/2301.03532v1.pdf filter=lfs diff=lfs merge=lfs -text
3712
+ YdAzT4oBgHgl3EQfmv1k/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3713
+ AdAyT4oBgHgl3EQf3_pa/content/2301.00778v1.pdf filter=lfs diff=lfs merge=lfs -text
3714
+ YtE1T4oBgHgl3EQfwQVD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3715
+ ZtE3T4oBgHgl3EQf2QvF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3716
+ dNE0T4oBgHgl3EQfWgC_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3717
+ z9AyT4oBgHgl3EQfbfcG/content/2301.00261v1.pdf filter=lfs diff=lfs merge=lfs -text
3718
+ zNFRT4oBgHgl3EQfjjf7/content/2301.13591v1.pdf filter=lfs diff=lfs merge=lfs -text
3719
+ 4NA0T4oBgHgl3EQfNf9p/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3720
+ otAzT4oBgHgl3EQfOftW/content/2301.01166v1.pdf filter=lfs diff=lfs merge=lfs -text
3721
+ r9AzT4oBgHgl3EQfPPs-/content/2301.01179v1.pdf filter=lfs diff=lfs merge=lfs -text
3722
+ ZNAzT4oBgHgl3EQfKvv0/content/2301.01104v1.pdf filter=lfs diff=lfs merge=lfs -text
3723
+ FNAzT4oBgHgl3EQfG_ve/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3724
+ r9AzT4oBgHgl3EQfPPs-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3725
+ g9AzT4oBgHgl3EQfMvsC/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3726
+ vtE3T4oBgHgl3EQfOQkJ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3727
+ itAyT4oBgHgl3EQf-_qA/content/2301.00901v1.pdf filter=lfs diff=lfs merge=lfs -text
3728
+ t9AzT4oBgHgl3EQfBvof/content/2301.00946v1.pdf filter=lfs diff=lfs merge=lfs -text
3729
+ zdFLT4oBgHgl3EQfny-J/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3730
+ stE2T4oBgHgl3EQffQeV/content/2301.03925v1.pdf filter=lfs diff=lfs merge=lfs -text
3731
+ sdE0T4oBgHgl3EQfbAAV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3732
+ B9E5T4oBgHgl3EQfTg8R/content/2301.05536v1.pdf filter=lfs diff=lfs merge=lfs -text
3733
+ stE2T4oBgHgl3EQffQeV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
09E3T4oBgHgl3EQfnArs/content/tmp_files/2301.04622v1.pdf.txt ADDED
@@ -0,0 +1,1653 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Ion filling of a one-dimensional nanofluidic channel in the interaction
2
+ confinement regime
3
+ Paul Robin,1 Adrien Delahais,1 Lyd´eric Bocquet,1 and Nikita Kavokine2, 3, a)
4
+ 1)Laboratoire de Physique de l’´Ecole Normale Sup´erieure, ENS, Universit´e PSL, CNRS, Sorbonne Universit´e,
5
+ Universit´e Paris Cit´e, Paris, France
6
+ 2)Department of Molecular Spectroscopy, Max Planck Institute for Polymer Research, Ackermannweg 10,
7
+ 55128 Mainz, Germany
8
+ 3)Center for Computational Quantum Physics, Flatiron Institute, 162 5th Avenue, New York, NY 10010,
9
+ USA
10
+ (Dated: 12 January 2023)
11
+ Ion transport measurements are widely used as an indirect probe for various properties of confined electrolytes.
12
+ It is generally assumed that the ion concentration in a nanoscale channel is equal to the ion concentration
13
+ in the macroscopic reservoirs it connects to, with deviations arising only in the presence of surface charges
14
+ on the channel walls.
15
+ Here, we show that this assumption may break down even in a neutral channel,
16
+ due to electrostatic correlations between the ions arising in the regime of interaction confinement, where
17
+ Coulomb interactions are reinforced due to the presence of the channel walls. We focus on a one-dimensional
18
+ channel geometry, where an exact evaluation of the electrolyte’s partition function is possible with a transfer
19
+ operator approach. Our exact solution reveals that in nanometre-scale channels, the ion concentration is
20
+ generally lower than in the reservoirs, and depends continuously on the bulk salt concentration, in contrast to
21
+ conventional mean-field theory that predicts an abrupt filling transition. We develop a modified mean-field
22
+ theory taking into account the presence of ion pairs that agrees quantitatively with the exact solution and
23
+ provides predictions for experimentally-relevant observables such as the ionic conductivity. Our results will
24
+ guide the interpretation of nanoscale ion transport measurements.
25
+ I.
26
+ INTRODUCTION
27
+ A channel connects two reservoirs filled with a salt so-
28
+ lution at concentration cout. What is the salt concentra-
29
+ tion cin inside the channel? The straightforward answer
30
+ cin = cout is challenged as soon as the channel’s dimen-
31
+ sions are at the nanometre scale1. A deviation typically
32
+ occurs because of the presence of a surface charge density
33
+ Σ on the channel walls. Indeed, a sufficiently long chan-
34
+ nel must remain electrically neutral2, which results in an
35
+ imbalance of the concentrations c±
36
+ in of the positive and
37
+ negative ions. In a cylindrical channel of radius R that
38
+ is smaller than the electrolyte’s Debye length, the con-
39
+ centrations are given by the famous Donnan equilibrium
40
+ result3:
41
+
42
+ in =
43
+
44
+ c2
45
+ out + (2Σ/R)2 ± 2Σ/R.
46
+ (1)
47
+ Eq. (1) is widely used to infer a channel’s surface charge
48
+ from measurements of its conductivity at different salt
49
+ concentrations.
50
+ For sufficiently small surface charges
51
+ (2Σ/R ≪ cout), Eq. (1) predicts cin = cout even at ex-
52
+ treme nanoscales.
53
+ Importantly, this prediction under-
54
+ lies the method for extracting confined ion mobilities
55
+ from transport measurements, which has been applied
56
+ down to 7-˚A-wide two-dimensional channels4. Yet, phys-
57
+ ically, cin = cout stems from the assumption that the
58
+ electrolyte solutions, both in the reservoirs and in the
59
+ a)Electronic mail: [email protected]
60
+ channel, behave as ideal gases of non-interacting ions.
61
+ While such a description is valid in the bulk reservoirs
62
+ at reasonable salt concentrations5, it must be challenged
63
+ in the nanometre-scale channel which is subject to in-
64
+ teraction confinement6 – a reinforcement of the effective
65
+ Coulomb interactions between the ions due to the dielec-
66
+ tric contrast between the solvent (water) and the channel
67
+ wall3,6–14.
68
+ Due to interaction confinement, ions face a self-energy
69
+ barrier Es when entering the channel7,8.
70
+ It was first
71
+ noted by Parsegian7 that this should result in ion exclu-
72
+ sion: the salt concentration within the channel is then
73
+ given by an Arrhenius scaling cin = coute−Es/kBT under
74
+ the assumption of non-interacting ions. However, the re-
75
+ sult becomes more subtle as the confinement-reinforced
76
+ ionic interactions are taken into account.
77
+ Within a
78
+ mean-field description of a spherical nanopore, Dres-
79
+ ner15 predicted an abrupt filling transition, where cin
80
+ was a discontinuous function of cout.
81
+ Later, Palmeri
82
+ and coworkers16,17 recovered a similar transition using a
83
+ three-dimensional model of a cylindrical channel, treated
84
+ within the variational field theory formalism of Netz and
85
+ Orland18.
86
+ While this approach could be applied to a
87
+ realistic geometry, it took into account electrostatic cor-
88
+ relations only approximately.
89
+ An exact treatment of electrostatic correlations is pos-
90
+ sible upon simplification of the geometry to a purely
91
+ one-dimensional model, with the channel wall being
92
+ taken into account by introducing an effective confined
93
+ Coulomb interaction.
94
+ The 1D electrolyte can then be
95
+ mapped onto an Ising or 1D Coulomb-gas-type model;
96
+ the transfer matrix solution of such models was used, for
97
+ arXiv:2301.04622v1 [cond-mat.soft] 11 Jan 2023
98
+
99
+ 2
100
+ Effective interaction
101
+ Self-energy
102
+ B
103
+ A
104
+ FIG. 1.
105
+ Ion filling in the interaction confinement regime.
106
+ A. Schematic of the ion filling problem: a cylindrical
107
+ nanochannel (radius R ∼ 1 nm) is connected to macroscopic reservoirs of aqueous electrolyte. The salt concentration inside
108
+ the channel, cin, may differ from that in the reservoirs, cout. B. Physics of interaction confinement. When a charged species
109
+ enters a nanochannel, the dielectric contrast between water (ϵw ∼ 80) and walls (ϵm ∼ 2) constraints the electric field lines to
110
+ remain within the channel. This process can be interpreted in terms of image charges inside the channel walls, and results in
111
+ an electrostatic self-energy barrier for ions to enter the channel, and reinforced interactions between ions.
112
+ example, to discuss the capacitance of nanoporous sys-
113
+ tems19–21. The lattice models may be taken to the con-
114
+ tinuum limit, and the resulting path integral solutions
115
+ have been used to discuss various ion-exchange phase
116
+ transitions that arise in the presence of fixed discrete
117
+ charges inside the channel9,22,23 and the ionic Coulomb
118
+ blockade phenomenon13.
119
+ Such models are particularly
120
+ rich theoretically, as they support a mapping to non-
121
+ Hermitian quantum mechanics24.
122
+ Nevertheless, to our
123
+ knowledge, the fundamental problem of ion filling in
124
+ an uncharged channel has not been tackled within this
125
+ framework.
126
+ In this paper, we treat the ion-filling problem in the
127
+ interaction confinement regime using an exactly-solvable
128
+ one-dimensional model.
129
+ We find that the value of cin
130
+ is strongly affected by the formation of Bjerrum pairs
131
+ – pairs of oppositely charged ions – within the channel,
132
+ which preclude the occurence of an abrupt filling transi-
133
+ tion. This is in contrast to the prediction of Palmeri and
134
+ coworkers16,17, and to the result of conventional mean-
135
+ field theory. We then build on our exact results to pro-
136
+ pose a modified mean-field model that accounts for the
137
+ relevant physical ingredients, and, particularly, for the
138
+ presence of ion pairs.
139
+ The paper is organized as follows.
140
+ In Section II,
141
+ we present the one-dimensional model and its solution
142
+ within a path-integral formalism. The reader interested
143
+ only in the physical outcomes may skip directly to Sec-
144
+ tion III, where we discuss the model’s prediction for the
145
+ ion concentration within the channel, compare it to the
146
+ mean-field solution, and interpret it in terms of tightly
147
+ bound Bjerrum pairs. In Section IV, we establish a mod-
148
+ ified mean-field theory, based on the notion of phantom
149
+ pairs, that reproduces our exact solution. The mean-field
150
+ theory allows us to determine the number of unpaired
151
+ ions and produces experimentally relevant predictions for
152
+ a nanochannel’s ionic conductance. Section V establishes
153
+ our conclusions.
154
+ II.
155
+ 1D COULOMB GAS MODEL
156
+ A.
157
+ Confined interaction
158
+ We consider a cylindrical channel of radius R and
159
+ length L, connected to macroscopic reservoirs (Fig. 1A).
160
+ We first assume for simplicity that the channel is filled
161
+ with water that has isotropic dielectric permittivity ϵw =
162
+ 80, and that it is embedded in an insulating medium
163
+ with much lower permittivity ϵm (for a lipid membrane7,
164
+ ϵm ∼ 2).
165
+ The effective Coulomb interaction V (x) be-
166
+ tween two monovalent ions separated by a distance x
167
+ on the channel axis can be computed exactly by solving
168
+ Poisson’s equation8,12,13. A simple approximate expres-
169
+ sion can be obtained for x ∼ R (ref.3):
170
+ V (x) ≈
171
+ e2α
172
+ 2πϵ0ϵwRe−|x|/(αR),
173
+ (2)
174
+ where α is a numerical coefficient that depends on the
175
+ ratio ϵw/ϵm (α = 6.3 for ϵw/ϵm = 40). The reinforce-
176
+ ment of electrostatic interactions compared to the usual
177
+ e2/4πϵ0ϵwr Coulomb interaction that ions experience in
178
+ bulk water can be interpreted in terms of images charges
179
+ within the channel walls (Fig. 1B). Two confined ions
180
+ interact not only with each other, but also with their
181
+ respective image charges.
182
+ We introduce the parameters ξ ≡ αR and xT
183
+
184
+ 2πϵ0ϵwR2kBT/e2: both have the dimension of a length.
185
+
186
+ 3
187
+ With these notations,
188
+ V (x) = kBT ξ
189
+ xT
190
+ e−|x|/ξ.
191
+ (3)
192
+ The effects of ion valence and of anisotropic dielectric
193
+ response of confined water can be taken into account by
194
+ adjusting ξ and xT 13. Formally, the expression in Eq. (2)
195
+ is valid for any channel radius.
196
+ Yet, it is only physi-
197
+ cally relevant if at x ∼ R the interaction is significant
198
+ compared to kBT, which restricts in practice the appli-
199
+ cability of Eq. (2) to R ≲ 2 nm. In such extreme 1D
200
+ confinement, we may neglect the ions’ degrees of free-
201
+ dom perpendicular to the channel axis and assume that
202
+ they are constrained to move in one dimension. The par-
203
+ tition function of such a 1D electrolyte may be computed
204
+ exactly, as detailed in the next section.
205
+ B.
206
+ Path integral formalism
207
+ Here, we detail the analytical solution for the partition
208
+ function of a 1D Coulomb gas-like system that was first
209
+ introduced in ref.13. We set kBT = 1 until the end of Sec.
210
+ II. We start from a lattice model, in order to rigorously
211
+ establish a path integral description in the continuum
212
+ limit.
213
+ Our computation is inspired by the original solution of the 1D Coulomb gas model by Lenard and Edwards25, and
214
+ subsequent studies by Demery, Dean and coworkers19,21,26,27, as well as Shklovskii and coworkers22,23. We consider
215
+ a one-dimensional lattice with sites 1, . . . , M as a model for the nanochannel of radius R and length L. Each lattice
216
+ site i carries a spin Si, which takes the values {0, 1, −1}, corresponding respectively to no ion, a positive ion, or a
217
+ negative ion occupying the site. We model the surface charge distribution as an extra fixed charge qi added at each
218
+ lattice site. The spins interact with the Hamiltonian
219
+ H({Si}) =
220
+ ξ
221
+ 2xT
222
+ M
223
+
224
+ i,j=1
225
+ (Si + qi)(Sj + qj)e−|i−j|/ξ ≡
226
+ 1
227
+ 2xT
228
+ (S + q)T C(S + q).
229
+ (4)
230
+ The system is in contact with a particle reservoir at concentration cout. Here the parameters ξ and xT are dimension-
231
+ less, expressed in number of lattice sites.
232
+ The grand partition function is given by
233
+ Ξ =
234
+
235
+ S1,...,SM
236
+ z
237
+
238
+ i |Si|e−
239
+ 1
240
+ 2xT (S+q)T C(S+q),
241
+ (5)
242
+ with z = coutπR2L/M the fugacity. The matrix C can be analytically inverted:
243
+ C−1 =
244
+ 1
245
+ 2ξ sinh(1/ξ) ·
246
+
247
+
248
+
249
+
250
+
251
+
252
+
253
+
254
+
255
+
256
+
257
+
258
+
259
+ e1/ξ
260
+ −1
261
+ 0
262
+ 0
263
+ . . .
264
+ 0
265
+ 0
266
+ −1
267
+ 2 cosh(1/ξ) −1
268
+ 0
269
+ . . .
270
+ 0
271
+ 0
272
+ ...
273
+ ...
274
+ ... ...
275
+ ...
276
+ ...
277
+ ...
278
+ ... ... ...
279
+ ...
280
+ ...
281
+ ...
282
+ ... ...
283
+ ...
284
+ ...
285
+ 0
286
+ 0
287
+ . . .
288
+ 0
289
+ −1 2 cosh(1/ξ)
290
+ −1
291
+ 0
292
+ 0
293
+ . . . . . .
294
+ 0
295
+ −1
296
+ e1/ξ
297
+
298
+
299
+
300
+
301
+
302
+
303
+
304
+
305
+
306
+
307
+
308
+
309
+
310
+ .
311
+ (6)
312
+ Hence we can carry out a Hubbard-Stratonovich transformation, that is rewrite the partition function as a gaussian
313
+ integral, introducing the integration variable ϕ:
314
+ Ξ =
315
+
316
+ xM
317
+ T
318
+ (2π)Mdet(C) ·
319
+
320
+ S1,...,SM
321
+ z
322
+
323
+ i |Si|
324
+
325
+ dϕe− xT
326
+ 2 ϕT C−1ϕ+i(S+q)T ϕ,
327
+ (7)
328
+ with det(C) =
329
+ e1/ξ
330
+ 2 sinh(1/ξ) ·
331
+
332
+ ξ(1 − e−2/ξ)
333
+ �M. After performing the sum over the spins, which is now decoupled, we
334
+ obtain
335
+ Ξ =
336
+
337
+ xM
338
+ T
339
+ (2π)Mdet(C) ·
340
+
341
+ dϕ1 . . . dϕM
342
+ M
343
+
344
+ j=1
345
+ (1 + 2z cos ϕj)
346
+ M
347
+
348
+ j=1
349
+ eiqjϕj . . .
350
+ . . . exp
351
+
352
+ �−
353
+ xT
354
+ 4ξ sinh(1/ξ)
355
+
356
+
357
+ M−1
358
+
359
+ j=1
360
+ (ϕj+1 − ϕj)2 + 2(cosh(1/ξ) − 1)
361
+ M−1
362
+
363
+ j=2
364
+ ϕ2
365
+ j + (e1/ξ − 1)(ϕ2
366
+ 1 + ϕ2
367
+ M)
368
+
369
+
370
+
371
+ � .
372
+ (8)
373
+
374
+ 4
375
+ We now take a continuum limit of the lattice model. We call a the physical lattice spacing and let ˜ξ = aξ, ˜xT = axT
376
+ and ˜z = Mz/L. We then let a → 0 and M → ∞ while keeping the physical length of the system L = aM constant.
377
+ We then drop the tilde sign to lighten the notation and obtain
378
+ Ξ =
379
+
380
+ dϕ(0)e−xT ϕ(0)2/4ξ
381
+
382
+ [dϕ]e−S[ϕ]
383
+
384
+ dϕ(L)e−xT ϕ(L)2/4ξ
385
+ (9)
386
+ with
387
+ S[ϕ] =
388
+ � L
389
+ 0
390
+ dx
391
+
392
+ xT
393
+ 4
394
+ �dϕ
395
+ dx
396
+ �2
397
+ + xT
398
+ 4ξ2 ϕ(x)2 − iq(x)ϕ(x) − 2z cos ϕ(x)
399
+
400
+
401
+ � L
402
+ 0
403
+ L(ϕ, ˙ϕ).
404
+ (10)
405
+ q(x) is the one-dimensional density corresponding to the surface charge, and z ≡ πR2cout. At this point ξ and xT
406
+ have the dimension of length. The path integral measure is defined as
407
+ [dϕ] =
408
+ lim
409
+ a→0
410
+ M→∞
411
+ L=aM
412
+
413
+
414
+ M
415
+
416
+ j=1
417
+ � xT
418
+ 4πadϕj
419
+
420
+ � .
421
+ (11)
422
+ We now define the propagator P(ϕ, x|ϕ0, 0), or simply P(ϕ, x), as
423
+ P(ϕ, x) =
424
+
425
+ dϕ(x)δ(ϕ(x) − ϕ)
426
+
427
+ [dϕ]e−
428
+ � x
429
+ 0 L(ϕ, ˙ϕ)
430
+
431
+ dϕ(0)δ(ϕ(0) − ϕ0).
432
+ (12)
433
+ Considering an infinitesimal displacement ∆x,
434
+ P(ϕ, x) =
435
+ � xT
436
+ 4π∆x
437
+
438
+ d(∆ϕ)P(ϕ − ∆ϕ, x − ∆x) . . .
439
+ . . . exp
440
+
441
+
442
+ � x
443
+ x−∆x
444
+ dx′
445
+
446
+ xT
447
+ 4
448
+ �∆ϕ
449
+ ∆x
450
+ �2
451
+ + xT
452
+ 4ξ2 ϕ2 − iq(x)ϕ − 2z cos ϕ
453
+ ��
454
+ .
455
+ (13)
456
+ Expanding the propagator as P(ϕ − ∆ϕ, x − ∆x) = P(ϕ, x) − ∆x∂P/∂x − ∆ϕ∂P/∂ϕ + (1/2)(∆ϕ2)∂2P/∂ϕ2, and
457
+ carrying out the gaussian integrals, we obtain
458
+ P(ϕ, x) =
459
+
460
+ P(ϕ, x) − ∆x∂P
461
+ ∂x + O(∆x2)
462
+ � �
463
+ 1 − ∆x
464
+ � xT
465
+ 4ξ2 ϕ2 − iq(x)ϕ − 2z cos ϕ
466
+
467
+ + O(∆x2)
468
+
469
+ + ∆x
470
+ xT
471
+ ∂2P
472
+ ∂x2 (1 + O(∆x)).
473
+ (14)
474
+ P(ϕ, x) thus solves the partial differential equation
475
+ ∂P
476
+ ∂x = 1
477
+ xT
478
+ ∂2P
479
+ ∂ϕ2 +
480
+
481
+ iqϕ − xT
482
+ 4ξ2 ϕ2 + 2z cos ϕ
483
+
484
+ P,
485
+ (15)
486
+ with initial condition P(ϕ, 0) = δ(ϕ − ϕ0), which is the equivalent of a Schr¨odinger equation for the path integral
487
+ representation (9). The partition function can thus be computed as
488
+ Ξ =
489
+
490
+ dϕ(L)e−xT ϕ2/4ξP(ϕ, L|f0),
491
+ (16)
492
+ where P(ϕ, L|f0) is the solution of (15) with initial condition P(ϕ, 0) = f0(ϕ) ≡ e−xT ϕ2/4ξ.
493
+ C.
494
+ Transfer operator
495
+ We introduce the Fourier transform of P with respect to ϕ:
496
+ ˜P(k, x) =
497
+ 1
498
+
499
+
500
+
501
+ dϕe−ikϕP(ϕ, x).
502
+ (17)
503
+
504
+ 5
505
+ Then ˜P(k, x) satisfies
506
+ ∂ ˜P
507
+ ∂x = − k2
508
+ xT
509
+ ˜P − q ∂ ˜P
510
+ ∂k + xT
511
+ 4ξ2
512
+ ∂2 ˜P
513
+ ∂k2 + z
514
+
515
+ ˜P(k + 1, x) + ˜P(k − 1, x)
516
+
517
+ .
518
+ (18)
519
+ From now on, we restrict ourselves to an uncharged channel (q = 0). We then define the operator T such that
520
+ [T ( ˜P)](k) = − k2
521
+ xT
522
+ ˜P + xT
523
+ 4ξ2
524
+ ∂2 ˜P
525
+ ∂k2 + z
526
+
527
+ ˜P(k + 1, x) + ˜P(k − 1, x)
528
+
529
+ ,
530
+ (19)
531
+ which plays the role of a functional transfer matrix. Recalling eq. (16), the partition function then reads
532
+ Ξ = ⟨f0|eLT |f0⟩
533
+ (20)
534
+ with f0(k) = e−ξk2/xT and ⟨f(k)|g(k)⟩ ≡
535
+
536
+ dkf ∗(k)g(k).
537
+ Now, in the limit L → ∞, we may consider the largest eigenvalue λ of the operator T , and the associated eigen-
538
+ function χ:
539
+ [T (χ)](k) = λχ(k).
540
+ (21)
541
+ Then, up to an exponentially small correction,
542
+ Ξ = |⟨f0|χ⟩|2⟨χ|χ⟩eλL.
543
+ (22)
544
+ D.
545
+ Ion concentration
546
+ Our aim is to compute the salt concentration cin in the nanoscale channel given a salt concentration cout in the
547
+ reservoir. At the level of the lattice model, the probability to find, say, a positive ion at position k, can be computed
548
+ by replacing a factor (1 + 2z cos ϕk) by zeiϕk in Eq. (8). In the continuum limit, we obtain the positive (negative) ion
549
+ linear density at position x by inserting the operator zeiϕ (ze−iϕ) at position x:
550
+ πR2⟨c±
551
+ in(x)⟩ = 1
552
+ Ξ
553
+
554
+ dϕ(0)dϕ(x)dϕ(L)e−xT ϕ(0)2/4ξP(ϕ(x), x|ϕ(0), 0)ze±iϕ(x)P(ϕ(L), L|ϕ(x), x)e−xT ϕ(L)2/4ξ,
555
+ (23)
556
+ Upon Fourier-transformation, the insertion of eiϕ amounts to a shift by unity. Introducing the operator,
557
+ SQ : f �→ (g : k �→ f(k − Q)),
558
+ (24)
559
+ the concentrations are given by
560
+ ⟨c±
561
+ in(x)⟩ =
562
+ z
563
+ πR2
564
+ ⟨f0|exT S±1e(L−x)T |f0⟩
565
+ Ξ
566
+ = cout
567
+ ⟨f0|exT S±1e(L−x)T |f0⟩
568
+ Ξ
569
+ ,
570
+ (25)
571
+ since z = coutπR2. In the thermodynamic limit, and using Eq. (22) for the partition function, we obtain
572
+ ⟨c±
573
+ in⟩ = cout
574
+ ⟨χ(k)|χ(k ∓ 1)⟩
575
+ ⟨χ(k)|χ(k)⟩
576
+ .
577
+ (26)
578
+ Eq. (26) is the main result of our exact computation. In practice, the function χ(k) is determined numerically, by
579
+ finite-difference integration of Eq. (18).
580
+ III.
581
+ PHYSICS OF ION FILLING
582
+ A.
583
+ Debye-H¨uckel solution
584
+ We now go back to the ion filling problem (Fig. 1A)
585
+ and present first a one-dimensional mean-field solution.
586
+ Typically, the mean-field solution of an electrolyte prob-
587
+ lem is obtained by solving the Poisson-Boltzmann equa-
588
+ tion28,29. For the conventional Poisson-Boltzmann equa-
589
+ tion to apply, we would need to consider the full three-
590
+ dimensional geometry of our problem, and the effective
591
+ interaction of Eq. (3) would be introduced implicitly
592
+ through the boundary conditions at the channel walls15.
593
+
594
+ 6
595
+ B
596
+ A
597
+ C
598
+ Concentration
599
+ Distance
600
+ Anions
601
+ Cations
602
+ Debye cloud
603
+ 10-4
604
+ 10-2
605
+ 100
606
+ Reservoir concentration (M)
607
+ 0.5
608
+ 0.6
609
+ 0.7
610
+ 0.8
611
+ 0.9
612
+ 1
613
+ Channel conc./Res. conc.
614
+ Exact solution
615
+ Series expansion
616
+ Poisson-Boltzmann
617
+ Debye-Hückel
618
+ 10-4
619
+ 10-2
620
+ 100
621
+ Reservoir concentration (M)
622
+ 10-3
623
+ 10-2
624
+ 10-1
625
+ 100
626
+ Channel conc./Res. conc.
627
+ Exact solution
628
+ Series expansion
629
+ Poisson-Boltzmann
630
+ Debye-Hückel
631
+ Bulk
632
+ Self-energy
633
+ barrier
634
+ Bulk
635
+ Self-energy
636
+ barrier
637
+ Ion pairs
638
+ Weak interactions:
639
+ Es = 0.5 kBT
640
+ Strong interactions:
641
+ Es = 6 kBT
642
+ FIG. 2. Comparing mean-field approximations with the exact Coulomb gas solution. A. Schematic description
643
+ of the mean-field approaches.
644
+ The chemical potential of confined ions is determined by solving the (linear or nonlinear)
645
+ Poisson-Boltzmann equation around a given ion, interacting with an oppositely charged Debye cloud.
646
+ B. Dependence of
647
+ the channel salt concentration cin on the reservoir salt concentration cout, in a weakly-interacting case (R = 1 nm, ξ = 7 nm,
648
+ xT = 7 nm, Es = 0.5 kBT). We plot four different predictions for the ratio cin/cout: the exact field-theoretical solution (Eq. (26),
649
+ blue circles), its low concentration expansion (Eq. (47), black line), the mean-field predictions from solving the full Poisson-
650
+ Boltzmann equation (Eq. (40), orange curve) or from its Debye-H¨uckel linearization (Eq. (36), yellow line). The two mean-field
651
+ predictions are indistinguishable.
652
+ In all cases, the naive estimate cin = cout is recovered for high enough concentrations.
653
+ In the dilute limit, the concentration inside the channel is well approximated by the Arrhenius scaling cin = coute−Es/kBT .
654
+ C. Dependence of the channel salt concentration cin on the reservoir salt concentration cout, in a strongly-interacting case
655
+ (R = 1 nm, ξ = 7 nm, xT = 0.6 nm, Es = 6 kBT). The color code is the same as in B. Here, the mean-field predictions strongly
656
+ deviate from the exact solution, with the Debye-H¨uckel model predicting an abrupt filling transition. This discrepancy is due
657
+ to the formation of Bjerrum pairs at intermediate concentrations, as evidenced by the scaling cin ∝ c2
658
+ out in the exact solution.
659
+ In order to obtain a mean-field solution directly in the
660
+ 1D geometry, we need to introduce a modified Poisson’s
661
+ equation for the electrostatic potential Φ whose Green’s
662
+ function coincides with Eq. (3):
663
+ � d2
664
+ dx2 − 1
665
+ ξ2
666
+
667
+ φ = −2πR2 c+ − c−
668
+ xT
669
+ ,
670
+ (27)
671
+ with φ ≡ eΦ/kBT the dimensionless potential.
672
+ Im-
673
+ posing that the ions follow a Boltzmann distribution
674
+ (c± = cine∓φ, where cin is understood as the average con-
675
+ centration inside the channel), we obtain the analogue of
676
+ the Poisson-Boltzmann equation in our 1D geometry:
677
+ � d2
678
+ dx2 − 1
679
+ ξ2
680
+
681
+ φ = 2πR2 cin
682
+ xT
683
+ sinh φ.
684
+ (28)
685
+ In order to proceed analytically, we make a Debye-
686
+ H¨uckel-type approximation and linearize Eq. (28) with
687
+ respect to φ. Then, the potential around an ion placed
688
+ in the channel at x = 0 is given by
689
+ φ(x) = ξeff
690
+ xT
691
+ e−|x|/ξeff,
692
+ (29)
693
+ with
694
+ ξ2
695
+ eff =
696
+ ξ2
697
+ 1 + 4πR2cinξ2/xT
698
+ .
699
+ (30)
700
+ The chemical potential inside the channel is the sum of
701
+ an ideal gas entropic part and of an excess part due to
702
+ interactions:
703
+ µin = µent + µex,
704
+ (31)
705
+ with
706
+ µent = kBT log coutΛ3,
707
+ (32)
708
+ Λ being the De Broglie thermal wavelength of the ions.
709
+ µex can be obtained via a Debye charging process30:
710
+ µex
711
+ kBT =
712
+ � 1
713
+ 0
714
+ φλ(0)dλ, φλ(0) =
715
+ λξ/xT
716
+
717
+ 1 + 4λπR2cinξ2/xT
718
+ .
719
+ (33)
720
+ We determine cin by imposing equality of the chemical
721
+ potentials between the channel and the reservoir:
722
+ µout = kBT log coutΛ3 = µin,
723
+ (34)
724
+ which yields
725
+ cin = coute−µex/kBT .
726
+ (35)
727
+ Evaluating analytically the integral in Eq. (33), we obtain
728
+ an implicit equation for cin.
729
+ With the notation ˆcin ≡
730
+ πR2cin,
731
+ cin = cout exp
732
+
733
+ − ξ
734
+ 2xT
735
+ ×
736
+ x2
737
+ T
738
+ 6ξ2ˆc2
739
+ inξ2
740
+
741
+ 1 − 3
742
+ 2(1 + 4ˆcinξ2/xT )1/2
743
+ +1
744
+ 2(1 + 4ˆcinξ2/xT )3/2
745
+ ��
746
+ .
747
+ (36)
748
+
749
+ 7
750
+ In Fig. 2B and C, we plot the ratio cin/cout as a func-
751
+ tion of cout, as obtained by numerically solving Eq. (36).
752
+ We fix ξ = 7 nm (which corresponds to a channel with
753
+ R ≈ 1 nm and strong dielectric contrast), and vary
754
+ xT to set the ionic interaction strength.
755
+ The interac-
756
+ tion strength may be quantified through the self-energy
757
+ barrier, Es = kBT × ξ/(2xT ). The limiting behavior of
758
+ cin/cout may be understood directly from Eq. (36). When
759
+ cin is small, Eq. (36) reduces to the Arrhenius scaling
760
+ cin = coute−Es/kBT : this result typically holds for bio-
761
+ logical ion channels which may contain either 0 or 1 ion at
762
+ any given time, and the effect of inter-ionic interactions
763
+ is negligible. When cin is large, we recover cin = cout. In-
764
+ deed, the excess term in the chemical potential vanishes
765
+ at high concentrations, which is then dominated by the
766
+ entropic term. The fact that µex → 0 as cin → ∞ is non-
767
+ trivial: it can be seen, physically, as resulting from the
768
+ Coulomb potential of each ion being perfectly screened
769
+ by the other ions. At small values of Es, Eq. (36) has
770
+ a single solution for all values of cout, which interpolates
771
+ smoothly between the two limiting regimes.
772
+ However,
773
+ for Es ≳ 5kBT, it has three solutions in a certain range
774
+ of cout, pointing to a pseudo-first-order phase transition
775
+ between a low-concentration and a high-concentration
776
+ phase, similar to the one predicted by Dresner15 and
777
+ Palmeri et al.16. The transition occurs at ˆcin ∼ xT /ξ2: as
778
+ per Eq. (30), this corresponds to the concentration where
779
+ the effect of the screening cloud on an ion’s Coulomb po-
780
+ tential becomes significant.
781
+ B.
782
+ Full Poisson-Boltzmann solution
783
+ The physical content of the mean-field solution pre-
784
+ sented above is similar to the one of Dresner, based on
785
+ a linearized Poisson-Boltzmann equation15. The differ-
786
+ ence in geometry, and the fact that he foregoes the use
787
+ of the Debye charging process, do not seem to play a sig-
788
+ nificant qualitative role. The solution of Palmeri et al.16
789
+ takes ionic correlations into account to some extent, yet
790
+ it still involves a Debye-H¨uckel-type linear equation for
791
+ the mean-field interaction potential between the ions.
792
+ One may ask whether the same phenomenology per-
793
+ sists if one does not linearize the Poisson-Boltzmann
794
+ equation. The full Poisson-Boltzmann equation cannot
795
+ be solved analytically, but supports the following inte-
796
+ gral form:
797
+ �dφ
798
+ dx
799
+ �2
800
+ − 1
801
+ ξ2 φ2 = 4πR2 cin
802
+ xT
803
+ (cosh φ − 1) ,
804
+ (37)
805
+ where we have used the fact that φ should vanish at x →
806
+ ∞. For x → 0, the solution of Eq. (37) should reduce
807
+ to the unscreened potential in Eq. (3) up to an additive
808
+ constant, so that
809
+ 1
810
+ x2
811
+ T
812
+ − 1
813
+ ξ2 φ2(0) = 4πR2 cin
814
+ xT
815
+ (cosh φ(0) − 1) .
816
+ (38)
817
+ Once again, one may express the excess chemical po-
818
+ tential of the confined ions through a Debye charging
819
+ process:
820
+ µex
821
+ kBT =
822
+ � 1
823
+ 0
824
+ φλ(0)dλ,
825
+ λ2
826
+ x2
827
+ T
828
+ − 1
829
+ ξ2 φ2
830
+ λ(0) = 4πR2 λcin
831
+ xT
832
+ (cosh φλ(0) − 1) .
833
+ (39)
834
+ This result is the analogue of Eq. (33), with φλ(0) now
835
+ being the solution of an implicit non-linear equation, so
836
+ that µex must be determined numerically. As before, the
837
+ concentration inside the channel is then given by:
838
+ cin = coute−µex/kBT .
839
+ (40)
840
+ The prediction of the full Poisson-Boltzmann equation
841
+ is shown in Fig. 2B and C: we find cin to be a smooth
842
+ function of cout for all values of parameters, in contrast to
843
+ the linearized solution. We may not, however, unambigu-
844
+ ously conclude that the filling transition is an artifact of
845
+ linearization, since the non-linear solution still involves a
846
+ mean-field approximation and is not guaranteed to yield
847
+ the correct result.
848
+ Interestingly, the “physically-motivated” mean-field
849
+ solution in Eq. (28) differs from the mean-field limit of
850
+ our exact solution. It is obtained by taking the saddle-
851
+ point approximation in the path-integral expression of
852
+ the partition function (Eq. (9)).
853
+ The Euler-Lagrange
854
+ equation for the minimizer ϕ(x) of the action S[ϕ] in
855
+ Eq. (10) is, upon identifying φ = −iϕ,
856
+ � d2
857
+ dx2 − 1
858
+ ξ2
859
+
860
+ φ = 2πR2 cout
861
+ xT
862
+ sinh φ.
863
+ (41)
864
+ This is Eq. (28) with cin replaced with cout, and corre-
865
+ sponds to a first order treatment of interactions. Indeed,
866
+ if the ions are non-interacting, cin = cout. By solving the
867
+ mean-field equation, we determine how the ions’ chemi-
868
+ cal potential is affected by Debye screening, which then
869
+ results in a value of cin that is different from cout. Within
870
+ a straightforward interaction expansion procedure, one
871
+ should determine the effect of screening assuming the ze-
872
+ roth order value for the ion concentration inside the chan-
873
+ nel, which is cout: this corresponds to Eq. (41). Eq. (28)
874
+ contains an additional self-consistency condition, as it
875
+ assumes the actual value cin for the ion concentration,
876
+ which is not known until Eq. (28) is solved. One may
877
+ draw a loose condensed matter physics analogy, where
878
+ Eq. (41) resembles the Born approximation for impu-
879
+ rity scattering, while Eq. (28) is analogous to the self-
880
+ consistent Born approximation.31
881
+ C.
882
+ Exact solution
883
+ We now turn to the exact solution obtained in Sec.
884
+ II to unambiguously solve the ion filling problem. We
885
+
886
+ 8
887
+ determine cin according to Eq. (26):
888
+ ⟨c±
889
+ in⟩ = cout
890
+ ⟨χ(k)|χ(k ∓ 1)⟩
891
+ ⟨χ(k)|χ(k)⟩
892
+ ,
893
+ (42)
894
+ where χ(k) is the highest eigenfunction of the trans-
895
+ fer operator in Eq. (19), determined in practice by nu-
896
+ merical integration. The exact results for cin, with the
897
+ same parameter values as for the mean-field solution,
898
+ are shown in Fig. 2 B and C. When interactions are
899
+ weak (small values of Es, Fig. 2B), the exact and mean-
900
+ field solutions are in good agreement. Notably, all so-
901
+ lutions smoothly interpolate between the bulk scaling
902
+ cin = cout at high concentration, and the Arrhenius scal-
903
+ ing cin = coute−Es/kBT at low concentration. Conversely,
904
+ in the strongly-interacting case (large Es, Fig. 2C), the
905
+ exact result yields a much larger ion concentration than
906
+ the mean-field solutions for intermediate values of cout.
907
+ In this intermediate regime, cin remains a smooth func-
908
+ tion of cout, and obeys the scaling cin ∝ c2
909
+ out.
910
+ Such a scaling is the signature of the formation of
911
+ tightly bound Bjerrum pairs of positive and negative ions
912
+ – strongly-correlated configurations that are not taken
913
+ into account by mean-field solutions. Indeed, let us as-
914
+ sume that the channel contains an ideal gas of ion pairs at
915
+ concentration cin. We further assume that in a pair, the
916
+ distance between the two ions is uniformly distributed
917
+ in the interval [−xT /2, xT /2], and the binding energy of
918
+ a pair is kBTξ/xT = 2Es.
919
+ Then, the grand partition
920
+ function reads
921
+ Ξ =
922
+
923
+ N
924
+ (ze−βEs)2N 1
925
+ N!
926
+ N
927
+
928
+ i=1
929
+ L
930
+ � xT /2
931
+ −xT /2
932
+ dx e2βEs
933
+ (43)
934
+ =
935
+
936
+ N
937
+ (z2LxT )N
938
+ N!
939
+ = ez2LxT ,
940
+ (44)
941
+ where we recall that z = πR2cout and β ≡ 1/(kBT).
942
+ Using that
943
+ πR2cin = 1
944
+ L
945
+ ∂ log Ξ
946
+ ∂(βµ) = z
947
+ L
948
+ ∂ log Ξ
949
+ ∂z
950
+ ,
951
+ (45)
952
+ we obtain
953
+ cin = 2z2xT
954
+ πR2
955
+ = 2πR2xT c2
956
+ out.
957
+ (46)
958
+ We recover indeed the quadratic scaling.
959
+ We may check that the prefactor in Eq. (46) is the
960
+ correct one by evaluating analytically the expression in
961
+ Eq. (26) in the low concentration limit zT ≡ zxT ≪ 1.
962
+ An analytical expansion of the function χ(k) in powers
963
+ of zT was derived in ref.13. Substituting it into Eq. (26),
964
+ we obtain
965
+ πR2cin = z(e−βEs + 2zT − 13
966
+ 2 z2
967
+ T e−βEs
968
+ −7z3
969
+ T + O(z4
970
+ T ) + O(e−2βEs)).
971
+ (47)
972
+ The first term in the expansion corresponds to cin =
973
+ coute−βEs.
974
+ At the lowest salt concentrations, forming
975
+ Bjerrum pairs is too entropically unfavorable, and the
976
+ concentration inside the channel is controlled by the self-
977
+ energy barrier.
978
+ However, as the salt concentration in-
979
+ creases, there is no abrupt transition to a highly-screened
980
+ concentrated phase inside the channel; instead, the chan-
981
+ nel is progressively filled by Bjerrum pairs. This corre-
982
+ sponds to the quadratic term in the expansion, with the
983
+ prefactor agreeing indeed with Eq. (46).1 The expansion
984
+ in Eq. (47) reproduces quite well the low-concentration
985
+ behavior of the exact solution as shown in Fig. 2B and
986
+ C. However, it fails at high concentrations, where it does
987
+ not recover cin = cout.
988
+ Our exact analysis of the ion statistics in a nanoscale
989
+ channel has revealed that Bjerrum pairs are a crucial in-
990
+ gredient of the filling process. We now develop a modified
991
+ mean-field theory that accounts for the presence of Bjerrum
992
+ pairs and compare it to the exact solution.
993
+ IV.
994
+ PAIR-ENHANCED MEAN-FIELD THEORY
995
+ A.
996
+ Debye-H¨uckel-Bjerrum theory
997
+ The traditional mean-field treatment of electrolytes is
998
+ incapable of taking Bjerrum pairs into account, as it nat-
999
+ urally neglects any strong ion-ion correlations – pairing
1000
+ being a fundamentally discrete phenomenon.
1001
+ An idea
1002
+ proposed by Bjerrum to amend the Debye-H¨uckel theory
1003
+ was to introduce ion pairs as a separate species encapsu-
1004
+ lating all “strong” ion-ion correlations32. More precisely,
1005
+ any two oppositely charged ions that are closer than some
1006
+ minimum distance can be considered as a single neutral
1007
+ entity – a Bjerrum pair. The remaining “free” ions should
1008
+ then only experience weak interactions with each other,
1009
+ and can be treated at the mean-field level. Importantly,
1010
+ this last remark justifies the Debye-H¨uckel linearization,
1011
+ as all non-linear effects are assumed to be hidden in the
1012
+ definition of ion pairs.
1013
+ As before, we consider that pairs behave like particles
1014
+ of an ideal gas, and that their maximum extension is
1015
+ given by xT . Defining cp
1016
+ in the concentration of pairs inside
1017
+ the channel, the chemical potential of pairs is given by:
1018
+ µp
1019
+ in = kBT log
1020
+ cp
1021
+ inΛ6
1022
+ 2πxT R2 ,
1023
+ (48)
1024
+ where the geometrical factor inside the logarithm ac-
1025
+ counts for the internal degrees of freedom of a pair. The
1026
+ chemical potential only has an entropic term, because
1027
+ the binding energy of the pair exactly compensates the
1028
+ self-energy of the two separate ions. The chemical equi-
1029
+ librium between free ions and pairs inside the channel
1030
+ 1 This justifies a posteriori our choice of [−xT /2, xT /2] as the
1031
+ interval in which a paired-up ion is allowed to move.
1032
+
1033
+ 9
1034
+ Concentration
1035
+ Distance
1036
+ Anions
1037
+ Cations
1038
+ B
1039
+ A
1040
+ C
1041
+ Debye cloud
1042
+ Bjerrum pair
1043
+ Well-defined
1044
+ pair
1045
+ Phantom pair
1046
+ 10-4
1047
+ 10-2
1048
+ 100
1049
+ Reservoir concentration (M)
1050
+ 10-3
1051
+ 10-2
1052
+ 10-1
1053
+ 100
1054
+ 101
1055
+ 102
1056
+ Channel conc./Res. conc.
1057
+ Exact solution
1058
+ Debye-Hückel-Bjerrum mean-field
1059
+ Phantom pair mean-field
1060
+ Strong interactions:
1061
+ Es = 6 kBT
1062
+ FIG. 3. Pair-enhanced mean-field theory. A. Treatment of ion pairing in mean-field approaches. Top panel: Mean-field
1063
+ theories inevitably underestimate ion-ion correlations. To circumvent this problem, two ions that are distant by less than xT
1064
+ are considered to form an ion pair, which is treated as a separate chemical species. Bottom panel: schematic representation
1065
+ of ion distribution around a fixed positive ion. The distribution is very peaked close to the central ion, due to the formation
1066
+ of an ion pair, and then relaxes smoothly to the mean value cin. B. Evolution of channel concentration cin as function of
1067
+ reservoir concentration cout, in a strongly-interacting case (R = 1 nm, ξ = 7 nm, xT = 0.6 nm, Es = 6 kBT). We plot the ratio
1068
+ cin/cout obtained from three different models taking Bjerrum pairs into account: the exact field-theoretical solution (Eq. (26),
1069
+ blue circles), the Debye-H¨uckel-Bjerrum mean-field theory (Eq. (51), red line) and our modified mean-field theory based on the
1070
+ notion of phantom pairs (Eq. (55), orange line), which reproduces the exact solution quantitatively for all values of parameters.
1071
+ At high concentration, the Debye-H¨uckel-Bjerrum prediction fails due to the uncontrolled proliferation of Bjerrum pairs. C.
1072
+ Formation of phantom pairs inside the nanochannel. At low concentration (top panel), pairs are well-separated and ions forming
1073
+ a pair are tightly bound to each other. At high concentration (bottom panel), ionic interactions are weakened as a result of
1074
+ Debye screening, and two quasi-non-interacting ions may find themselves within a distance xT of each other without actually
1075
+ binding: this is a phantom pair.
1076
+ can be written as:
1077
+ µ+
1078
+ in + µ−
1079
+ in = 2µin = µp
1080
+ in,
1081
+ (49)
1082
+ where µ+
1083
+ in and µ−
1084
+ in are the chemical potentials of cations
1085
+ and anions, respectively.
1086
+ We then obtain, using the
1087
+ Debye-H¨uckel solution for µin (equations (31) to (33)):
1088
+ cp
1089
+ in = 2πR2xT c2
1090
+ out,
1091
+ (50)
1092
+ which is the result obtained in the previous section. The
1093
+ average concentration in free ions cf
1094
+ in is not modified com-
1095
+ pared to the Debye-H¨uckel solution, and is therefore the
1096
+ solution of the self-consistent Eq. (36).
1097
+ One can then
1098
+ compute the total concentration inside the channel as
1099
+ cin = cf
1100
+ in + cp
1101
+ in, or, explicitly
1102
+ cin = coute−µex(cf
1103
+ in)/kBT + 2πR2xT c2
1104
+ out.
1105
+ (51)
1106
+ In other words, the only impact of pairs in Bjerrum’s
1107
+ computation is to add a quadratic term 2πR2xT c2
1108
+ out to
1109
+ the Debye-H¨uckel result, matching with the expansion
1110
+ (47) of the exact solution up to order 2 in the bulk concen-
1111
+ tration. We compare the two predictions on Fig. 3B. The
1112
+ Debye-H¨uckel-Bjerrum solution is found to match the ex-
1113
+ act one quite well at low and intermediate concentrations.
1114
+ This result is, however, unphysical for cout ≳ 1/πR2xT :
1115
+ cin is found to grow much faster than the bulk concen-
1116
+ tration. One solution would be to consider higher-order
1117
+ terms in the mean-field treatment through the inclusion
1118
+ of triplets, quadruplets, etc. of ions, and all possible in-
1119
+ teractions between these entities.
1120
+ Truncating the sum
1121
+ at any finite order, however, would not yield a solution
1122
+ valid in the entire range of concentrations, nor is it guar-
1123
+ anteed to converge to the exact solution. This approach
1124
+ is also unsatisfactory as it would not yield a closed-form
1125
+ expression for cin and would not allow for qualitative un-
1126
+ derstanding of the underlying physics.
1127
+ Instead, we develop a different method that, through
1128
+ physics-driven arguments, prevents the divergence of cin
1129
+ at high bulk concentrations and reproduces quantita-
1130
+ tively the exact solution.
1131
+ B.
1132
+ Phantom pairs
1133
+ Eq. (51) overestimates the number of Bjerrum pairs in
1134
+ the channel because it fails to account for the presence
1135
+ of Bjerrum pairs in the reservoir. The electrolyte in the
1136
+ reservoir is treated as an ideal gas: the ions are non-
1137
+ interacting and they cannot form actual tightly-bound
1138
+ pairs. Nevertheless, we have defined any two oppositely
1139
+ charged ions that find themselves in a cylinder of radius
1140
+ R and length xT to be a separate chemical species. Such
1141
+ configurations may arise in the reservoir simply out of
1142
+ statistical chance: we dub them phantom pairs. For our
1143
+
1144
+ 10
1145
+ mean-field theory to be consistent, these phantom pairs
1146
+ need to be taken into account.
1147
+ Let cp
1148
+ out be the concentration of phantom pairs in the
1149
+ reservoir.
1150
+ The chemical equilibrium between phantom
1151
+ pairs and free ions imposes
1152
+ cp
1153
+ out = 2πR2xT (cf
1154
+ out)2.
1155
+ (52)
1156
+ In addition, one has cf
1157
+ out + cp
1158
+ out = cout, since an ion must
1159
+ either be free or part of a pair. Imposing this condition
1160
+ yields:
1161
+ cf
1162
+ out =
1163
+
1164
+ 1 + 8πcoutxT R2 − 1
1165
+ 4xT πR2
1166
+ .
1167
+ (53)
1168
+ We use this result to control the proliferation of pairs in
1169
+ the channel: we now equilibrate the free ions inside the
1170
+ nanochannel with only the free ions in the reservoir:
1171
+ cf
1172
+ in = cf
1173
+ oute−µex(cf
1174
+ in)/kBT ,
1175
+ (54)
1176
+ which corresponds to Eq. (35) with cout replaced by cf
1177
+ out.
1178
+ Eq. (54) is again a self-consistent equation, this time on
1179
+ the concentration of free ions cf
1180
+ in, that must be solved
1181
+ numerically. Lastly, equilibrating pairs with free ions in-
1182
+ side the channel (or, equivalently, pairs inside with pairs
1183
+ outside), we obtain:
1184
+ cin = cf
1185
+ in + 2πR2xT (cf
1186
+ out)2,
1187
+ (55)
1188
+ where the second term corresponds again to Bjerrum
1189
+ pairs. Eqs. (53) to (55) constitute the main result of our
1190
+ modified mean-field theory. Note that µex may be deter-
1191
+ mined at the Debye-H¨uckel level (Eq. (33)), or by solving
1192
+ the full Poisson-Boltzmann equation (Eq. (39)). In what
1193
+ follows, we will only discuss the latter, as it offers greater
1194
+ accuracy; however, the Debye-H¨uckel prediction provides
1195
+ reasonable results even in the case of strong interactions,
1196
+ and yields for a convenient analytical expression for µex
1197
+ as function of cf
1198
+ in.
1199
+ The
1200
+ prediction
1201
+ of
1202
+ our
1203
+ phantom
1204
+ pair
1205
+ Poisson-
1206
+ Boltzmann model is compared to the exact solution (26)
1207
+ in Fig. 3B. The two solutions are found to be in near
1208
+ perfect agreement for all values of parameters, even in
1209
+ strong coupling limit Es ≫ kBT.
1210
+ In the next two sections, we use our modified mean-
1211
+ field model to predict the conductance of a nanochannel,
1212
+ first in the case of a neutral channel, and then in presence
1213
+ of a surface charge.
1214
+ C.
1215
+ Conductance
1216
+ One strength of our modified mean-field model is that
1217
+ it offers insight into the physical properties of the con-
1218
+ fined system beyond the value of the ionic concentra-
1219
+ tion. In particular, the decomposition of the electrolyte
1220
+ into free ions and bound pairs allows us to estimate the
1221
+ channel’s conductance. Tightly bound Bjerrum pairs are
1222
+ electrically neutral, so that they do not contribute to the
1223
+ ionic current to first order in applied electric field: it
1224
+ would then be straightforward to assume that the chan-
1225
+ nel’s conductance is proportional to the concentration
1226
+ of free ions. However, the reasoning needs to be more
1227
+ subtle, since the channel, in the same way as the reser-
1228
+ voir, may contain non-interacting phantom pairs.
1229
+ In-
1230
+ deed, we have decomposed the confined electrolyte into
1231
+ tightly bound pairs, that have no ionic atmosphere, and
1232
+ free ions that are dressed by a Debye screening cloud.
1233
+ As the concentration increases, the interaction between
1234
+ dressed ions becomes weak, and two of them may find
1235
+ themselves within a distance xT without actually bind-
1236
+ ing. Such a phantom pair is expected to still contribute
1237
+ to the conductance. The concentration of phantom pairs
1238
+ in the channel is obtained by imposing their chemical
1239
+ equilibrium with the free ions treated as an ideal gas.
1240
+ Thus, we estimate the channel’s conductance as:
1241
+ G = 2 e2D
1242
+ kBT
1243
+ πR2
1244
+ L
1245
+
1246
+ cf
1247
+ in + 2xT πR2(cf
1248
+ in)2�
1249
+ ,
1250
+ (56)
1251
+ where D is the diffusion coefficient of ions; the second
1252
+ term corresponds to the contribution of phantom pairs.
1253
+ In Fig. 4A, we compare this result to the Ohm’s law
1254
+ prediction where pairs are neglected and one assumes
1255
+ cin = cout. Ohm’s law is found to greatly overestimate
1256
+ the conductance at low concentration. In the dilute limit,
1257
+ we instead recover the Arrhenius scaling, where one as-
1258
+ sumes cin = coute−Es/kBT .
1259
+ Finally, we stress that Eq. (56) only accounts for the
1260
+ electrophoresis of free ions, and is therefore only valid
1261
+ in the limit of weak external electric fields.
1262
+ Stronger
1263
+ voltage drops will result in the breaking of ion pairs,
1264
+ causing a conductivity increase in a process known as
1265
+ the second Wien effect. This phenomenon is described in
1266
+ refs.13,14, and has been used to create solid-state voltage-
1267
+ gated nanochannels33.
1268
+ D.
1269
+ Effect of a surface charge
1270
+ Up till now, we have restricted ourselves to channels
1271
+ with uncharged walls.
1272
+ However, in most experimen-
1273
+ tally relevant situations, the channel walls bear a sur-
1274
+ face charge density Σ, which strongly impacts nanofluidic
1275
+ transport. While introducing a surface charge is tedious
1276
+ within the exact framework, we may readily assess the
1277
+ effect of surface charge in the interaction confinement
1278
+ regime using our pair-enhanced mean-field theory.
1279
+ In the limit where the channel’s radius is smaller than
1280
+ the Debye length, we assume that the presence of the
1281
+ surface charge amounts to a homogeneous Donnan po-
1282
+ tential drop VD inside the channel, which we do not need
1283
+ to determine explicitly. Then, the chemical potential of
1284
+ ions inside the channel reads:
1285
+ µ±
1286
+ in = µex ± eVD + kBT log c±
1287
+ inΛ3.
1288
+ (57)
1289
+
1290
+ 11
1291
+ B
1292
+ A
1293
+ 10-3
1294
+ 10-2
1295
+ 10-1
1296
+ 100
1297
+ 101
1298
+ Reservoir concentration (M)
1299
+ 10-6
1300
+ 10-4
1301
+ 10-2
1302
+ 100
1303
+ Channel conductance (nS)
1304
+ Ohm law
1305
+ Phantom pair mean-field
1306
+ Arrhenius model
1307
+ Actual surface charge
1308
+ 10-3 C/m2
1309
+ Apparent surface charge
1310
+ 10-2 C/m2
1311
+ 10-3
1312
+ 10-2
1313
+ 10-1
1314
+ 100
1315
+ 101
1316
+ Reservoir concentration (M)
1317
+ 10-2
1318
+ 10-1
1319
+ 100
1320
+ 101
1321
+ Conductance (nS)
1322
+ Donnan equilibrium
1323
+ Phantom pair mean-field
1324
+ FIG. 4. Channel conductance in the pair-enhanced mean-field model. A. Conductance of a nanochannel (R = 1 nm,
1325
+ ξ = 7 nm, xT = 0.7 nm, Es = 10 kBT) as function of the reservoir concentration. The red line corresponds to the prediction of
1326
+ the phantom pair mean-field model (Eq. (56)) for T = 300 K, D = 10−9 m2/s and L = 100 nm. The Ohm’s law bulk prediction
1327
+ (cin = cout, blue line) and the Arrhenius model (cin = coute−Es/kBT , yellow line) are also represented for comparison. B.
1328
+ Conductance of a nanochannel with a weak surface charge Σ = 10−3 C/m2. We represented the predictions of the conventional
1329
+ Donnan equilibrium (Eq. (1), blue line) and of the phantom pair mean-field theory (equations (56) and (59), red line). Because
1330
+ interaction confinement results in a lower ion concentration in the channel, the usual formula Σ ∼ Rc∗/2, where c∗ is the reservoir
1331
+ concentration for which conductance levels off, overestimates the surface charge by one order of magnitude, as indicated on the
1332
+ plot.
1333
+ Note that the concentration in free anions c−
1334
+ in and cations
1335
+ c+
1336
+ in are now distinct, so that µex is defined as a function of
1337
+ the average free ion concentration cf
1338
+ in = (c+
1339
+ in+c−
1340
+ in)/2. In a
1341
+ channel that is sufficiently long for local electroneutrality
1342
+ to hold,
1343
+ c+
1344
+ in − c−
1345
+ in + 2Σ/R = 0.
1346
+ (58)
1347
+ Imposing chemical equilibrium with the reservoir, we ob-
1348
+ tain a modified version of the Donnan result (Eq. (1)):
1349
+
1350
+
1351
+
1352
+
1353
+
1354
+
1355
+
1356
+
1357
+
1358
+
1359
+
1360
+
1361
+
1362
+
1363
+
1364
+ cin = cf
1365
+ in + cp
1366
+ in
1367
+ cf
1368
+ in =
1369
+ ��
1370
+ cf
1371
+ oute−βµex(cf
1372
+ in)�2
1373
+ +
1374
+ � 2Σ
1375
+ R
1376
+ �2,
1377
+ cp
1378
+ in = 2πR2xT (cf
1379
+ out)2,
1380
+ (59)
1381
+ with cf
1382
+ out given by Eq. (53).
1383
+ One can again obtain the channel’s conductance
1384
+ through Eq. (56), which we compare to the Donnan /
1385
+ Ohm’s law result in Fig. 4B. Importantly, the Donnan
1386
+ result predicts that conductance becomes independent of
1387
+ concentration for cout ∼ 2Σ/R (see Eq. (1)). In practice,
1388
+ this result is commonly used to estimate experimentally
1389
+ the surface charge as Σ ∼ Rc∗/2, where c∗ is the reser-
1390
+ voir concentration for which conductance levels off. In
1391
+ contrast, in the interaction confinement regime, we pre-
1392
+ dict that the transition occurs instead at cf
1393
+ in ∼ 2Σ/R –
1394
+ corresponding to a higher reservoir concentration, due to
1395
+ the self-energy barrier. In this case, Donnan’s prediction
1396
+ overestimates the surface charge by typically one order
1397
+ of magnitude, as shown in Fig. 4B.
1398
+ Finally, let us stress that we considered here a charge
1399
+ homogeneously distributed along the channel’s surface.
1400
+ This assumption is relevant in the case of conducting
1401
+ wall materials, such as systems where the charge is im-
1402
+ posed via a gating electrode connected to the chan-
1403
+ nel walls.
1404
+ This situation, however, may be different
1405
+ in experimentally-available devices, where the surface
1406
+ charge generally consists in localized charged groups and
1407
+ defects on the channel walls. In this case, the physics be-
1408
+ come more involved as ions may form bound pairs with
1409
+ the fixed surface charges.
1410
+ Some of these physics have
1411
+ been revealed by the exact computations of Shklovskii
1412
+ and coworkers9,22; a technically simpler approach to
1413
+ these physics using our pair-enhanced mean-field theory
1414
+ would be possible, but extends beyond the scope of the
1415
+ present work.
1416
+ V.
1417
+ DISCUSSION AND PERSPECTIVES
1418
+ We have determined the salt concentration inside a
1419
+ nanometric channel connected to reservoirs filled with
1420
+ electrolyte.
1421
+ In the case of a fully 1D geometry, corre-
1422
+ sponding to a nanotube of radius R ∼ 1nm, we devel-
1423
+ oped an exact field-theoretical solution that allowed us
1424
+ to compute channel concentration cin as function of the
1425
+ reservoir concentration cout. This solution clears up the
1426
+ ambiguities of pre-existing mean-field theories, and con-
1427
+ tradicts the naive expectation cin = cout. In particular,
1428
+ the concentration inside the nanochannel is found to be
1429
+ always lower than in the bulk, as the confinement of elec-
1430
+ trostatic interactions creates an energy barrier for ions to
1431
+
1432
+ 12
1433
+ enter the channel.
1434
+ Yet, we found that cin is in fact higher than the predic-
1435
+ tion of the mean-field Debye-Hückel theory, as ion pairing
1436
+ counterbalances to some extent the energy cost of in-
1437
+ teraction confinement. Such strong ion-ion correlations
1438
+ cannot be directly accounted for in a mean-field theory,
1439
+ and the filling transition that emerges in Debye-H¨uckel
1440
+ theory appears to be an artefact of linearization. To over-
1441
+ come this issue, one can add Bjerrum pairs as a separate
1442
+ chemical species within the Debye-H¨uckel model. Care-
1443
+ fully accounting for the statistical formation of unbound
1444
+ phantom pairs, we obtain a modified mean-field theory
1445
+ that reproduces the result of the exact computation with
1446
+ nearly-perfect accuracy, and that can be extended to ac-
1447
+ count for a non-zero surface charge on the channel wall.
1448
+ Despite the concurring results, the two original for-
1449
+ malisms developed in this work serve distinct purposes.
1450
+ The field-theoretical solution plays the role of a touch-
1451
+ stone model, owing to its exact treatment of all many-
1452
+ body interactions. Modeling electrolytes is a notoriously
1453
+ hard problem in statistical physics, and simplified models
1454
+ often lack a reference solution for benchmarking
1455
+ their approximations. This difficulty is lifted in the 1D
1456
+ geometry: thanks to the existence of the exact solution,
1457
+ we have been able to build a quantitatively precise mean-
1458
+ field model, adding step-by-step the qualitative ingredi-
1459
+ ents necessary to reproduce the exact result.
1460
+ Moreover, the field theory formalism gives access to the
1461
+ entire statistics of the system, including finite-size effects
1462
+ which elude any mean-field treatment.
1463
+ The latter are
1464
+ expected to be relevant in many experimental situations,
1465
+ as a substantial amount of current work focuses on short
1466
+ pores, where the length of the channel is comparable to
1467
+ its radius. For instance, one can expect shorter channels
1468
+ to deviate from electroneutrality2 – something entirely
1469
+ impossible in the limit of infinitely long channels.
1470
+ On the other hand, our modified mean-field formalism
1471
+ has the advantage of mathematical simplicity, allowing
1472
+ for convenient physical interpretations. The simple dis-
1473
+ tinction between free ions and Bjerrum pairs can be used
1474
+ to straightforwardly estimate the channel’s conductance.
1475
+ The influence of ion-ion correlations on conductivity is
1476
+ of particular importance as conductance measurements
1477
+ underpin many nanofluidic experiments. In contrast, the
1478
+ exact solution does not provide any such insight on trans-
1479
+ port properties, as it is limited to thermal equilibrium.
1480
+ Furthermore, the mean-field model may easily be
1481
+ adapted to other geometries, whereas an exact treatment
1482
+ is only possible in the strictly 1D case. Extensions of our
1483
+ results to 2D nanochannels would be of significant in-
1484
+ terest. In particular, 2D nanochannels can be made out
1485
+ of various materials with different electronic properties,
1486
+ which directly impact the confined ionic interactions6.
1487
+ Therefore, 2D nanochannels could serve as a platform
1488
+ for exploring the impact of wall metallicity on the ion
1489
+ filling problem.
1490
+ Both our exact and mean-field solutions can be ex-
1491
+ pected to fail at very high concentrations. Indeed, our
1492
+ work relies on a simplified picture of electrolytes, where
1493
+ all steric effects are discarded. We considered point-like
1494
+ ions with no short-distance repulsion; therefore, no effect
1495
+ like saturation or layering can be accounted for.
1496
+ Sim-
1497
+ ilarly, we neglected any interaction with the solvent –
1498
+ for example, we did not consider the decrement in rela-
1499
+ tive permittivity at high salt concentrations34. However,
1500
+ since all electrostatic interactions are screened in the
1501
+ limit of high concentrations, such considerations should
1502
+ not impact the conclusions of the present work: partic-
1503
+ ularly, we would still expect that cin = cout at high con-
1504
+ centration.
1505
+ Lastly, let us briefly recall our results for the ion filling
1506
+ problem. In channels larger than a few nanometers, the
1507
+ conventional mean-field picture is valid, so that in ab-
1508
+ sence of any surface charge the salt concentration inside
1509
+ the channel equals that of the reservoirs: cin = cout. For
1510
+ nanometre-scale confinement and low concentrations, in-
1511
+ teraction confinement amounts to a finite energy barrier
1512
+ for ions to enter the channel: cin = coute−Es/kBT . As
1513
+ concentration increases, more ions are able to overcome
1514
+ the barrier by forming Bjerrum pairs, neutralizing the
1515
+ electrostatic cost of confinement, at the price of entropy:
1516
+ cin ∝ c2
1517
+ out. Only at high concentrations can one recover
1518
+ the intuitive estimate cin = cout, as intense screening can-
1519
+ cels out all electrostatic interactions. Overall, interaction
1520
+ confinement has a significant impact on the properties
1521
+ of nanofluidic systems, and the assumption cin = cout
1522
+ should be questioned any time the system’s size reaches
1523
+ the nanometre scale.
1524
+ ACKNOWLEDGMENTS
1525
+ N.K. acknowledges support from a Humboldt fellow-
1526
+ ship.
1527
+ L.B. acknowledges funding from the EU H2020
1528
+ Framework Programme/ERC Advanced Grant agree-
1529
+ ment number 785911-Shadoks.
1530
+ The Flatiron Institute
1531
+ is a division of the Simons Foundation.
1532
+ DATA AVAILABILITY STATEMENT
1533
+ The data that support the findings of this study are
1534
+ available from the corresponding author upon reasonable
1535
+ request.
1536
+ 1R. B. Schoch, J. Han, and P. Renaud, “Transport phenomena in
1537
+ nanofluidics,” Reviews of Modern Physics 80, 839–883 (2008).
1538
+ 2A. Levy, J. P. de Souza,
1539
+ and M. Z. Bazant, “Breakdown of
1540
+ electroneutrality in nanopores,” Journal of Colloid and Interface
1541
+ Science 579, 162–176 (2020).
1542
+ 3N. Kavokine, R. R. Netz,
1543
+ and L. Bocquet, “Fluids at the
1544
+ nanoscale: From continuum to subcontinuum transport,” An-
1545
+ nual Review of Fluid Mechanics 53, 377–410 (2021).
1546
+ 4A. Esfandiar, B. Radha, F. C. Wang, Q. Yang, S. Hu, S. Garaj,
1547
+ R. R. Nair, A. K. Geim, and K. Gopinadhan, “Size effect in ion
1548
+ transport through angstrom-scale slits,” Science 358, 511–513
1549
+ (2017).
1550
+
1551
+ 13
1552
+ 5Y. Avni, R. M. Adar, D. Andelman, and H. Orland, “Conductiv-
1553
+ ity of concentrated electrolytes,” Physical Review Letters 128,
1554
+ 098002 (2022).
1555
+ 6N. Kavokine, P. Robin,
1556
+ and L. Bocquet, “Interaction confine-
1557
+ ment and electronic screening in two-dimensional nanofluidic
1558
+ channels,” The Journal of Chemical Physics 157, 114703 (2022).
1559
+ 7A. Parsegian, “Energy of an ion crossing a low dielectric mem-
1560
+ brane: Solutions to four relevant electrostatic problems,” Nature
1561
+ 221, 844–846 (1969).
1562
+ 8S. Teber, “Translocation energy of ions in nano-channels of cell
1563
+ membranes,” Journal of Statistical Mechanics: Theory and Ex-
1564
+ periment 2005, P07001–P07001 (2005).
1565
+ 9J. Zhang, A. Kamenev,
1566
+ and B. I. Shklovskii, “Conductance of
1567
+ ion channels and nanopores with charged walls: A toy model,”
1568
+ Physical Review Letters 95, 148101 (2005).
1569
+ 10Y. Levin, “Electrostatics of ions inside the nanopores and trans-
1570
+ membrane channels,” Europhysics Letters (EPL) 76, 163–169
1571
+ (2006).
1572
+ 11S. Kondrat and A. Kornyshev, “Superionic state in double-layer
1573
+ capacitors with nanoporous electrodes,” Journal of Physics: Con-
1574
+ densed Matter 23, 022201 (2011).
1575
+ 12P. Loche, C. Ayaz, A. Schlaich, Y. Uematsu,
1576
+ and R. R. Netz,
1577
+ “Giant axial dielectric response in water-filled nanotubes and ef-
1578
+ fective electrostatic ion-ion interactions from a tensorial dielec-
1579
+ tric model,” Journal of Physical Chemistry B 123, 10850–10857
1580
+ (2019).
1581
+ 13N. Kavokine, S. Marbach, A. Siria,
1582
+ and L. Bocquet, “Ionic
1583
+ Coulomb blockade as a fractional Wien effect,” Nature Nanotech-
1584
+ nology 14, 573–578 (2019).
1585
+ 14P. Robin, N. Kavokine, and L. Bocquet, “Modeling of emergent
1586
+ memory and voltage spiking in ionic transport through angstrom-
1587
+ scale slits,” Science 373, 687–691 (2021).
1588
+ 15L. Dresner, “Ion exclusion from neutral and slightly charged
1589
+ pores,” Desalination 15, 39–57 (1974).
1590
+ 16S. Buyukdagli, M. Manghi, and J. Palmeri, “Ionic capillary evap-
1591
+ oration in weakly charged nanopores,” Physical Review Letters
1592
+ 105, 158103 (2010).
1593
+ 17S. Buyukdagli, M. Manghi,
1594
+ and J. Palmeri, “Variational ap-
1595
+ proach for electrolyte solutions:
1596
+ From dielectric interfaces to
1597
+ charged nanopores,” Physical Review E 81, 041601 (2010).
1598
+ 18R. R. Netz and H. Orland, “Variational charge renormalization in
1599
+ charged systems,” The European Physical Journal E 11, 301–311
1600
+ (2003).
1601
+ 19V. Démery, D. S. Dean, T. C. Hammant, R. R. Horgan,
1602
+ and
1603
+ R. Podgornik, “The one-dimensional Coulomb lattice fluid ca-
1604
+ pacitor,” The Journal of Chemical Physics 137, 064901 (2012).
1605
+ 20A. A. Lee, S. Kondrat, and A. A. Kornyshev, “Single-file charge
1606
+ storage in conducting nanopores,” Physical Review Letters 113,
1607
+ 1–5 (2014).
1608
+ 21V. Démery, R. Monsarrat, D. S. Dean, and R. Podgornik, “Phase
1609
+ diagram of a bulk 1d lattice Coulomb gas,” EPL (Europhysics
1610
+ Letters) 113, 18008 (2016).
1611
+ 22J. Zhang, A. Kamenev, and B. I. Shklovskii, “Ion exchange phase
1612
+ transitions in water-filled channels with charged walls,” Physical
1613
+ Review E 73, 051205 (2006).
1614
+ 23A. Kamenev, J. Zhang, A. Larkin,
1615
+ and B. Shklovskii, “Trans-
1616
+ port in one-dimensional Coulomb gases: From ion channels to
1617
+ nanopores,” Physica A 359, 129–161 (2006).
1618
+ 24T. Gulden and A. Kamenev, “Dynamics of ion channels via non-
1619
+ hermitian quantum mechanics,” Entropy 23, 125 (2021).
1620
+ 25S. F. Edwards and A. Lenard, “Exact statistical mechanics of
1621
+ a one-dimensional system with coulomb forces. ii. the method
1622
+ of functional integration,” Journal of Mathematical Physics 3,
1623
+ 778–792 (1962).
1624
+ 26V. Démery, D. S. Dean, T. C. Hammant, R. R. Horgan,
1625
+ and
1626
+ R. Podgornik, “Overscreening in a 1d lattice coulomb gas model
1627
+ of ionic liquids,” EPL (Europhysics Letters) 97, 28004 (2012).
1628
+ 27D. S. Dean, R. R. Horgan, and D. Sentenac, “Boundary effects in
1629
+ the one-dimensional Coulomb gas,” Journal of Statistical Physics
1630
+ 90, 899–926 (1998).
1631
+ 28D. Andelman, “Electrostatic properties of membranes:
1632
+ The
1633
+ Poisson-Boltzmann theory,” (1995).
1634
+ 29C. Herrero and L. Joly, “Poisson-Boltzmann formulary,” arXiv
1635
+ preprint arXiv:2105.00720 (2021).
1636
+ 30D. Frenkel and B. Smit, “Understanding molecular simulation,”
1637
+ (Academic Press, 2002) Chap. 7.
1638
+ 31H. Bruus and K. Flensberg, “Many-body quantum theory in
1639
+ condensed matter physics,”
1640
+ (Oxford University Press, 2016)
1641
+ Chap. 12.
1642
+ 32Y. Levin, “Electrostatic correlations: from plasma to biology,”
1643
+ Reports on progress in physics 65, 1577 (2002).
1644
+ 33P. Robin, T. Emmerich, A. Ismail, A. Niguès, Y. You, G.-H.
1645
+ Nam, A. Keerthi, A. Siria, A. Geim, B. Radha, and L. Bocquet,
1646
+ “Long-term memory and synapse-like dynamics of ionic carriers
1647
+ in two-dimensional nanofluidic channels,” Science (in press).
1648
+ 34A. Levy, D. Andelman,
1649
+ and H. Orland, “Dipolar Poisson-
1650
+ Boltzmann approach to ionic solutions: A mean field and loop ex-
1651
+ pansion analysis,” The Journal of Chemical Physics 139, 164909
1652
+ (2013).
1653
+
09E3T4oBgHgl3EQfnArs/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
0NAyT4oBgHgl3EQfbfc0/content/tmp_files/2301.00262v1.pdf.txt ADDED
@@ -0,0 +1,2919 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.00262v1 [math.PR] 31 Dec 2022
2
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
3
+ KOHEI SUZUKI
4
+ Abstract. In this article, we show 1-Bakry–Émery lower Ricci curvature
5
+ bound BE1(0, ∞) of a Dirichlet form on the configuration space whose in-
6
+ variant measure is sineβ ensemble for any β > 0.
7
+ As a particular case of
8
+ β = 2, our result proves BE1(0, ∞) for a Dirichlet form related to the unlablled
9
+ Dyson Brownian motion. We prove furthermore several functional inequalities
10
+ including the integral Bochner inequality, the local Poincaré and the local log-
11
+ Sobolev inequalities as well as the log-Harnack and the dimension-free Harnack
12
+ inequalities, the Lipschitz contraction property and the L∞-to-Lipschitz regu-
13
+ larisation property of the semigroup with the L2-transportation-type extended
14
+ distance. At the end of the article, we provide a sufficient condition for the
15
+ synthetic lower Ricci curvature bound in the case of general invariant measures
16
+ beyond sineβ.
17
+ Contents
18
+ 1.
19
+ Introduction
20
+ 1
21
+ 2.
22
+ Notation and Preliminaries
23
+ 4
24
+ 3.
25
+ Curvature bound for finite-particle systems
26
+ 11
27
+ 4.
28
+ Curvature bound for infinite-particle systems
29
+ 15
30
+ 5.
31
+ Dimension-free/log Harnack inequalities and Lipschitz regularisation
32
+ 25
33
+ 6.
34
+ Generalisation
35
+ 27
36
+ Appendix A.
37
+ 29
38
+ References
39
+ 31
40
+ 1. Introduction
41
+ The objective of this article is to reveal the structure of lower curvature bound be-
42
+ hind an infinite particle system of diffusions with logarithmic interactions. Such an
43
+ interacting particle system is realised as a continuous-time strong Markov process
44
+ having continuous paths (called a diffusion process) taking values in the configu-
45
+ ration space Υ = Υ(R) over R and having the sineβ (β > 0) ensemble µ as an
46
+ Date: 31/12/2022.
47
+ Key words and phrases.
48
+ Dyson Brownian motion, sine beta ensemble, Ricci curvature bound.
49
+ Department of Mathematical Science, Durham University
50
+ E-mail: [email protected] .
51
+ 1
52
+
53
+ 2
54
+ K. SUZUKI
55
+ invariant measure. We study a corresponding Dirichlet form (EΥ,µ, D(EΥ,µ)) with
56
+ square field ΓΥ (Prop. 4.15) whose invariant measure is sineβ ensemble µ on the
57
+ configuration space Υ. The case of β = 2 is particularly related to the diffusion
58
+ process called (unlabelled) Dyson Brownian motion (cf. [Spo87, KT10, Osa13]). The
59
+ labelled interacting diffusions can be phrased formally as the following infinitely
60
+ many stochastic differential equation with logarithmic interaction (see [Tsa16] for a
61
+ rigorous construction):
62
+ dXk
63
+ t = β
64
+ 2 lim
65
+ r→∞
66
+
67
+ i̸=k:|Xk
68
+ t −Xi
69
+ t|<r
70
+ 1
71
+ Xk
72
+ t − Xi
73
+ t
74
+ dt + dBk
75
+ t ,
76
+ k ∈ N ,
77
+ whereby {Bk
78
+ · }k∈N are infinitely many independent Brownian motions on R.
79
+ The main result of this article is to show that the aforementioned Dirichlet
80
+ form (EΥ,µ, D(EΥ,µ)) satisfies the non-negative lower Ricci curvature bound BE1(0, ∞)
81
+ in the sense of Bakry–Émery.
82
+ We prove, furthermore, several related functional
83
+ inequalities including the integral Bochner inequality with respect to the integral
84
+ Γ2-operator (ΓΥ,µ
85
+ 2
86
+ , D(ΓΥ,µ
87
+ 2
88
+ )) (see (4.22)), the local Poincaré, the local log-Sobolev
89
+ inequalities as well as the dimension-free Harnack inequality, the log-Harnack in-
90
+ equality, the Lipschitz contraction and the L∞-to-Lipschitz regularisation property
91
+ with respect to the L2-transportation-type extended distance ¯dΥ on Υ (see (2.13)).
92
+ The main results are summarised in the following.
93
+ Theorem. Let β > 0 and µ be the sineβ ensemble. The form (EΥ,µ, D(EΥ,µ)) satis-
94
+ fies the following:
95
+ • (Thm. 4.17) 1-Bakry–Émery estimate BE1(0, ∞): for u ∈ D(EΥ,µ), t > 0,
96
+ ΓΥ�
97
+ T Υ,µ
98
+ t
99
+ u
100
+ � 1
101
+ 2 ≤ T Υ,µ
102
+ t
103
+
104
+ ΓΥ(u)
105
+ 1
106
+ 2�
107
+ ;
108
+ • (Cor. 4.18) Integral Bochner inequality: for every (u, ϕ) ∈ D(ΓΥ,µ
109
+ 2
110
+ )
111
+ ΓΥ,µ
112
+ 2
113
+ (u, ϕ) ≥ 0 ;
114
+ • (Cor. 4.18) Local Poincaré inequality: for u ∈ D(EΥ,µ), t > 0,
115
+ T Υ,µ
116
+ t
117
+ u2 − (T Υ,µ
118
+ t
119
+ u)2 ≤ 2tT Υ,µ
120
+ t
121
+ ΓΥ(u) ,
122
+ T Υ,µ
123
+ t
124
+ u2 − (T Υ,µ
125
+ t
126
+ u)2 ≥ 2tΓΥ(T Υ,µ
127
+ t
128
+ u) ;
129
+ • (Cor. 4.18) Local log-Sobolev inequality: for non-negative u ∈ D(EΥ,µ), t > 0,
130
+ T Υ,µ
131
+ t
132
+ u log u − T Υ,µ
133
+ t
134
+ u log T Υ,µ
135
+ t
136
+ u ≤ tT Υ,µ
137
+ t
138
+ �ΓΥ(u)
139
+ u
140
+
141
+ ,
142
+ T Υ,µ
143
+ t
144
+ u log u − T Υ,µ
145
+ t
146
+ u log T Υ,µ
147
+ t
148
+ u ≥ tΓΥ(T Υ,µ
149
+ t
150
+ u)
151
+ T Υ,µ
152
+ t
153
+ u
154
+ .
155
+ • (Thm. 5.1) Log-Harnack inequality: for every non-negative u ∈ L∞(Υ, µ),
156
+ t > 0, there exists Ω ⊂ Υ so that µ(Ω) = 1 and
157
+ T Υ,µ
158
+ t
159
+ (log u)(γ) ≤ log(T Υ,µ
160
+ t
161
+ u)(η) + ¯dΥ(γ, η)2 ,
162
+ ∀γ, η ∈ Ω ;
163
+
164
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
165
+ 3
166
+ • (Thm. 5.1) Dimension-free Harnack inequality: for every non-negative u ∈
167
+ L∞(Υ, µ), t > 0 and α > 1 there exists Ω ⊂ Υ so that µ(Ω) = 1 and
168
+ (T Υ,µ
169
+ t
170
+ u)α(γ) ≤ T Υ,µ
171
+ t
172
+ uα(η) exp
173
+
174
+ α
175
+ 2(α − 1)
176
+ ¯dΥ(γ, η)2�
177
+ ,
178
+ ∀γ, η ∈ Ω ;
179
+ • (Thm. 5.1) Lipschitz contraction: for every u ∈ Lip(¯dΥ, µ) and t > 0
180
+ T Υ,µ
181
+ t
182
+ u has a ¯dΥ-Lipschitz µ-modification ˜T Υ,µ
183
+ t
184
+ u
185
+ and the following Lipschitz contraction holds:
186
+ Lip¯dΥ( ˜T Υ,µ
187
+ t
188
+ u) ≤ Lip¯dΥ(u) ;
189
+ • (Thm. 5.1) L∞(Υ, µ)-to-Lip(¯dΥ, µ) regularisation by semigroup: For u ∈
190
+ L∞(µ) and t > 0,
191
+ T Υ,µ
192
+ t
193
+ u has a ¯dΥ-Lipschitz µ-modification ˜T Υ,µ
194
+ t
195
+ u
196
+ and the following estimate holds:
197
+ Lip¯dΥ( ˜T Υ,µ
198
+ t
199
+ u) ≤
200
+ 1
201
+
202
+ 2t∥u∥L∞(µ)
203
+ ∀u ∈ Lipb(¯dΥ, µ) .
204
+ At the end of this article, Theorem will be extended to general point processes
205
+ beyond the sineβ ensemble, see Thm. 6.2.
206
+ Comparison with Literature. To the best knowledge of the author, this is the
207
+ first article addressing lower Ricci curvature bound in the setting of interacting and
208
+ infinite particle systems of diffusion processes. In the case of non-interacting case
209
+ where the invariant measure is the Poisson measure, it has been studied in [EH15]
210
+ in the case of Riemannian manifolds and in [DS22] in the case of general diffusion
211
+ spaces. In the case of finite particle systems, a variable Ricci curvature bound has
212
+ been addressed in [VG20] in the case of Coulomb-type potentials where the curvature
213
+ bound depends on the number of particles.
214
+ Outline of the article. After preparing the notation and the preliminaries in
215
+ Section 2, we discuss in Section 3 the synthetic lower Ricci curvature bound for
216
+ Dirichlet forms (EΥ(Br),µη
217
+ r, D(EΥ(Br),µη
218
+ r)) on the configuration space Υ(Br) over the
219
+ closed metric ball Br with radius r > 0 centred at 0, whose invariant measure is
220
+ the projected regular conditional probability µη
221
+ r on Υ(Br) conditioned at η on the
222
+ compliment Bc
223
+ r ⊂ R. The key point of the proof is that the logarithm of the Radon–
224
+ Nikodým density Ψη
225
+ r := − log(dµη
226
+ r/ dπmr) with respect the Poisson measure πmr on
227
+ Υ(Br) with the intensity mr being the Lebesgue measure restricted on Br is geodesi-
228
+ cally convex in (Υ(Br), ¯dΥ) due to the following DLR (Dobrushin–Lanford–Ruelle)
229
+ equation (⋆) proven in [DHLM20, Thm.1.1] with the number-rigidity ([Gho15] for
230
+ sine2; [NR18] and [DHLM20] for sineβ): for µ-a.e. η, there exists a unique k =
231
+
232
+ 4
233
+ K. SUZUKI
234
+ k(η) ∈ N0 so that µη
235
+ r(Υl(Br)) > 0 if and only if l = k where Υl(Br) := {γ ∈
236
+ Υ(Br) : γ(Br) = l}, and for γ = �k
237
+ i=1 δxi ∈ Υ(Br)
238
+ dµη
239
+ r = 1
240
+
241
+ r e−Ψk,η
242
+ r
243
+ dm⊙k
244
+ r
245
+ ,
246
+ (⋆)
247
+ Ψk,η
248
+ r (γ) := − log
249
+ � k
250
+
251
+ i<j
252
+ |xi − xj|β
253
+ k
254
+
255
+ i=1
256
+ lim
257
+ R→∞
258
+
259
+ y∈ηBcr ,|y|≤R
260
+ ���1 − xi
261
+ y
262
+ ���
263
+ β
264
+
265
+ ,
266
+ where m⊙k
267
+ r
268
+ is the k-symmetric product measure of the Lebesgue measure mr re-
269
+ stricted on Br ⊂ R and Zη
270
+ r is the normalising constant.
271
+ In Section 4, we prove BE1(0, ∞) for (EΥ,µ, D(EΥ,µ)) in the following steps: we
272
+ first construct the truncated form (EΥ,µ
273
+ r
274
+ , D(EΥ,µ
275
+ r
276
+ )) on Υ whose gradient operator is
277
+ truncated up to configurations on Br (Prop. 4.7). We then identify it with the super-
278
+ position Dirichlet form ( ¯EΥ,µ
279
+ r
280
+ , D( ¯EΥ,µ
281
+ r
282
+ )) lifted from (EΥ(Br),µη
283
+ r, D(EΥ(Br),µη
284
+ r)) with re-
285
+ spect to the conditioning η (Thm. 4.11). By this identification, we can lift BE(0, ∞)
286
+ from the form (EΥ(Br),µη
287
+ r, D(EΥ(Br),µη
288
+ r)) onto the truncated form (EΥ,µ
289
+ r
290
+ , D(EΥ,µ
291
+ r
292
+ )).
293
+ Showing the monotonicity of the form (EΥ,µ
294
+ r
295
+ , D(EΥ,µ
296
+ r
297
+ )) with respect to r and pass-
298
+ ing to the limit r → ∞, we prove BE1(0, ∞) for the limit form (EΥ,µ, D(EΥ,µ))
299
+ (Thm. 4.17). As a consequence of BE1(0, ∞), we obtain local Poincaré and local
300
+ log-Sobolev inequalities (Cor. 4.18).
301
+ In Section 5, we prove the log-Harnack inequality, the dimension-free Harnack
302
+ inequality, the Lipschitz contraction and L∞(µ)-to-Lip(¯dΥ) properties (Thm. 5.1).
303
+ Our proof strategy is to lift the corresponding functional inequalities from the space
304
+ of finitely many configurations.
305
+ In Section 6, we extend Theorem to the case of general point processes beyond
306
+ sineβ (Thm. 6.2).
307
+ Acknowledgement. A large part of the current work has been completed while
308
+ the author was at Bielefeld University. He gratefully acknowledges funding by the
309
+ Alexander von Humboldt Stiftung to support his stay.
310
+ Data Availability Statement. Data sharing not applicable to this article as no
311
+ datasets were generated or analysed during the current study.
312
+ 2. Notation and Preliminaries
313
+ 2.1. Numbers, Tensors, Function Spaces. We write N := {1, 2, 3, . . .}, N0 =
314
+ {0, 1, 2, . . .}, N := N∪{+∞} and N0 := N0 ∪{+∞}. The uppercase letter N is used
315
+ for N ∈ N0, while the lowercase letter n is used for n ∈ N0. We shall adhere to the
316
+ following conventions:
317
+ • the superscript □×N (the subscript □×N) denotes (N-fold) product objects;
318
+ • the superscript □⊗N (the subscript □⊗N) denotes (N-fold) tensor objects;
319
+
320
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
321
+ 5
322
+ • the superscript □⊙N (the subscript □⊙N) denotes (N-fold) symmetric tensor
323
+ objects;
324
+ Let (X, τ) be a topological space with σ-finite Borel measure ν. We use the following
325
+ symbols:
326
+ (a) Lp(ν) (1 ≤ p ≤ ∞) for the space of ν-equivalence classes of functions u
327
+ with |u|p ν-integrable when 1 ≤ p < ∞, and with u ν-essentially bounded
328
+ when p = ∞. The Lp(ν)-norm is denoted by ∥u∥p
329
+ Lp(ν) := ∥u∥p
330
+ p :=
331
+
332
+ X |u|p dν
333
+ for 1 ≤ p < ∞, and ∥u∥L∞(ν) := ∥u∥∞ = esssupX u. In the case of p = 2, the
334
+ inner-product is denoted by (u, v)L2(ν) :=
335
+
336
+ X uv dν;
337
+ (b) Lp
338
+ s(ν⊗n) := {u ∈ Lp(ν⊗n) : u is symmetric} where u is said to be symmetric
339
+ if and only if u(x1, . . . , xk) = u(xσ(1), . . . , xσ(k)) for any element σ ∈ S(k) in
340
+ the k-symmetric group.
341
+ (c) Cb(X) for the space of τ-continuous bounded functions on X; if X is locally
342
+ compact, C0(X) denotes the space of τ-continuous and compactly supported
343
+ functions on X; C∞
344
+ 0 (R) for the space of compactly supported smooth func-
345
+ tions on R;
346
+ (d) We write 1A for the indicator function on A, i.e., 1A(x) = 1 if x ∈ A, and
347
+ 1A(x) = 0 otherwise.
348
+ 2.2. Dirichlet forms. We refer the reader to [MR90, BH91] for this subsection.
349
+ Throughout this paper, a Hilbert space always means a Hilbert space with inner
350
+ product (·, ·)H taking value in R.
351
+ Dirichlet forms.
352
+ Given a bilinear form (Q, D(Q)) on a Hilbert space H, we write
353
+ Q(u) := Q(u, u) ,
354
+ Qα(u, v) := Q(u, v) + α(u, v)H , α > 0 .
355
+ Let (X, Σ, ν) be a σ-finite measure space. A symmetric Dirichlet form on L2(ν) is
356
+ a non-negative definite densely defined closed symmetric bilinear form (Q, D(Q))
357
+ on L2(ν) satisfying the Markov property
358
+ u0 := 0 ∨ u ∧ 1 ∈ D(Q)
359
+ and
360
+ Q(u0) ≤ Q(u) ,
361
+ u ∈ D(Q) .
362
+ Throughout this article, Dirichlet form always means symmetric Dirichlet form. If
363
+ not otherwise stated, D(Q) is always regarded as a Hilbert space with norm
364
+ ∥ · ∥D(Q) := Q1( · )1/2 :=
365
+
366
+ Q( · ) + ∥ · ∥2
367
+ L2(ν) .
368
+ In order to distinguish Dirichlet forms defined in different base spaces with different
369
+ reference measures, we often use the notation QX,ν to specify the base space X and
370
+ the reference measure ν.
371
+ Square field.
372
+ A Dirichlet form (Q, D(Q)) admits square field Γ if there exists a
373
+ dense subspace H ⊂ D(Q) ∩ L∞(ν) having the following property: for any u ∈ H,
374
+
375
+ 6
376
+ K. SUZUKI
377
+ there exists v ∈ L1(ν) so that
378
+ 2Q(uh, u) − Q(h, u2) =
379
+
380
+ X
381
+ hv dν
382
+ ∀h ∈ D(Q) ∩ L∞(ν) .
383
+ Such v is denoted by Γ(u). The square field Γ can be uniquely extended as an
384
+ operator on D(Q) × D(Q) → L1(ν) ([BH91, Thm. I.4.1.3]).
385
+ Semigroups and generators.
386
+ We refer the reader to [MR90, Chap. I, Sec. 2] for
387
+ the following contents.
388
+ Let (Q, D(Q)) be a symmetric closed form on a Hilbert
389
+ space H. The infinitesimal generator (A, D(A)) corresponding to (Q, D(Q)) is the
390
+ unique densely defined closed operator on H satisfying the following integration-by-
391
+ parts formula:
392
+ −(u, Av)H = Q(u, v)
393
+ ∀u ∈ D(Q), v ∈ D(A) .
394
+ The resolvent operator {Rα}α≥0 is the unique bounded linear operator on H satis-
395
+ fying
396
+ Qα(Rαu, v) = (u, v)H
397
+ ∀u ∈ H
398
+ v ∈ D(Q) .
399
+ The semigroup {Tt}t≥0 is the unique bounded linear operator on H satisfying
400
+ Gαu =
401
+ � ∞
402
+ 0
403
+ e−αtTtu dt
404
+ u ∈ H .
405
+ Locality.
406
+ Let (Q, D(Q)) be a Dirihclet form on L2(ν). It is called local ([BH91,
407
+ Def. 5.1.2]) if for any F, G ∈ C∞
408
+ c (R) and any u ∈ D(Q),
409
+ supp[F] ∩ supp[G] = ∅ =⇒ Q(F0 ◦ u, G0 ◦ u) = 0 ,
410
+ where F0(x) := F(x) − F(0) and G0(x) := G(x) − G(0).
411
+ 2.3. Metric space. Let X be any non-empty set. A function d: X ×2 → [0, ∞] is
412
+ an extended distance if it is symmetric and satisfying the triangle inequality, and it
413
+ does not vanish outside the diagonal in X ×2, i.e. d(x, y) = 0 iff x = y; a distance
414
+ if it is finite. Let x0 ∈ X and r ∈ [0, ∞). We write Br(x0) := {dx0 ≤ r}, where
415
+ dx0 := d(x0, ·).
416
+ Lipschitz algebras.
417
+ A function u : X → R is d-Lipschitz if there exists a con-
418
+ stant L > 0 so that
419
+ ��u(x) − u(y)
420
+ �� ≤ L d(x, y) ,
421
+ x, y ∈ X .
422
+ (2.1)
423
+ The smallest constant L so that (2.1) holds is the (global) Lipschitz constant of u,
424
+ denoted by Lipd(u). For any non-empty A ⊂ X we write Lip(A, d), resp. Lipb(A, d)
425
+ for the family of all finite, resp. bounded, d-Lipschitz functions on A. For simplic-
426
+ ity of notation, further let Lip(d) := Lip(X, d), resp. Lipb(d) := Lipb(X, d). Set also
427
+ Lip1(d) := {u ∈ Lip(d) : Lipd(u) ≤ 1} and Lip1
428
+ b(d) := Lip1(d) ∩ Lipb(d). For a given
429
+ measure ν, we set
430
+ Lip(d, ν) := {u ∈ Lip(d) : u is ν-measurable} ,
431
+
432
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
433
+ 7
434
+ as well as Lipb(d, ν) and Lip1
435
+ b(d, ν) denoting the corresponding subspaces of ν-
436
+ measurable functions respectively.
437
+ Geodesical convexity.
438
+ A metric space (X, d) is called a geodesic space if for any
439
+ x0, x1 ∈ X there exists a constant speed geodesic ω : [0, 1] → X connecting x0 and x1:
440
+ ω0 = x0 ,
441
+ ω1 = x1 ,
442
+ d(ωt, ωs) = |t − s|d(ω0, ω1)
443
+ ∀t, s ∈ [0, 1] .
444
+ For a function U : X → R ∪ {+∞}, define D(U) := {x ∈ X : U(x) < ∞}. We say
445
+ that U is K-geodesically convex for K ∈ R if for any x0, x1 ∈ D(U) there exists a
446
+ constant speed geodesic ω : [0, 1] → X with ω0 = x0 and ω1 = x1 and
447
+ U(ωt) ≤ (1 − t)U(ω0) + tU(ω1) − K
448
+ 2 t(1 − t)d2(ω0, ω1)
449
+ ∀t ∈ [0, 1] .
450
+ When K = 0, we say that U is geodesically convex.
451
+ 2.4. Cheeger energies. A complete separable geodesic metric space (X, d) equipped
452
+ with fully supported Radon measure ν with finite total mass ν(X) < ∞ is called a
453
+ metric measure space in this article. Let (X, d, ν) be a metric measure space. For
454
+ u ∈ Lip(d), the slope |Ddu|(x) is defined as
455
+ |Ddu|(x) :=
456
+
457
+
458
+
459
+
460
+
461
+ lim sup
462
+ y→x
463
+ |u(x) − u(y)|
464
+ d(x, y)
465
+ if x is not isolated;
466
+ 0
467
+ otherwise .
468
+ The slope is universally measurable, see [AGS14a, Lem. 2.6]. The Cheeger energy
469
+ Chd,ν : L2(ν) → R ∪ {+∞} is defined as the L2(ν)-lower semi-continuous envelope
470
+ of
471
+
472
+ X |Ddu|2 dν:
473
+ Chd,ν(u) := inf
474
+
475
+ lim inf
476
+ n→∞
477
+
478
+ X
479
+ |Ddun|2 dν : un ∈ Lip(d) ∩ L2(ν)
480
+ L2
481
+ −→ u
482
+
483
+ .
484
+ The domain is denoted by W 1,2(X, d, ν) := {u ∈ L2(ν) : Chd,ν(u) < ∞}.
485
+ The
486
+ Cheeger energy Chd,ν can be expressed by the following integration, see [AGS14a,
487
+ Thm. 4.5] : there exists a measurable function |∇u|∗ ∈ L2(ν) so that |∇u|∗ ≤ |Ddu|
488
+ ν-a.e. for every u ∈ Lip(d) and
489
+ Chd,ν(u) =
490
+
491
+ X
492
+ |∇u|2
493
+ ∗ dν
494
+ ∀u ∈ W 1,2(X, d, ν) ,
495
+ where |∇u|∗ is called minimal relaxed slope.
496
+ 2.5. Riemannian Curvature-dimension condition. Let (X, d, ν) be a metric
497
+ measure space. The following definition is an equivalent characterisation of RCD(K, ∞)
498
+ by [AGS15, Cor. 4.18]. We say that (X, d, ν) satisfies the Riemannian Curvature-
499
+ Dimension Condition RCD(K, ∞) for K ∈ R if
500
+ (i) Chd,ν is quadratic, i.e., Chd,ν(u + v) + Chd,ν(u − v) = 2Chd,ν(u) + 2Chd,ν(v);
501
+ (ii) Sobolev-to-Lipschitz property holds, i.e., every u ∈ W 1,2(X, d, ν) with |∇u|∗ ≤
502
+ 1 has a d-Lipschitz ν-representative ˜u satisfying Lip(˜u) ≤ 1;
503
+
504
+ 8
505
+ K. SUZUKI
506
+ (iii) Chd,ν satisfies BE2(K, ∞), i.e., |∇Ttu|2
507
+ ∗ ≤ e−2KtTt|∇u|2
508
+ ∗ for every u ∈ W 1,2(X, d, ν)
509
+ and t > 0.
510
+ In this case, the Cheeger energy Chd,ν is a local Dirichlet form ([AGS14b, §4.3]). We
511
+ note that, while [AGS15, Cor. 4.18] is stated in terms of the minimal weak upper
512
+ gradient denoted by |∇ · |w, it is identical to the minimal relaxed slope |∇ · |∗ due to
513
+ [AGS14a, Thm. 6.2].
514
+ 2.6. Configuration spaces. A configuration on a locally compact Polish space X
515
+ is any N0-valued Radon measure γ on X, which can be expressed by γ = �N
516
+ i=1 δxi
517
+ for N ∈ ¯N, where δx denotes the Dirac measure at x, i.e., δx(A) = 1 if and only if
518
+ x ∈ A. The configuration space Υ = Υ(X) is the space of all configurations over X.
519
+ The space Υ is equipped with the vague topology, i.e., the topology generated by
520
+ the duality of the space C0(X) of continuous functions with compact support. We
521
+ write the restriction γA := γ ⇂A for a Polish subspace A ⊂ X and the corresponding
522
+ restriction map is denoted by
523
+ prA : Υ −→ Υ(A): γ �−→ γA .
524
+ (2.2)
525
+ The N-particle configuration space is denoted by
526
+ ΥN := {γ ∈ Υ : γ(X) = N} ,
527
+ N ∈ N0 .
528
+ Let Sk be the k-symmetric group. It can be readily seen that the k-particle config-
529
+ uration space Υk is isomorphic to the quotient space X×k/Sk:
530
+ Υk ∼= X⊙k := X×k/Sk ,
531
+ k ∈ N0 .
532
+ (2.3)
533
+ The associated projection map from X×k to the quotient space X×k/Sk is denoted
534
+ by Pr. For η ∈ Υ and r > 0, we set
535
+ Υη
536
+ r := {γ ∈ Υ : γBcr = ηBcr} .
537
+ (2.4)
538
+ Conditional probability.
539
+ Let µ be a Borel probability measure on Υ. Let
540
+ µ(· | prBcr(·) = ηBcr)
541
+ denote the regular conditional probability of µ conditioned at η ∈ Υ with respect
542
+ to the σ-field generated by the projection map γ ∈ Υ ↦ prBcr(γ) = γBcr ∈ Υ(Bcr) (see
543
+ e.g., [DS21a, Def. 3.32] for the precise definition). Let µη
544
+ r be the probability measure
545
+ on Υ(Br) defined as
546
+ µη
547
+ r := (prr)#µ(· | prBcr(·) = ηBcr) ,
548
+ (2.5)
549
+ and its restriction on Υk(Br) is denoted by µk,η
550
+ r
551
+ := µη
552
+ r|Υk(Br).
553
+ Note: The conditional probability µ(· | prBcr(·) = ηBcr) is a probability measure on
554
+ the whole space Υ whose support is Υη
555
+ r = {γ ∈ Υ : γBcr = ηBcr}. We may project the
556
+
557
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
558
+ 9
559
+ conditional probability to the probability measure µη
560
+ r on Υ(Br) as in (2.5) without
561
+ loss of information in the sense that
562
+ prr : Υη
563
+ r → Υ(Br) is a bi-measure-preserving bijection .
564
+ (2.6)
565
+ Namely, the projection map prr is bijective with the inverse map pr−1
566
+ r
567
+ defined
568
+ as pr−1
569
+ r (γ) := γ + ηBcr, and both prr and pr−1
570
+ r
571
+ are measure-preserving between the
572
+ two measures µ(· | prBcr(·) = ηBcr) and µη
573
+ r.
574
+ For a measurable function u: Υ → R, r > 0 and for η ∈ Υ, we set
575
+
576
+ r(γ) := u(γ + ηBcr)
577
+ γ ∈ Υ(Br) .
578
+ (2.7)
579
+ By the property of the conditional probability, it is straightforward to see that for
580
+ any u ∈ L1(µ),
581
+
582
+ Υ
583
+ u dµ =
584
+
585
+ Υ
586
+ ��
587
+ Υ(Br)
588
+
589
+ r dµη
590
+ r
591
+
592
+ dµ(η) .
593
+ (2.8)
594
+ See, e.g., [DS21a, Prop. 3.44]. For a measurable set Ω ⊂ Υ, define a section Ωη
595
+ r ⊂
596
+ Υ(Br) at η ∈ Υ on Bc
597
+ r by
598
+ Ωη
599
+ r := {γ ∈ Υ(Br) : γ + ηBcr ∈ Ω} .
600
+ (2.9)
601
+ By applying the disintegration formula (2.8) to u = 1Ω, we obtain
602
+ µ(Ω) =
603
+
604
+ Υ
605
+ µη
606
+ r(Ωη
607
+ r) dµ(η) .
608
+ (2.10)
609
+ Poisson measure.
610
+ Let (X, τ, ν) be a locally compact Polish space with Radon
611
+ measure ν satisfying ν(X) < ∞. The Poisson measure πν on Υ(X) with intensity ν
612
+ is defined in terms of the symmetric tensor measure ν⊙ as follows:
613
+ πν(·) := e−ν(X)
614
+
615
+
616
+ k=0
617
+ ν⊙k�
618
+ · ∩ Υk(X)
619
+
620
+ = e−ν(X)
621
+
622
+
623
+ k=0
624
+ 1
625
+ k!(Pr)#ν⊗k�
626
+ · ∩ Υk(X)
627
+
628
+ .
629
+ (2.11)
630
+ L2-transportation distance.
631
+ Let (X, d) be a locally compact complete separable
632
+ metric space.
633
+ For i = 1, 2 let proji : X×2 → X denote the projection to the ith
634
+ coordinate for i = 1, 2. For γ, η ∈ Υ, let Cpl(γ, η) be the set of all couplings of γ
635
+ and η, i.e.,
636
+ Cpl(γ, η) := {q ∈ M (X
637
+ ×2): (proj1)♯q = γ , (proj2)♯q = η} .
638
+ Here M (X ×2) denotes the space of all Radon measures on X ×2. The L2-transportation
639
+ extended distance on Υ(X) is
640
+ dΥ(γ, η) :=
641
+ inf
642
+ q∈Cpl(γ,η)
643
+ ��
644
+ X×2 d2(x, y) dq(x, y)
645
+ �1/2
646
+ ,
647
+ inf ∅ = +∞ .
648
+ (2.12)
649
+ We refer the readers to [DS21a, §4.2, p.52] for details regarding the L2-transportation
650
+ extended distance dΥ.
651
+ It is important to note that dΥ is an extended distance,
652
+
653
+ 10
654
+ K. SUZUKI
655
+ attaining the value +∞ and dΥ is lower semi-continuous with respect to the product
656
+ vague topology τ ×2
657
+ v
658
+ but never τ ×2
659
+ v -continuous.
660
+ We introduce a variant of the L2-transportation extended distance, called L2-
661
+ transportation-type extended distance ¯dΥ defined as
662
+ ¯dΥ(γ, η) :=
663
+
664
+
665
+
666
+ dΥ(γ, η)
667
+ if γBcr = ηBcr for some r > 0 ,
668
+ +∞
669
+ otherwise .
670
+ (2.13)
671
+ By definition, dΥ ≤ ¯dΥ on Υ, and dΥ = ¯dΥ on Υ(Br) for any r > 0. In particular,
672
+ we have
673
+ Lip(Υ, dΥ) ⊂ Lip(Υ, ¯dΥ) ,
674
+ Lip¯dΥ(u) ≤ LipdΥ(u) ,
675
+ u ∈ Lip(Υ, dΥ) .
676
+ (2.14)
677
+ It can be readily seen that
678
+ ¯dΥ(γ, η) < ∞
679
+ ⇐⇒
680
+ γBcr = ηBcr , γ(Br) = η(Br)
681
+ for some r > 0 .
682
+ (2.15)
683
+ When we work with the configuration space Υ(Rn) over the n-dimensional Eu-
684
+ clidean space Rn or over any Polish subset in Rn, we always choose the Euclidean
685
+ distance d(x, y) = |x − y| and the L2-transportation distance dΥ and ¯dΥ associated
686
+ with d.
687
+ 2.7. sineβ ensemble. Let β > 0 and CβEk be the circular β ensemble on the k-
688
+ particle configuration space, i.e., it is the probability measure Pk,β on the space Υk(S1)
689
+ over the unit circle S1 ⊂ C defined as
690
+ dPk,β :=
691
+ 1
692
+ Zk,β
693
+
694
+ 1≤j<l≤k
695
+ ��eiθj − eiθl��β dθ1
696
+ 2π · · · dθk
697
+ 2π ,
698
+ where the normalisation constant Zk,β is given in terms of Gamma function Γ:
699
+ Zk,β := Γ( 1
700
+ 2βk + 1)
701
+ Γ( 1
702
+ 2β + 1)k .
703
+ According to [KS09, Def. 1.6], the circular β ensemble CβE is defined as the limit
704
+ probability measure Pβ whose Laplace transform is determined as
705
+
706
+ exp
707
+
708
+
709
+
710
+ x∈γ
711
+ f(x)
712
+
713
+ dPβ(γ) = lim
714
+ k→∞
715
+
716
+ exp
717
+
718
+
719
+ k
720
+
721
+ i=1
722
+ f(kθi)
723
+
724
+ dPk,β(θ1, . . . , θk) ,
725
+ for all f ∈ C0(R). In [VV09] a probability measure µβ on Υ(R) called sine β ensem-
726
+ ble has been constructed by a limit of Gaussian β-ensemble. These two measures Pβ
727
+ and µβ turned out to be identical to each other by the work of [Nak14]. Throughout the
728
+ rest of the article, we use the symbol µ = µβ to denote sineβ ensemble (equivalently,
729
+ circular β ensemble) and we do not specify the inverse temperature β as there is no
730
+ particular role played by a special β.
731
+
732
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
733
+ 11
734
+ Number-rigidity.
735
+ A Borel probability µ on Υ = Υ(Rn) is said to be number
736
+ rigid (in short: (R)) if for any bounded domain E ⊂ Rn, there exists Ω ⊂ Υ so that
737
+ µ(Ω) = 1 and, for any γ, η ∈ Ω
738
+ γEc = ηEc implies γ(E) = η(E) .
739
+ (R)
740
+ Namely, the configuration outside E determines the number of particles inside E. The
741
+ number-rigidity has been proven in [Gho15] for the sine2 ensemble and in [NR18],
742
+ [DHLM20] for the sineβ ensemble for general β > 0.
743
+ 3. Curvature bound for finite-particle systems
744
+ In this section, we study Dirichlet forms on the configuration space Υ(Br) over
745
+ metric balls Br ⊂ R. We denote by m and mr the Lebesgue measure on R and
746
+ its restriction on the metric ball Br := [−r, r] respectively, and take the Euclidean
747
+ distance d(x, y) := |x − y| for x, y ∈ Br.
748
+ 3.1. Construction of Dirichlet forms on Υk(Br). Let W 1,2
749
+ s (m⊗k
750
+ r ) be the space
751
+ of m⊗k
752
+ r -classes of (1, 2)-Sobolev and symmetric functions on the product space B×k
753
+ r ,
754
+ i.e.,
755
+ W 1,2
756
+ s (m⊗k
757
+ r ) :=
758
+
759
+ u ∈ L2
760
+ s(m⊗k
761
+ r ) :
762
+
763
+ B×k
764
+ r
765
+ |∇⊗ku|2 dm⊗k
766
+ r
767
+ < ∞
768
+
769
+ ,
770
+ where ∇⊗k denotes the weak derivative on R×k: ∇⊗ku := (∂1u, . . . , ∂ku). The space
771
+ W 1,2
772
+ s (m⊗k
773
+ r ) consisting of symmetric functions, the projection Pr : B×k
774
+ r
775
+ → Υk(Br) ∼=
776
+ B×k
777
+ r /Sk naturally acts on W 1,2
778
+ s (m⊗k
779
+ r ) and the resulting quotient space is denoted by
780
+ W 1,2(m⊙k
781
+ r ), which is the (1, 2)-Sobolev space on Υk(Br):
782
+ W 1,2(m⊙k
783
+ r ) :=
784
+
785
+ u ∈ L2(m⊙k
786
+ r ) :
787
+
788
+ Υk(Br)
789
+ |∇⊙ku|2 dm⊙k
790
+ r
791
+ < ∞
792
+
793
+ ,
794
+ where ∇⊙k is the quotient operator of the weak gradient operator ∇⊗k through the
795
+ projection Pr and m⊙k
796
+ r
797
+ is the symmetric product measure defined as
798
+ m⊙k
799
+ r
800
+ := 1
801
+ k!(Pr)#m⊗k
802
+ r
803
+ .
804
+ For 0 < r < R < ∞, k ∈ N0 and η ∈ Υ(Bc
805
+ r), we introduce the following finite
806
+ Borel measure on Υk(Br): for γ = �k
807
+ i=1 δxi
808
+ dµk,η
809
+ r,R(γ) := e−Ψk,η
810
+ r,R(γ) dm⊙k
811
+ r (γ) ,
812
+ (3.1)
813
+ Ψk,η
814
+ r,R(γ) := − log
815
+ � k
816
+
817
+ i<j
818
+ |xi − xj|β
819
+ k
820
+
821
+ i=1
822
+
823
+ y∈ηBcr ,|y|≤R
824
+ ���1 − xi
825
+ y
826
+ ���
827
+ β
828
+
829
+ .
830
+ The corresponding weighted Sobolev norm is denoted by
831
+ EΥ(Br),µk,η
832
+ r,R(u) :=
833
+
834
+ Υk(Br)
835
+ |∇⊙ku|2 dµk,η
836
+ r,R ,
837
+ u ∈ Lipb(Υk(Br), dΥ) ,
838
+ (3.2)
839
+
840
+ 12
841
+ K. SUZUKI
842
+ where we note that as Lip(Υk, dΥ) ⊂ W 1,2(m⊙k
843
+ r ) and |∇⊙ku| ≤ LipdΥ(u) due to the
844
+ Rademacher theorem descendent from the one in the product Sobolev space W 1,2(m⊗k
845
+ r )
846
+ through the quotient, the expression |∇⊙ku| and its integral against the probability
847
+ measure µk,η
848
+ r,R make sense for u ∈ Lipb(Υk(Br), dΥ).
849
+ Proposition 3.1. The form (3.2) is well-defined and closable. The closure is a local
850
+ Dirichlet form on L2(µk,η
851
+ r,R) and its domain is denoted by D
852
+
853
+ EΥ(Br),µk,η
854
+ r,R).
855
+ Proof. The well-definedness follows from the following inequality:
856
+
857
+ Υk(Br)
858
+ |∇⊙ku|2 dµk,η
859
+ r,R ≤
860
+ ���e−Ψk,η
861
+ r,R
862
+ ���
863
+ L∞(Υk(Br),µk,η
864
+ r,R)
865
+
866
+ Υk(Br)
867
+ |∇⊙ku|2 dm⊙k < ∞ .
868
+ (3.3)
869
+ The closability of EΥ(Br),µk,η
870
+ r,R descends from the closability of the corresponding
871
+ Dirichlet form on the product space B×k
872
+ r
873
+ defined on the space of symmetric d×k-
874
+ Lipschitz functions:
875
+ EB×k
876
+ r
877
+ ,µk,η
878
+ r,R :=
879
+
880
+ B×k
881
+ r
882
+ |∇⊗ku|2e−Ψk,η
883
+ r,R dm⊗k
884
+ r
885
+ ,
886
+ where the closability of EB×k
887
+ r
888
+ ,µk,η
889
+ r,R is a consequence of the continuity of the den-
890
+ sity e−Ψk,η
891
+ r,R on B×k
892
+ r
893
+ and the standard Hamza-type argument by [MR85, Fuk97], see
894
+ for an accessible reference, e.g., [MR90, pp. 44-45]. The locality of the form is an
895
+ immediate consequence of the locality of the gradient operator ∇⊙k.
896
+
897
+ Let µ be the sineβ ensemble. Due to [DHLM20, Thm. 1.1], the following limit
898
+ exists for µ-a.e. η, all x ∈ Br and r > 0:
899
+ lim
900
+ R→∞
901
+
902
+ y∈ηBcr ,|y|≤R
903
+ ���1 − x
904
+ y
905
+ ���
906
+ β
907
+ .
908
+ Recall that µη
909
+ r has been defined in (2.5). By [DHLM20, Thm. 1.1] and the number-
910
+ rigidity (R) of µ, for µ-a.e. η there exists k = k(η) so that
911
+ µη
912
+ r(Υl(Br)) > 0 if and only if l = k(η) ,
913
+ (3.4)
914
+ and for γ = �k
915
+ i=1 δxi,
916
+ dµη
917
+ r = dµk,η
918
+ r
919
+ = e−Ψk,η
920
+ r
921
+
922
+ r
923
+ dm⊙k
924
+ r
925
+ ,
926
+ (3.5)
927
+ Ψk,η
928
+ r (γ) := − log
929
+ � k
930
+
931
+ i<j
932
+ |xi − xj|β
933
+ k
934
+
935
+ i=1
936
+ lim
937
+ R→∞
938
+
939
+ y∈ηBcr ,|y|≤R
940
+ ���1 − xi
941
+ y
942
+ ���
943
+ β
944
+
945
+ ,
946
+ where Zη
947
+ r is the normalising constant. The corresponding weighted Sobolev norm is
948
+ defined as
949
+ EΥ(Br),µk,η
950
+ r (u) :=
951
+
952
+ Υk(Br)
953
+ |∇⊙ku|2 dµk,η
954
+ r
955
+ ,
956
+ u ∈ Lipb(Υk(Br), dΥ) .
957
+ (3.6)
958
+
959
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
960
+ 13
961
+ Proposition 3.2. Let µ be the sineβ ensemble for β > 0. The form (3.6) is well-
962
+ defined and closable for µ-a.e. η. The closure is a local Dirichlet form on L2(µk,η
963
+ r )
964
+ and its domain is denoted by D(EΥ(Br),µk,η
965
+ r ).
966
+ Proof. As e−Ψk,η
967
+ r,R
968
+ R→∞
969
+ −−−→ e−Ψk,η
970
+ r
971
+ uniformly on Υk(Br) for µ-a.e. η by [DHLM20,
972
+ Lem. 2.3 and Proof of Thm. 2.1 in p. 183], the density e−Ψk,η
973
+ r
974
+ is continuous on
975
+ B⊙k
976
+ r , hence the same proof as Prop. 3.1 applies to conclude the statement.
977
+
978
+ 3.2. Curvature bound for finite-particle systems. We show that the poten-
979
+ tial Ψk,η
980
+ r,R defined in (3.1) is geodesically convex in (Υ(Br), dΥ).
981
+ Proposition 3.3. Ψk,η
982
+ r,R is geodesically convex in (Υk(Br), dΥ) for any 0 < r < R <
983
+ ∞, k ∈ N and η ∈ Υ(Bc
984
+ r),
985
+ Proof. Note that if u1, . . . , uk are convex and α1, . . . , αk ≥ 0, then �k
986
+ i=1 αiui is
987
+ again convex. Note also that for any 0 < r < R, any y ∈ [−R, −r] ∪ [r, R] and any
988
+ i, j ∈ {1, 2, . . . , k} with i ̸= j, the functions − log |xi − xj| and − log |1 − xi
989
+ y | are
990
+ convex in the following areas for any σ ∈ Sk:
991
+
992
+ (x1, . . . , xk) ∈ B×k
993
+ r
994
+ : xσ(1) < xσ(2) < · · · < xσ(k)
995
+
996
+ .
997
+ The following expression, therefore, concludes that Ψk,η
998
+ r,R is geodesically convex as a
999
+ function on Υk(Br): for any γ = �k
1000
+ i=1 δxi
1001
+ Ψk,η
1002
+ r,R(γ) = −β
1003
+ k
1004
+
1005
+ i<j
1006
+ log(|xi − xj|) − β
1007
+ k
1008
+
1009
+ i=1
1010
+
1011
+ y∈ηBcr ,|y|≤R
1012
+ log
1013
+ ���1 − xi
1014
+ y
1015
+ ��� .
1016
+ (3.7)
1017
+ The proof is complete.
1018
+
1019
+ Thanks to the geodesical convexity of the potential Ψk,η
1020
+ r,R shown in Prop. 3.3,
1021
+ the Dirichlet form (EΥ(Br),µk,η
1022
+ r,R, D(EΥ(Br),µk,η
1023
+ r,R)) satisfies the Riemannian Curvature-
1024
+ Dimension condition RCD(0, ∞).
1025
+ Proposition 3.4. The space (Υk(Br), dΥ, µk,η
1026
+ r,R) satisfies RCD(0, ∞) for every k ∈
1027
+ N0, 0 < r < R < ∞ and η ∈ Υ, and it holds that
1028
+
1029
+ EΥ(Br),µk,η
1030
+ r,R, D(EΥ(Br),µk,η
1031
+ r,R)
1032
+
1033
+ =
1034
+
1035
+ ChdΥ,µk,η
1036
+ r,R, W 1,2(Υk(Br), dΥ, µk,η
1037
+ r,R)
1038
+
1039
+ .
1040
+ Proof. Noting that B×k
1041
+ r
1042
+ is a convex subset in Rk, the space (B×k
1043
+ r , d×k, m⊗k
1044
+ r ) is a
1045
+ geodesic subspace of Rk and, therefore, satisfies RCD(0, ∞) by the Global-to-Local
1046
+ property of RCD(0, ∞), see [AGS14b, Thm. 6.20]. Noting that the k-particle con-
1047
+ figuration space (Υk(Br), dΥ, µk,η
1048
+ r,R) is the quotient space of (B×k
1049
+ r , d×k, m⊗k) with
1050
+ respect to the symmetric group Sk and that the property RCD(0, ∞) is preserved
1051
+ under the quotient operation with respect to Sk thanks to [GKMS18], we obtain
1052
+ that (Υk(Br), dΥ, m⊙k) satisfies RCD(0, ∞) as well. By the geodesical convexity of
1053
+ the potential Ψk,η
1054
+ r,R shown in Prop. 3.3 and the continuity of the density e−Ψk,η
1055
+ r,R, the
1056
+ weighted space (Υk(Br), dΥ, µk,η
1057
+ r,R) satisfies RCD(0, ∞) by [AGS14b, Prop. 6.21].
1058
+
1059
+ 14
1060
+ K. SUZUKI
1061
+ To conclude the statement, it suffices to check the identity
1062
+ EΥ(Br),µk,η
1063
+ r,R = ChdΥ,µk,η
1064
+ r,R .
1065
+ By the Rademacher theorem on Υk(Br) descendent from the Rademacher theorem
1066
+ on B×k
1067
+ r , the slope |DdΥu| coincides with |∇⊙ku| for u ∈ Lip(Υk(Br), dΥ). Thus,
1068
+ EΥ(Br),µk,η
1069
+ r,R(u) =
1070
+
1071
+ Υk(Br)
1072
+ |DdΥu|2 dµk,η
1073
+ r,R
1074
+ u ∈ Lipb(Υk(Br), dΥ) .
1075
+ (3.8)
1076
+ Since ChdΥ,µk,η
1077
+ r,R is the L2-lower semi-continuous envelope, the functional ChdΥ,µk,η
1078
+ r,R is
1079
+ the maximal L2-lower semi-continuous functional satisfying
1080
+ ChdΥ,µk,η
1081
+ r,R(u) ≤
1082
+
1083
+ Υk(Br)
1084
+ |DdΥu|2 dµk,η
1085
+ r,R .
1086
+ As EΥ(Br),µk,η
1087
+ r,R is closed by Prop. 3.1, in particular, EΥ(Br),µk,η
1088
+ r,R is L2-lower semi-
1089
+ continuous. Therefore, combining the maximality of ChdΥ,µk,η
1090
+ r,R with (3.8), it holds
1091
+ that
1092
+ EΥ(Br),µk,η
1093
+ r,R ≤ ChdΥ,µk,η
1094
+ r,R and W 1,2(Υk(Br), dΥ, µk,η
1095
+ r,R) ⊂ D(EΥ(Br),µk,η
1096
+ r,R)
1097
+ and
1098
+ ChdΥ,µk,η
1099
+ r,R(u) = EΥ(Br),µk,η
1100
+ r,R(u)
1101
+ u ∈ Lipb(Υk(Br), dΥ) .
1102
+ As Lipb(Υk(Br), dΥ) is dense both in D(EΥ(Br),µk,η
1103
+ r,R) and W 1,2(Υk(Br), dΥ, µk,η
1104
+ r,R) by
1105
+ construction, the proof is completed.
1106
+
1107
+ In view of Prop. 3.4 and the approximation Ψk,η
1108
+ r,R to Ψk,η
1109
+ r
1110
+ as R → ∞, we prove
1111
+ that (Υk(Br), dΥ, µk,η
1112
+ r ) satisfies RCD(0, ∞) as well.
1113
+ Proposition 3.5. Let µ be the sineβ ensemble for β > 0. For any 0 < r < ∞ and
1114
+ µ-a.e. η ∈ Υ, the space (Υk(Br), dΥ, µk,η
1115
+ r ) satisfies RCD(0, ∞), where k = k(η) as
1116
+ in (3.4). Furthermore,
1117
+
1118
+ EΥ(Br),µk,η
1119
+ r , D(EΥ(Br),µk,η
1120
+ r )
1121
+
1122
+ =
1123
+
1124
+ ChdΥ,µk,η
1125
+ r , W 1,2(Υk(Br), dΥ, µk,η
1126
+ r )
1127
+
1128
+ .
1129
+ Proof. Since the potential Ψk,η
1130
+ r,R is geodesically convex for any R and it converges
1131
+ pointwise to Ψk,η
1132
+ r
1133
+ as R → ∞ for µ-a.e. η by [DHLM20, Lem. 2.3 and Proof of Thm.
1134
+ 2.1 in p. 183], the potential Ψk,η
1135
+ r
1136
+ is again geodesically convex on (Υk(Br), dΥ). Fur-
1137
+ thermore, as the density e−Ψk,η
1138
+ r,R converges uniformly to e−Ψk,η
1139
+ r
1140
+ on Υk(Br) as R → ∞
1141
+ for µ-a.e. η by [DHLM20, Lem. 2.3 and Proof of Thm. 2.1 in p. 183], the den-
1142
+ sity e−Ψk,η
1143
+ r
1144
+ is continuous on Υ(Br). Noting the fact that the constant multiplication
1145
+ (by the normalisation constant Zη
1146
+ r ) does not change the lower Ricci curvature bound
1147
+ (see e.g., [Stu06, Prop. 4.13]), the same proof as Prop. 3.4 applies to conclude the
1148
+ statement.
1149
+
1150
+
1151
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
1152
+ 15
1153
+ 4. Curvature bound for infinite-particle systems
1154
+ In this section, we construct a local Dirichlet form on Υ = Υ(R) associated with
1155
+ sineβ ensemble µ and show the BE1(0, ∞) property by the following steps: we first
1156
+ construct truncated Dirichlet forms on Υ whose gradient operators are truncated up
1157
+ to configurations inside Br. We then identify them with the superposition Dirichlet
1158
+ forms lifted from Υ(Br), thanks to which we can show BE1(0, ∞) for the truncated
1159
+ forms. We take the monotone limit of the truncated forms to construct a Dirichlet
1160
+ form with invariant measure sineβ ensembles µ and BE1(0, ∞) extends to the limit
1161
+ form. In the end of this section, we discuss several applications of the BE1(0, ∞)
1162
+ property.
1163
+ 4.1. Superposition of Dirichlet forms from Υ(Br) onto Υ. In this subsection,
1164
+ we construct the truncated Dirichlet forms on Υ. We first construct square field
1165
+ operators on Υ and Υ(Br) respectively.
1166
+ For so doing, we introduce a map Uγ,x
1167
+ transferring functions on the configuration space Υ to functions on the base space R.
1168
+ For u : Υ → R, define Uγ,x(u) : R → R by
1169
+ Uγ,x(u)(y) := u
1170
+
1171
+ 1X\{x} ·γ + δy
1172
+
1173
+ − u
1174
+
1175
+ 1X\{x} ·γ
1176
+
1177
+ ,
1178
+ γ ∈ Υ,
1179
+ x ∈ γ .
1180
+ (4.1)
1181
+ In the context of configuration spaces, the operation Uγ,x has been firstly discussed
1182
+ in [MR00, Lem. 1.2], see also [DS21a, Lem. 2.16]. We introduce the localisation of
1183
+ the operator Uγ,x on Br. Recall that for a measurable function u: Υ → R, r > 0
1184
+ and for η ∈ Υ, we set in (2.7)
1185
+
1186
+ r(γ) := u(γ + ηBcr) for γ ∈ Υ(Br).
1187
+ Lemma 4.1. For u : Υ(Br) → R, define Ur
1188
+ γ,x(u) : Br → R by
1189
+ Ur
1190
+ γ,x(u)(y) := u(1X\{x} · γ + δy) − u(1X\{x} · γ)
1191
+ γ ∈ Υ(Br), x ∈ γ .
1192
+ The operation Ur
1193
+ γ,x maps from Lip(Υ(Br), dΥ) to Lip(Br) and Lipschitz constants
1194
+ are contracted by Ur
1195
+ γ,x for any r > 0:
1196
+ Lip(Ur
1197
+ γ,x(u)) ≤ LipdΥ(u)
1198
+ ∀γ ∈ Υ(Br)
1199
+ ∀x ∈ γ .
1200
+ Furthermore, for any u : Υ → R,
1201
+ Ur
1202
+ γBr ,x(uγ
1203
+ r)(y) = Uγ,x(u)(y)
1204
+ for every γ ∈ Υ, x ∈ γBr and y ∈ Br .
1205
+ Proof. Let u ∈ Lip(Υ(Br), dΥ). Then
1206
+ |Ur
1207
+ γ,x(u)(y) − Ur
1208
+ γ,x(u)(z)| = |u(1X\{x} ·γ + δy) − u(1X\{x} ·γ + δz)|
1209
+ ≤ LipdΥ(u)dΥ(1X\{x} ·γ + δy, 1X\{x} ·γ + δz)
1210
+ = LipdΥ(u)|y − z| ,
1211
+ which concludes the first assertion.
1212
+
1213
+ 16
1214
+ K. SUZUKI
1215
+ We verify the second assertion. For every x ∈ γBr and y ∈ Br,
1216
+ Uγ,x(u)(y) = u(1X\{x} · γ + δy) − u(1X\{x} · γ)
1217
+ = u(1X\{x} · γBr + γBcr + δy) − u(1X\{x} · γBr + γBcr)
1218
+ = uγ r(1X\{x} · γBr + δy) − uγ r(1X\{x} · γBr)
1219
+ = Ur
1220
+ γBr ,x(uγ
1221
+ r)(y) .
1222
+ The proof is complete.
1223
+
1224
+ We now define a square field operator on Υ truncated up to particles inside Br.
1225
+ Definition 4.2 (Truncated square field on Υ). Let u : Υ → R be a measurable
1226
+ function so that Uγ,x(u)|Br ∈ W 1,2(mr) for µ-a.e. γ and every x ∈ γBr. The following
1227
+ operator is called the truncated square field ΓΥ
1228
+ r ,
1229
+ (4.2)
1230
+ ΓΥ
1231
+ r (u)(γ) :=
1232
+
1233
+ x∈γBr
1234
+ |∇Uγ,x(u)|2(x) .
1235
+ Thanks to Lem. A.1, Formula (4.2) is well-defined for µ-a.e. γ. Indeed, as Uγ,x(u)|Br ∈
1236
+ W 1,2
1237
+ loc (mr), the weak gradient ∇Uγ,x(u) is well-defined pointwise on a measurable set
1238
+ Σ ⊂ Br with mr(Σc) = 0. By applying Lem. A.1, Formula (4.2) is well-defined on
1239
+ the set Ω(r) of µ-full measure.
1240
+ Based on the truncated square field ΓΥ
1241
+ r , we introduce the truncated form on Υ
1242
+ defined on a certain core.
1243
+ Definition 4.3 (Core). For r > 0, let Cr be defined as the space of µ-classes of
1244
+ measurable functions u so that
1245
+ (a) u ∈ L∞(µ);
1246
+ (b) uη
1247
+ r ∈ Lipb(Υ(Br), dΥ) for µ-a.e. η and r > 0;
1248
+ (c) The following integral is finite:
1249
+ EΥ,µ
1250
+ r
1251
+ (u) :=
1252
+
1253
+ Υ
1254
+ ΓΥ
1255
+ r (u) dµ < ∞ .
1256
+ (4.3)
1257
+ Note that, thanks to Lem. 4.1, if a measurable function u : Υ → R satisfies (b),
1258
+ then Uγ,x(u)|Br ∈ Lip(Br, d) ⊂ W 1,2(mr). Thus, the expression ΓΥ
1259
+ r (u) in (4.3) is
1260
+ well-posed.
1261
+ Definition 4.4 (Square field on Υ(Br)). Fix r > 0 and η ∈ Υ. For a µ-measurable
1262
+ function u : Υ(Br) → R satisfying u|Υk(Br) ∈ D(EΥ(Br),µk,η
1263
+ r ) for any k ∈ N0, we
1264
+ define the following square field operator on Υ(Br):
1265
+ ΓΥ(Br)(u) :=
1266
+
1267
+
1268
+ k=0
1269
+ ���∇⊙k�
1270
+ u|Υk(Br)
1271
+ ����
1272
+ 2
1273
+ ,
1274
+ (4.4)
1275
+ and define the following form:
1276
+ EΥ(Br),µη
1277
+ r(u) :=
1278
+
1279
+ Υ(Br)
1280
+ ΓΥ(Br)(u) dµη
1281
+ r ,
1282
+
1283
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
1284
+ 17
1285
+ D(EΥ(Br),µη
1286
+ r) := {u : Υ(Br) → R, EΥ(Br),µη
1287
+ r(u) < ∞} .
1288
+ Due to the number-rigidity (R), the Dirichlet form EΥ(Br),µη
1289
+ r is equal to EΥ(Br),µk,η
1290
+ r
1291
+ up to the normalising constant multiplication, therefore, it is a Dirichlet form as
1292
+ well. The corresponding semigroup is denoted by {T Υ(Br),µη
1293
+ r
1294
+ t
1295
+ }t≥0.
1296
+ Remark 4.5. The number-rigidity (R) is not necessary to conclude that EΥ(Br),µη
1297
+ r
1298
+ is a Dirichlet form since any countable sum of Dirichlet forms is a Dirichlet form
1299
+ (see e.g., [MR90, Exercise 3.9]).
1300
+ Before discussing properties of truncated forms, we prepare a lemma, which states
1301
+ that the operation (·)η
1302
+ r defined in (2.7) maps from Lip(Υ, ¯dΥ) to Lip(Υ(Br), dΥ) and
1303
+ contracts Lipschitz constants.
1304
+ Lemma 4.6. Let u ∈ Lip(Υ, ¯dΥ). Then, uη
1305
+ r ∈ Lip(Υ(Br), dΥ) and
1306
+ LipdΥ(uη
1307
+ r) ≤ Lip¯dΥ(u) ,
1308
+ ∀η ∈ Υ ,
1309
+ r > 0 .
1310
+ (4.5)
1311
+ Proof. Let γ, ζ ∈ Υ(Br) and η ∈ Υ. Then,
1312
+ |uη r(γ) − uη r(ζ)| = |u(γ + ηBcr) − u(ζ + ηBcr)| ≤ Lip¯dΥ(u)¯dΥ(γ + ηBcr, ζ + ηBcr)
1313
+ = Lip¯dΥ(u)dΥ(γ, ζ) .
1314
+ The proof is completed.
1315
+
1316
+ The following proposition relates the two square fields ΓΥ
1317
+ r and ΓΥ(Br).
1318
+ Proposition 4.7 (Truncated form). The following relations hold on Cr:
1319
+ ΓΥ
1320
+ r (u)(γ + ηBcr) = ΓΥ(Br)(uη
1321
+ r)(γ) ,
1322
+ µ-a.e. η, µη
1323
+ r-a.e. γ ∈ Υ(Br) ,
1324
+ (4.6)
1325
+ EΥ,µ
1326
+ r
1327
+ (u) =
1328
+
1329
+ Υ
1330
+ EΥ(Br),µη
1331
+ r(uη
1332
+ r) dµ(η) ,
1333
+ u ∈ Cr .
1334
+ Furthermore, the Rademacher-type property holds: Lipb(¯dΥ, µ) ⊂ Cr and
1335
+ ΓΥ
1336
+ r (u) ≤ Lip¯dΥ(u)2
1337
+ ∀u ∈ Lipb(¯dΥ, µ) .
1338
+ (4.7)
1339
+ As a consequence, the form (EΥ,µ
1340
+ r
1341
+ , Cr) in (4.3) is a densely defined closable Markovian
1342
+ form and the closure (EΥ,µ
1343
+ r
1344
+ , D(EΥ,µ
1345
+ r
1346
+ )) is a local Dirichlet form on L2(µ). The L2-
1347
+ semigroups corresponding to (EΥ,µ
1348
+ r
1349
+ , D(EΥ,µ
1350
+ r
1351
+ )) is denoted by {T Υ,µ
1352
+ r,t }t≥0.
1353
+ Proof. We first prove (4.6). Let u ∈ Cr. Thanks to (b) in Def. 4.3 and Lem. 4.1,
1354
+ Uγ,x(u) ∈ Lip(Br, d) ,
1355
+ µ-a.e. γ
1356
+ ∀r > 0 .
1357
+ Thus, noting Lip(Br, d) ⊂ W 1,2(mr), the LHS of (4.6) is well-defined. The RHS
1358
+ of (4.6) is also well-defined by (b) in Def. 4.3 and the fact that Lipb(Υ(Br), dΥ) ⊂
1359
+ D(EΥ(Br),µη
1360
+ r) by construction. Thus, for µ-a.e. η, we can take a measurable set Ω =
1361
+ Ω(η) ⊂ Υ(Br) with µη
1362
+ r(Ω) = 1 so that (4.6) is well-defined for every γ ∈ Ω. As µη
1363
+ r is
1364
+ absolutely continuous with respect to the Poisson measure πmr, we may assume that
1365
+
1366
+ 18
1367
+ K. SUZUKI
1368
+ every γ ∈ Ω does not have multiple points, i.e., γ({x}) ∈ {0, 1} for every x ∈ Br.
1369
+ Let γ ∈ Ω ∩ Υk(Br). Then, according to (4.4),
1370
+ ΓΥ(Br)(uη
1371
+ r)(γ) =
1372
+ ���∇⊙k�
1373
+
1374
+ r
1375
+ ����
1376
+ 2
1377
+ (γ)
1378
+ =
1379
+
1380
+ x∈γ
1381
+ ��∇uη
1382
+ r
1383
+
1384
+ 1Br\{x} ·γ + δ•
1385
+ ���2(x)
1386
+ =
1387
+
1388
+ x∈γ
1389
+ ���∇
1390
+
1391
+
1392
+ r
1393
+
1394
+ 1Br\{x} ·γ + δ•
1395
+
1396
+ − uη
1397
+ r
1398
+
1399
+ 1Br\{x} ·γ
1400
+ �����
1401
+ 2
1402
+ (x)
1403
+ =
1404
+
1405
+ x∈γBr
1406
+ ���∇
1407
+
1408
+ u
1409
+
1410
+ 1X\{x} ·(γ + ηBcr) + δ•
1411
+
1412
+ − u
1413
+
1414
+ 1X\{x} ·(γ + ηBcr)
1415
+ �����
1416
+ 2
1417
+ (x)
1418
+ = ΓΥ
1419
+ r (u)(γ + ηBcr)
1420
+ where the second equality followed from the definition of the symmetric gradient
1421
+ operator ∇⊙k, for which we used the fact that γ ∈ Ω does not have multiple points;
1422
+ the third equality follows simply as uη
1423
+ r
1424
+
1425
+ 1Br\{x} ·γ
1426
+
1427
+ does not depend on the variable
1428
+ denoted as •, on which the weak gradient ∇ operates; the fifth equality followed
1429
+ from the definition of the square field ΓΥ
1430
+ r . As this argument holds for arbitrary
1431
+ k ∈ N0, (4.6) has been shown. The Markov property and the locality of EΥ,µ
1432
+ r
1433
+ follow
1434
+ immediately from (4.6) since EΥ(Br),µη
1435
+ r possesses the corresponding properties by
1436
+ construction.
1437
+ We now show the Rademacher-type property: Lipb(¯dΥ, µ) ⊂ Cr and
1438
+ ΓΥ
1439
+ r (u) ≤ Lip¯dΥ(u)2
1440
+ ∀u ∈ Lipb(¯dΥ, µ)
1441
+ ∀r > 0 .
1442
+ (4.8)
1443
+ We first show Lipb(¯dΥ, µ) ⊂ Cr. The verification of (a) in Def. 4.3 is obvious. The
1444
+ verification of (b) in Def. 4.3 follows from the Lipschitz contraction (4.5) of the
1445
+ operator (·)η
1446
+ r. The verification of (c) in Def. 4.3 follows by showing (4.8) as µ is a
1447
+ probability measure.
1448
+ We now prove (4.8). As the Cheeger energy ChdΥ,µk,η
1449
+ r
1450
+ coincided with EΥ(Br),µk,η
1451
+ r
1452
+ by
1453
+ Prop. 3.5, the Rademacher-type property for EΥ(Br),µk,η
1454
+ r
1455
+ follows from that for ChdΥ,µk,η
1456
+ r ,
1457
+ the latter of which is an immediate consequence by the definition of the Cheeger
1458
+ energy. Therefore, we have that
1459
+ ΓΥ(Br)(u) ≤ LipdΥ(u)2
1460
+ ∀u ∈ Lip(Υ(Br), dΥ)
1461
+ ∀r > 0 .
1462
+ (4.9)
1463
+ In view of the relation between ΓΥ
1464
+ r and ΓΥ(Br) in (4.6) and the Lipschitz contrac-
1465
+ tion (4.5) of the operator (·)η
1466
+ r, we concluded (4.8).
1467
+ Noting that Lipb(dΥ, µ) ⊂ L2(µ) is dense (e.g., [AGS14a, Prop. 4.1]) and the
1468
+ fact that Lipb(dΥ, µ) ⊂ Lipb(¯dΥ, µ) ⊂ Cr by (2.14) and (4.8), we obtain that the
1469
+ form (EΥ,µ
1470
+ r
1471
+ , Cr) is densely defined.
1472
+ We now show the closability. Noting that EΥ(Br),µη
1473
+ r is closable for µ-a.e. η by
1474
+ Prop. 3.5, the superposition form ( ¯EΥ,µ
1475
+ r
1476
+ , D( ¯EΥ,µ
1477
+ r
1478
+ )) (defined below in Def. 4.8) is
1479
+ closable (indeed it is closed) by [BH91, Prop. V.3.1.1]. As the two forms (EΥ,µ
1480
+ r
1481
+ , Cr)
1482
+
1483
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
1484
+ 19
1485
+ and ( ¯EΥ,µ
1486
+ r
1487
+ , D( ¯EΥ,µ
1488
+ r
1489
+ )) coincide on Cr and Cr ⊂ D( ¯EΥ,µ
1490
+ r
1491
+ ) by construction, the closability
1492
+ of (EΥ,µ
1493
+ r
1494
+ , Cr) is inherited from the closedness of the superposition form ( ¯EΥ,µ
1495
+ r
1496
+ , D( ¯EΥ,µ
1497
+ r
1498
+ )).
1499
+ The proof is complete.
1500
+
1501
+ The superposition of the Dirichlet form EΥ(Br),µη
1502
+ r onto Υ is now defined below.
1503
+ Definition 4.8 (Superposition Dirichlet form, e.g., [BH91, Prop. V.3.1.1]).
+ $$D(\bar{\mathcal{E}}^{\Upsilon,\mu}_r) := \Big\{ u \in L^2(\mu) : \int_\Upsilon \mathcal{E}^{\Upsilon(B_r),\mu^\eta_r}(u^\eta_r)\, d\mu(\eta) < \infty \Big\}\,, \tag{4.10}$$
+ $$\bar{\mathcal{E}}^{\Upsilon,\mu}_r(u) := \int_\Upsilon \mathcal{E}^{\Upsilon(B_r),\mu^\eta_r}(u^\eta_r)\, d\mu(\eta)\,.$$
1525
+ It is known that ( ¯EΥ,µ
1526
+ r
1527
+ , D( ¯EΥ,µ
1528
+ r
1529
+ )) is a Dirichlet form on L2(µ) [BH91, Prop. V.3.1.1].
1530
+ The L2-semigroup and the infinitesimal generator corresponding to ( ¯EΥ,µ
1531
+ r
1532
+ , D( ¯EΥ,µ
1533
+ r
1534
+ ))
1535
+ are denoted by { ¯T Υ,µ
1536
+ r,t }t≥0 and ( ¯AΥ,µ
1537
+ r
1538
+ , D( ¯AΥ,µ
1539
+ r
1540
+ )) respectively.
1541
+ The semigroup { ¯T Υ,µ
1542
+ r,t }t≥0 corresponding to the superposition form ¯EΥ,µ
1543
+ r
1544
+ can be
1545
+ obtained as the superposition of the semigroup {T Υ(Br),µη
1546
+ r
1547
+ t
1548
+ }t≥0 associated with the
1549
+ form EΥ(Br),µη
1550
+ r. For the following proposition, we refer the reader to [Del21, (iii)
1551
+ Prop. 2.13].
1552
+ Proposition 4.9 ([Del21, (iii) Prop. 2.13]). The following holds:
+ $$\bar{T}^{\Upsilon,\mu}_{r,t} u(\gamma) = T^{\Upsilon(B_r),\mu^\gamma_r}_t u^\gamma_r(\gamma_{B_r})\,, \tag{4.11}$$
+ for $\mu$-a.e. $\gamma \in \Upsilon$, any $t > 0$.
1561
+ Remark 4.10. The proof of [Del21, (iii) Prop. 2.13] has been given in terms of direct
1562
+ integral in a general setting. As the measure µη
1563
+ r can be identified to the conditional
1564
+ probability µ(· | ·Bcr = ηBcr) by a bi-measure-preserving isomorphism as remarked
1565
+ in (2.6), our setting is a particular case of direct integrals discussed in [Del21].
1566
+ We now discuss the relation between EΥ,µ
1567
+ r
1568
+ and ¯EΥ,µ
1569
+ r
1570
+ . As the former form is con-
1571
+ structed as the smallest closed extension of (EΥ,µ
1572
+ r
1573
+ , Cr), it is clear by definition that
1574
+ EΥ,µ
1575
+ r
1576
+ = ¯EΥ,µ
1577
+ r
1578
+ on
1579
+ Cr ,
1580
+ D(EΥ,µ
1581
+ r
1582
+ ) ⊂ D( ¯EΥ,µ
1583
+ r
1584
+ ) .
1585
+ The following theorem proves that the opposite inclusion holds as well.
1586
+ Theorem 4.11. $(\mathcal{E}^{\Upsilon,\mu}_r, D(\mathcal{E}^{\Upsilon,\mu}_r)) = (\bar{\mathcal{E}}^{\Upsilon,\mu}_r, D(\bar{\mathcal{E}}^{\Upsilon,\mu}_r))$.
1595
+ Proof. The inclusion D(EΥ,µ
1596
+ r
1597
+ ) ⊂ D( ¯EΥ,µ
1598
+ r
1599
+ ) with the inequality ¯EΥ,µ
1600
+ r
1601
+ ≤ EΥ,µ
1602
+ r
1603
+ is straight-
1604
+ forward by definition. Noting ¯EΥ,µ
1605
+ r
1606
+ = EΥ,µ
1607
+ r
1608
+ on Cr and D(EΥ,µ
1609
+ r
1610
+ ) is the closure of Cr,
1611
+ it suffices to show that Cr ⊂ D( ¯EΥ,µ
1612
+ r
1613
+ ) is dense. Thanks to Lem. A.4, we only need
1614
+ to show that ¯T Υ,µ
1615
+ r,t Cr ⊂ Cr.
1616
+ As ¯T Υ,µ
1617
+ r,t
1618
+ is an L∞-contraction semigroup by the sub-Markovian property of the
1619
+ semigroup (see, e.g., [MR90, Def. I.4.1]), we obtain ¯T Υ,µ
1620
+ r,t Cr ⊂ L∞(µ), which verifies
1621
+ (a) in Def. 4.3
1622
+
1623
+ 20
1624
+ K. SUZUKI
1625
+ Verification of (b) in Def. 4.3.
1626
+ Let u ∈ Cr and we show that ¯T Υ,µ
1627
+ r,t u satisfies (b)
1628
+ in Def. 4.3. By Prop. 4.9, we can identify the following two operators:
1629
+ ¯T Υ,µ
1630
+ r,t u = T Υ(Br),µ·
1631
+ r
1632
+ t
1633
+
1634
+ r(·Br) .
1635
+ This implies that
1636
+
1637
+ ¯T Υ,µ
1638
+ r,t u
1639
+ �η
1640
+ r(·) = ¯T Υ,µ
1641
+ r,t u(· + ηBcr) = T Υ(Br),µη
1642
+ r
1643
+ t
1644
+
1645
+ r(·) .
1646
+ Take k = k(η) as in (3.4). As the conditional probability µη
1647
+ r is supported only on
1648
+ Υk(Br), we only need to show
1649
+ T Υ(Br),µk,η
1650
+ r
1651
+ t
1652
+
1653
+ r ∈ Lipb(Υk(Br), dΥ) .
1654
+ (4.12)
1655
+ As (Υk(Br), dΥ, µk,γ
1656
+ r ) is RCD(0, ∞) for k = k(η) for µ-a.e. η by Prop. 3.5, the corre-
1657
+ sponding semigroup satisfies L∞(µk,η
1658
+ r )-to-Lipb(Υk(Br), dΥ)-regularisation property
1659
+ ([AGS14a, Thm. 6.5]), which shows that for µ-a.e. η
1660
+ T Υ(Br),µk,η
1661
+ r
1662
+ t
1663
+ v ∈ Lipb(Υk(Br), dΥ)
1664
+ ∀v ∈ L∞(µk,η
1665
+ r ) ,
1666
+ and its Lipschitz constant is bounded as
1667
+ LipdΥ(T Υ(Br),µk,η
1668
+ r
1669
+ t
1670
+ v) ≤ c(t, K)∥v∥L∞(µk,η
1671
+ r
1672
+ ) ,
1673
+ with constant c(t, K) depending only on t and the curvature bound K = 0 (to be
1674
+ more precise, c(t, 0) =
1675
+ 1
1676
+
1677
+ 2t). This proves (4.12), which completes the verification
1678
+ of (b).
1679
+ Verification of (c) in Def. 4.3.
1680
+ Let u ∈ Cr. Thanks to the verification of (b), the
1681
+ square field ΓΥ
1682
+ r ( ¯T Υ,µ
1683
+ r,t u) is well-defined, and by (4.6) it holds that for µ-a.e. η
1684
+ ΓΥ
1685
+ r ( ¯T Υ,µ
1686
+ r,t u)(γ + ηBcr) = ΓΥ(Br)�
1687
+ ( ¯T Υ,µ
1688
+ r,t u)η
1689
+ r
1690
+
1691
+ (γ)
1692
+ µη
1693
+ r-a.e. γ ∈ Υ(Br) .
1694
+ (4.13)
1695
+ In view of the contraction property of the semigroup with respect to the form by
1696
+ general theory (see, e.g., [FOT11, p.23, Lem. 1.3.3]), viz.
1697
+ EΥ(Br),µη
1698
+ r(T Υ(Br),µη
1699
+ r
1700
+ t
1701
+
1702
+ r) ≤ EΥ(Br),µη
1703
+ r(uη
1704
+ r)
1705
+ as well as Prop. 4.9 and (4.13), we obtain
1706
+
1707
+ Υ
1708
+ ΓΥ
1709
+ r ( ¯T Υ,µ
1710
+ r,t u) dµ =
1711
+
1712
+ Υ
1713
+ EΥ(Br),µη
1714
+ r�
1715
+ ( ¯T Υ,µ
1716
+ r,t u)η
1717
+ r
1718
+
1719
+ dµ(η)
1720
+ =
1721
+
1722
+ Υ
1723
+ EΥ(Br),µη
1724
+ r(T Υ(Br),µη
1725
+ r
1726
+ t
1727
+
1728
+ r) dµ(η)
1729
+
1730
+
1731
+ Υ
1732
+ EΥ(Br),µη
1733
+ r(uη
1734
+ r) dµ(η)
1735
+ = EΥ,µ
1736
+ r
1737
+ (u) < ∞ .
1738
+ The verification of (c) is completed. Therefore, we confirmed ¯T Υ,µ
1739
+ r,t Cr ⊂ Cr, which
1740
+ concludes the statement.
1741
+
1742
+
1743
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
1744
+ 21
1745
+ As a consequence of Thm. 4.11 and Prop. 4.9, we obtain the superposition formula
1746
+ for the semigroup {T Υ,µ
1747
+ r,t }t≥0 in terms of the semigroup {T Υ(Br),µη
1748
+ r
1749
+ t
1750
+ }t≥0.
1751
+ Corollary 4.12 (Coincidence of semigroups). The following three operators coin-
1752
+ cide:
1753
+ T Υ,µ
1754
+ r,t u(γ) = ¯T Υ,µ
1755
+ r,t u(γ) = T Υ(Br),µγ
1756
+ r
1757
+ t
1758
+
1759
+ r(γBr) ,
1760
+ (4.14)
1761
+ for µ-a.e. γ ∈ Υ, any t > 0.
1762
+ 4.2. Monotone limit form. We now construct a Dirichlet form on Υ with sineβ-
1763
+ invariant measure µ as the monotone limit of (EΥ,µ
1764
+ r
1765
+ , D(EΥ,µ
1766
+ r
1767
+ ) as r → ∞. The follow-
1768
+ ing proposition follows immediately from the definitions of the square field ΓΥ
1769
+ r and
1770
+ the core Cr.
1771
+ Proposition 4.13 (Monotonicity). The form (EΥ,µ
1772
+ r
1773
+ , D(EΥ,µ
1774
+ r
1775
+ ) and the square field
1776
+ ΓΥ
1777
+ r are monotone increasing as r ↑ ∞, viz.,
1778
+ ΓΥ
1779
+ r (u) ≤ ΓΥ
1780
+ s (u) ,
1781
+ EΥ,µ
1782
+ r
1783
+ (u) ≤ EΥ,µ
1784
+ s
1785
+ (u) ,
1786
+ D(EΥ,µ
1787
+ s
1788
+ ) ⊂ D(EΥ,µ
1789
+ r
1790
+ )
1791
+ r ≤ s .
1792
+ Proof. As Cr is a core of the form (EΥ,µ
1793
+ r
1794
+ , D(EΥ,µ
1795
+ r
1796
+ )), it suffices to check Cs ⊂ Cr
1797
+ and ΓΥ
1798
+ r (u) ≤ ΓΥ
1799
+ s (u) on Cs. Let u ∈ Cs and we show u ∈ Cr. By a simple reasoning
1800
+ similar to the proof of Lem. 4.6, we can see
1801
+
1802
+ r ∈ Lipb(Υ(Br), dΥ)
1803
+ µ-a.e. η
1804
+ if uη
1805
+ s ∈ Lipb(Υ(Bs), dΥ)
1806
+ µ-a.e. η .
1807
+ By Def. 4.2, it is straightforward to see ΓΥ
1808
+ r (u) ≤ ΓΥ
1809
+ s (u). Thus,
1810
+ $$\mathcal{E}^{\Upsilon,\mu}_r(u) = \int_\Upsilon \Gamma^\Upsilon_r(u)\, d\mu \le \int_\Upsilon \Gamma^\Upsilon_s(u)\, d\mu = \mathcal{E}^{\Upsilon,\mu}_s(u) < \infty\,.$$
1827
+ Therefore, we conclude u ∈ Cr. The proof is completed.
1828
+
1829
+ We now define a Dirichlet form on Υ whose invariant measure is the sineβ mea-
1830
+ sure µ by the monotone limit of (EΥ,µ
1831
+ r
1832
+ , D(EΥ,µ
1833
+ r
1834
+ )).
1835
+ Definition 4.14 (Monotone limit form). The form $(\mathcal{E}^{\Upsilon,\mu}, D(\mathcal{E}^{\Upsilon,\mu}))$ is defined as the
+ monotone limit:
+ $$D(\mathcal{E}^{\Upsilon,\mu}) := \big\{ u \in \textstyle\bigcap_{r>0} D(\mathcal{E}^{\Upsilon,\mu}_r) : \mathcal{E}^{\Upsilon,\mu}(u) = \lim_{r\to\infty} \mathcal{E}^{\Upsilon,\mu}_r(u) < \infty \big\}\,, \tag{4.15}$$
+ $$\mathcal{E}^{\Upsilon,\mu}(u) := \lim_{r\to\infty} \mathcal{E}^{\Upsilon,\mu}_r(u)\,.$$
1848
+ The form (EΥ,µ, D(EΥ,µ)) is a Dirichlet form on L2(µ) as it is the monotone limit
1849
+ of Dirichlet forms (e.g., by [MR90, Exercise 3.9]). The square field ΓΥ is defined as
1850
+ the monotone limit of ΓΥ
1851
+ r as well:
1852
+ ΓΥ(u) := lim
1853
+ r→∞ ΓΥ
1854
+ r (u)
1855
+ u ∈ D(EΥ,µ) .
1856
+ (4.16)
1857
+ The corresponding L2(µ)-semigroup is denoted by {T Υ,µ
1858
+ t
1859
+ }t≥0.
1860
+
1861
+ 22
1862
+ K. SUZUKI
1863
+ We now show that the form (EΥ,µ, D(EΥ,µ)) is a local Dirichlet form on L2(µ) and
1864
+ satisfies the Rademacher-type property with respect to the L2-transportation-type
1865
+ distance ¯dΥ.
1866
+ Proposition 4.15. The form (EΥ,µ, D(EΥ,µ)) is a local Dirichlet form on L2(µ).
1867
+ Furthermore, (EΥ,µ, D(EΥ,µ)) satisfies Rademacher-type property:
1868
+ Lip(¯dΥ, µ) ⊂ D(EΥ,µ) ,
1869
+ ΓΥ(u) ≤ Lip¯dΥ(u)2
1870
+ ∀u ∈ Lip(¯dΥ, µ) .
1871
+ (4.17)
1872
+ Proof. The local property of (EΥ,µ, D(EΥ,µ)) follows from the fact that (EΥ,µ, D(EΥ,µ))
1873
+ is the monotone limit of the local Dirichlet form (EΥ,µ
1874
+ r
1875
+ , D(EΥ,µ
1876
+ r
1877
+ )).
1878
+ We show the
1879
+ Rademacher-type property. Since ΓΥ is the limit square field of ΓΥ
1880
+ r as in (4.16), it
1881
+ suffices to show
1882
+ Lip(¯dΥ, µ) ⊂ Cr
1883
+ and
1884
+ ΓΥ
1885
+ r (u) ≤ Lip¯dΥ(u)2
1886
+ ∀u ∈ Lip(¯dΥ, µ)
1887
+ ∀r > 0 ,
1888
+ which has been already proven in Prop. 4.7.
1889
+ We verified (4.17).
1890
+ The proof is
1891
+ complete.
1892
+
1893
+ Proposition 4.16. The semigroup {T Υ,µ
1894
+ t
1895
+ }t≥0 is the L2(µ)-strong operator limit of
1896
+ the semigroups {T Υ,µ
1897
+ r,t }t≥0, viz.,
1898
+ L2(µ)– lim
1899
+ r→∞ T Υ,µ
1900
+ r,t u = T Υ,µ
1901
+ t
1902
+ u
1903
+ ∀u ∈ L2(µ) ,
1904
+ t > 0 .
1905
+ Proof. The statement follows from the monotonicity of (EΥ,µ
1906
+ r
1907
+ , D(EΥ,µ
1908
+ r
1909
+ ) as r ↑ ∞
1910
+ proven in Prop. 4.13 and [RS80, S.14, p.373].
1911
+
1912
+ 4.3. Bakry–Émery Curvature bound for (EΥ,µ, D(EΥ,µ)). In this subsection,
1913
+ we prove the Bakry–Émery curvature bound for the form (EΥ,µ, D(EΥ,µ)).
1914
+ Theorem 4.17. Let β > 0 and µ be the sineβ ensemble. The form (EΥ,µ, D(EΥ,µ))
1915
+ satisfies the 1-Bakry–Émery curvature dimension condition BE1(0, ∞):
1916
+ ΓΥ�
1917
+ T Υ,µ
1918
+ t
1919
+ u
1920
+ � 1
1921
+ 2 ≤ T Υ,µ
1922
+ t
1923
+
1924
+ ΓΥ(u)
1925
+ 1
1926
+ 2�
1927
+ ∀u ∈ D(EΥ,µ)
1928
+ ∀t > 0 .
1929
+ (BE1(0, ∞))
1930
+ Proof. We first prove BE1(0, ∞) for the form (EΥ,µ
1931
+ r
1932
+ , D(EΥ,µ
1933
+ r
1934
+ )). Let u ∈ D(EΥ,µ
1935
+ r
1936
+ ). By
1937
+ Prop. 3.5, [Han18, Thm. 1.1], by the expression (3.4) of µη
1938
+ r in terms of µk,η
1939
+ r
1940
+ and
1941
+ by the definition (4.4) of ΓΥ(Br), there exists Ξ1
1942
+ r ⊂ Υ with µ(Ξ1
1943
+ r) = 1 so that for
1944
+ every η ∈ Ξ1
1945
+ r there exists a measurable set Ω1,η
1946
+ r
1947
+ ⊂ Υ(Br) with µη
1948
+ r(Ω1,η
1949
+ r ) = 1 satisfying
1950
+ that for every γ ∈ Ω1,η
1951
+ r , the following 1-Bakry–Émery gradient estimate holds:
1952
+ ΓΥ(Br)(T Υ(Br),µη
1953
+ r
1954
+ t
1955
+
1956
+ r)
1957
+ 1
1958
+ 2(γ) ≤ T Υ(Br),µη
1959
+ r
1960
+ t
1961
+
1962
+ ΓΥ(Br)(uη
1963
+ r)
1964
+ � 1
1965
+ 2(γ) .
1966
+ (4.18)
1967
+ By Prop. 4.7, there exists Ξ2
1968
+ r ⊂ Υ with µ(Ξ2
1969
+ r) = 1 so that for every η ∈ Ξ2
1970
+ r there
1971
+ exists a measurable set Ω2,η
1972
+ r
1973
+ ⊂ Υ(Br) with µη
1974
+ r(Ω2,η
1975
+ r ) = 1 satisfying that for every
1976
+ γ ∈ Ω2,η
1977
+ r
1978
+ ΓΥ
1979
+ r (T Υ,µ
1980
+ r,t u)(γ + ηBcr) = ΓΥ(Br)��
1981
+ T Υ,µ
1982
+ r,t u
1983
+ �η
1984
+ r
1985
+
1986
+ (γ) ;
1987
+ (4.19)
1988
+
1989
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
1990
+ 23
1991
+ ΓΥ
1992
+ r (u)(γ + ηBcr) = ΓΥ(Br)(uη
1993
+ r)(γ) .
1994
+ By Cor. 4.12, there exists Λ3
1995
+ r ⊂ Υ with µ(Λ3
1996
+ r) = 1 so that for every γ ∈ Λ3
1997
+ r
1998
+ T Υ,µ
1999
+ r,t u(γ) = T Υ(Br),µγ
2000
+ r
2001
+ t
2002
+
2003
+ r(γ) .
2004
+ (4.20)
2005
+ By the standard disintegration argument, we can write
2006
+ Λ3
2007
+ r =
2008
+
2009
+ η∈Ξ3r
2010
+ pr−1
2011
+ r (Ω3,η
2012
+ r ) ∩ Υη
2013
+ r ,
2014
+ where Ω3,η
2015
+ r
2016
+ = (Λ3
2017
+ r)η
2018
+ r := {γ ∈ Υ(Br) : γ + ηBcr ∈ Λ3
2019
+ r} and Ξ3
2020
+ r = prBcr(Λ3
2021
+ r), and Υη
2022
+ r
2023
+ has been defined in (2.4).
2024
+ By the disintegration formula (2.10), µ(Ξ3
2025
+ r) = 1 and
2026
+ µη
2027
+ r(Ω3,η
2028
+ r ) = 1 for every η ∈ Ξ3
2029
+ r.
2030
+ Let Ξr := Ξ1
2031
+ r ∩ Ξ2
2032
+ r ∩ Ξ3
2033
+ r and Ωη
2034
+ r := Ω1,η
2035
+ r
2036
+ ∩ Ω2,η
2037
+ r
2038
+ ∩ Ω3,η
2039
+ r
2040
+ for η ∈ Ξr. Set
2041
+ Kr :=
2042
+
2043
+ η∈Ξr
2044
+ pr−1
2045
+ r (Ωη
2046
+ r) ∩ Υη
2047
+ r .
2048
+ By construction, µ(Ξr) = 1 and µη
2049
+ r(Ωη
2050
+ r) = 1 for every η ∈ Ξr. By (4.18), (4.19) and
2051
+ (4.20), the following inequalities hold for every γ ∈ Kr:
2052
+ ΓΥ
2053
+ r (T Υ,µ
2054
+ r,t u)
2055
+ 1
2056
+ 2(γ) = ΓΥ
2057
+ r (T Υ,µ
2058
+ r,t u)
2059
+ 1
2060
+ 2(γBr + γBcr)
2061
+ (4.21)
2062
+ = ΓΥ(Br)((T Υ,µ
2063
+ r,t u)γ
2064
+ r)
2065
+ 1
2066
+ 2(γBr)
2067
+ ≤ T Υ(Br),µγ
2068
+ r
2069
+ t
2070
+ ΓΥ(Br)(uγ
2071
+ r)
2072
+ 1
2073
+ 2(γBr)
2074
+ = T Υ(Br),µγ
2075
+ r
2076
+ t
2077
+
2078
+ (ΓΥ
2079
+ r (u)γ
2080
+ r)
2081
+ 1
2082
+ 2�
2083
+ (γBr)
2084
+ = T Υ,µ
2085
+ r,t ΓΥ
2086
+ r (u)
2087
+ 1
2088
+ 2(γ) .
2089
+ Let Θ := {γ ∈ Υ : ΓΥ
2090
+ r (T Υ,µ
2091
+ r,t u)
2092
+ 1
2093
+ 2(γ) ≤ T Υ,µ
2094
+ r,t ΓΥ
2095
+ r (u)
2096
+ 1
2097
+ 2(γ)}. Then Θ is µ-measurable by
2098
+ construction, and thanks to (4.21), it holds that Kr ⊂ Θ. By applying Lem. A.2, we
2099
+ obtain µ(Θ) = 1, which concludes BE1(0, ∞) for the truncated form (EΥ,µ
2100
+ r
2101
+ , D(EΥ,µ
2102
+ r
2103
+ ))
2104
+ for any r > 0.
2105
+ We now prove BE1(0, ∞) of the form (EΥ,µ, D(EΥ,µ)).
2106
+ Let u ∈ D(EΥ,µ) ⊂
2107
+ ∩r>0D(EΥ,µ
2108
+ r
2109
+ ). By the L2(µ)-lower semi-continuity of the square field ΓΥ, the mono-
2110
+ tonicity ΓΥ
2111
+ r ≤ ΓΥ
2112
+ r′ for r ≤ r′ (we will use it in the following displayed formulas in
2113
+ the first equality and in the second inequality), the convergence of T Υ,µ
2114
+ r′,t
2115
+ to T Υ,µ
2116
+ t
2117
+ as r′ → ∞ in the L2-strong operator sense by Prop. 4.16, and BE1(0, ∞) for the
2118
+ truncated form (EΥ,µ
2119
+ r
2120
+ , D(EΥ,µ
2121
+ r
2122
+ )) for any r > 0, the following inequalities hold true:
2123
+ ΓΥ(T Υ,µ
2124
+ t
2125
+ u)1/2 = lim
2126
+ r→∞ ΓΥ
2127
+ r (T Υ,µ
2128
+ t
2129
+ u)1/2 ≤ lim
2130
+ r→∞ lim inf
2131
+ r′→∞ ΓΥ
2132
+ r (T Υ,µ
2133
+ r′,t u)1/2
2134
+ ≤ lim inf
2135
+ r′→∞ ΓΥ
2136
+ r′(T Υ,µ
2137
+ r′,t u)1/2
2138
+ ≤ lim inf
2139
+ r′→∞ T Υ,µ
2140
+ r′,t ΓΥ
2141
+ r′(u)1/2
2142
+ = T Υ,µ
2143
+ t
2144
+ ΓΥ(u)1/2 .
2145
+
2146
+ 24
2147
+ K. SUZUKI
2148
+ The last equality in the above displayed formulas followed from the following argu-
2149
+ ment: by the L2(µ)-contraction property of T Υ,µ
2150
+ r′,t , the monotonicity of ΓΥ
2151
+ r′ as r′ ↑ ∞,
2152
+ and the convergence of the semigroups T Υ,µ
2153
+ r′,t
2154
+ to T Υ,µ
2155
+ t
2156
+ as r′ → ∞ in the L2-strong
2157
+ operator sense by Prop. 4.16,
2158
+ ��T Υ,µ
2159
+ r′,t ΓΥ
2160
+ r′(u)1/2 − T Υ,µ
2161
+ t
2162
+ ΓΥ(u)1/2��
2163
+ L2(µ)
2164
+ =
2165
+ ��T Υ,µ
2166
+ r′,t ΓΥ
2167
+ r′(u)1/2 − T Υ,µ
2168
+ r′,t ΓΥ(u)1/2��
2169
+ L2(µ) +
2170
+ ��T Υ,µ
2171
+ r′,t ΓΥ(u)1/2 − T Υ,µ
2172
+ t
2173
+ ΓΥ(u)1/2��
2174
+ L2(µ)
2175
+
2176
+ ��ΓΥ
2177
+ r′ (u)1/2 − ΓΥ(u)1/2��
2178
+ L2(µ) +
2179
+ ��T Υ,µ
2180
+ r′,t ΓΥ(u)1/2 − T Υ,µ
2181
+ t
2182
+ ΓΥ(u)1/2��
2183
+ L2(µ)
2184
+ r′→∞
2185
+ −−−→ 0 .
2186
+ The proof is completed.
2187
+
2188
+ 4.4. Integral Bochner, local Poincaré and local log-Sobolev inequalities.
2189
+ As an application of BE(0, ∞) proven in Thm. 4.17, we show several functional
2190
+ inequalities. We define the integral Γ2-operator as follows:
2191
+ ΓΥ,µ
2192
+ 2
2193
+ (u, ϕ) :=
2194
+
2195
+ Υ
2196
+ �1
2197
+ 2ΓΥ(u)AΥ,µϕ − ΓΥ(u, AΥ,µu)ϕ
2198
+
2199
+ dµ ,
2200
+ (4.22)
2201
+ D(ΓΥ,µ
2202
+ 2
2203
+ ) :=
2204
+
2205
+ (u, ϕ) : D(AΥ,µ)×2 : AΥ,µu ∈ D(EΥ,µ), ϕ, AΥ,µu ∈ L∞(µ)
2206
+
2207
+ ,
2208
+ where AΥ,µ denotes the L2(µ)-infinitesimal generator associated with (EΥ,µ, D(EΥ,µ)).
2209
+ Corollary 4.18. Let µ be the sineβ ensemble with β > 0. The following hold:
2210
+ (a) (Integral Bochner inequality) for every (u, ϕ) ∈ D(ΓΥ,µ
2211
+ 2
2212
+ )
2213
+ ΓΥ,µ
2214
+ 2
2215
+ (u, ϕ) ≥ 0 ;
2216
+ (b) (local Poincaré inequality) for u ∈ D(EΥ,µ) and t > 0,
2217
+ T Υ,µ
2218
+ t
2219
+ u2 − (T Υ,µ
2220
+ t
2221
+ u)2 ≤ 2tT Υ,µ
2222
+ t
2223
+ ΓΥ(u) ,
2224
+ T Υ,µ
2225
+ t
2226
+ u2 − (T Υ,µ
2227
+ t
2228
+ u)2 ≥ 2tΓΥ(T Υ,µ
2229
+ t
2230
+ u) ;
2231
+ (c) (local logarithmic Sobolev inequality) for non-negative u ∈ D(EΥ,µ)
2232
+ and t > 0,
2233
+ T Υ,µ
2234
+ t
2235
+ u log u − T Υ,µ
2236
+ t
2237
+ u log T Υ,µ
2238
+ t
2239
+ u ≤ tT Υ,µ
2240
+ t
2241
+ �ΓΥ(u)
2242
+ u
2243
+
2244
+ ,
2245
+ T Υ,µ
2246
+ t
2247
+ u log u − T Υ,µ
2248
+ t
2249
+ u log T Υ,µ
2250
+ t
2251
+ u ≥ tΓΥ(T Υ,µ
2252
+ t
2253
+ u)
2254
+ T Υ,µ
2255
+ t
2256
+ u
2257
+ .
2258
+ Proof. The statement (a) follows from BE(0, ∞) proven in Thm. 4.17 and [AGS15,
2259
+ Cor. 2.3]. The statement (b) and (c) are consequences of BE(0, ∞), see e.g., [BGL14,
2260
+ Thm.s 4.7.2, 5.5.2.].
2261
+
2262
+
2263
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
2264
+ 25
2265
+ 5. Dimension-free/log Harnack inequalities and Lipschitz
2266
+ regularisation
2267
+ In this section, we prove functional inequalities involving the Bakry–Émery cur-
2268
+ vature bound BE(0, ∞) and the L2-transportation-type extended distance ¯dΥ.
2269
+ Theorem 5.1. Let µ be the sineβ ensemble with β > 0. Then the following inequal-
2270
+ ities hold:
2271
+ (a) (log-Harnack inequality) for every non-negative u ∈ L∞(Υ, µ), t > 0,
2272
+ there exists Ω ⊂ Υ so that µ(Ω) = 1 and
2273
+ T Υ,µ
2274
+ t
2275
+ (log u)(γ) ≤ log(T Υ,µ
2276
+ t
2277
+ u)(η) + ¯dΥ(γ, η)2 ,
2278
+ every γ, η ∈ Ω ;
2279
+ (b) (dimension-free Harnack inequality) for every non-negative u ∈ L∞(Υ, µ),
2280
+ t > 0 and α > 1 there exists Ω ⊂ Υ so that µ(Ω) = 1 and
2281
+ (T Υ,µ
2282
+ t
2283
+ u)α(γ) ≤ T Υ,µ
2284
+ t
2285
+ uα(η) exp
2286
+
2287
+ α
2288
+ 2(α − 1)
2289
+ ¯dΥ(γ, η)2�
2290
+ ,
2291
+ for every γ, η ∈ Ω ;
2292
+ (c) (Lipschitz contraction) For u ∈ Lipb(¯dΥ, µ) and t > 0,
2293
+ T Υ,µ
2294
+ t
2295
+ u has a ¯dΥ-Lipschitz µ-modification ˜T Υ,µ
2296
+ t
2297
+ u
2298
+ and the following estimate holds:
2299
+ Lip¯dΥ( ˜T Υ,µ
2300
+ t
2301
+ u) ≤ Lip¯dΥ(u) ;
2302
+ (d) (L∞-to-Lip regularisation) For u ∈ L∞(µ) and any t > 0,
2303
+ T Υ,µ
2304
+ t
2305
+ u has a ¯dΥ-Lipschitz µ-modification ˜T Υ,µ
2306
+ t
2307
+ u
2308
+ and the following estimate holds:
2309
+ Lip¯dΥ( ˜T Υ,µ
2310
+ t
2311
+ u) ≤
2312
+ 1
2313
+
2314
+ 2t∥u∥L∞(µ) .
2315
+ Proof. We prove (a). By the relation between T Υ,µ
2316
+ r,t
2317
+ and T Υ(Br),µ·
2318
+ r
2319
+ t
2320
+ (·Br) in Prop. 4.12,
2321
+ there exists a measurable set Ωr
2322
+ sem ⊂ Υ with µ(Ωr
2323
+ sem) = 1 so that for every η ∈ Ωr
2324
+ sem
2325
+ T Υ,µ
2326
+ r,t (η) = T Υ(Br),µη
2327
+ r
2328
+ t
2329
+ (ηBr) .
2330
+ (5.1)
2331
+ Let u ∈ L∞(µ). Thanks to Lem. A.3, there exists Ωr
2332
+ ∞ ⊂ Υ so that µ(Ωr
2333
+ ∞) = 1
2334
+ and
2335
+
2336
+ r ∈ L∞(µη
2337
+ r),
2338
+ ∀η ∈ Ωr
2339
+ ∞,
2340
+ ∀r ∈ N .
2341
+ By Prop. 3.5, there exists a measurable set Ωr
2342
+ rcd ⊂ Υ so that µ(Ωr
2343
+ rcd) = 1 and
2344
+ (Υk, dΥ, µη
2345
+ r) is RCD(0, ∞) with k = k(η) as in (3.4) for every η ∈ Ωr
2346
+ rcd.
2347
+ Let Ωr := Ωr
2348
+ sem ∩ Ωr
2349
+ ∞ ∩ Ωr
2350
+ rcd. As the log-Harnack inequality holds in RCD spaces
2351
+ (see, [AGS15, Lem. 4.6]), the following holds for every η ∈ Ωr and k = k(η)
2352
+ T Υk(Br),µk,η
2353
+ r
2354
+ t
2355
+ (log uη
2356
+ r)(γ) ≤ log(T Υk(Br),µk,η
2357
+ r
2358
+ t
2359
+
2360
+ r)(ζ) + dΥ(γ, ζ)2 ,
2361
+ ∀ γ, ζ ∈ Υk(Br) .
2362
+ (5.2)
2363
+
2364
+ 26
2365
+ K. SUZUKI
2366
+ Noting the convergence of the semigroups {T Υ,µ
2367
+ r,t }t≥0 to {T Υ,µ
2368
+ t
2369
+ }t≥0 in the L2(µ)-
2370
+ operator sense by Prop. 4.16, there exist Ωcon ⊂ Υ with µ(Ωcon) = 1 and a (non-
2371
+ relabelled) subsequence of {r} so that for every γ ∈ Ωcon
2372
+ T Υ,µ
2373
+ r,t (log u)(γ)
2374
+ r→∞
2375
+ −−−→ T Υ,µ
2376
+ t
2377
+ (log u)(γ) ,
2378
+ log(T Υ,µ
2379
+ r,t u)(γ)
2380
+ r→∞
2381
+ −−−→ log(T Υ,µ
2382
+ t
2383
+ u)(γ) .
2384
+ (5.3)
2385
+ Let Ω′ = Ωcon ∩r∈N Ωr, which by construction satisfies µ(Ω′) = 1. Our goal is now
2386
+ to prove that there exists Ω ⊂ Ω′ with µ(Ω) = 1 so that
2387
+ T Υ,µ
2388
+ t
2389
+ (log u)(γ) ≤ log(T Υ,µ
2390
+ t
2391
+ u)(η) + ¯dΥ(γ, η)2 ,
2392
+ every γ, η ∈ Ω .
2393
+ (5.4)
2394
+ Thanks to (5.3), Formula (5.4) comes down to the corresponding inequality for the
2395
+ semigroup {T Υ,µ
2396
+ r,t }t≥0 for any r > 0:
2397
+ T Υ,µ
2398
+ r,t (log u)(γ) ≤ log(T Υ,µ
2399
+ r,t u)(η) + ¯dΥ(γ, η)2 ,
2400
+ every γ, η ∈ Ω .
2401
+ (5.5)
2402
+ We prove (5.5) by contradiction. Suppose that for any Ω ⊂ Ω′ with µ(Ω) = 1,
2403
+ there exists γ, η ∈ Ω so that
2404
+ T Υ,µ
2405
+ r,t (log u)(γ) ≥ log(T Υ,µ
2406
+ r,t u)(η) + ¯dΥ(γ, η)2 .
2407
+ (5.6)
2408
+ We may assume that ¯dΥ(γ, η) < ∞, otherwise, we have nothing to prove. Thus,
2409
+ by (2.15), there exists r > 0 so that
2410
+ γBcr = ηBcr ,
2411
+ γ(Br) = η(Br) .
2412
+ (5.7)
2413
+ By making use of (5.1), (5.2), (5.7), we obtain
2414
+ T Υ,µ
2415
+ r,t (log u)(γ) = T Υ,µ
2416
+ r,t (log u)(γBr + γBcr)
2417
+ (5.8)
2418
+ = T Υ(Br),µγ
2419
+ r
2420
+ t
2421
+ (log uγ
2422
+ r)(γBr)
2423
+ ≤ log(T Υ(Br),µγ
2424
+ r
2425
+ t
2426
+ u)(ηBr) + dΥ(γBr, ηBr)2
2427
+ = log(T Υ,µ
2428
+ r,t u)(η) + ¯dΥ(γ, η)2 ,
2429
+ which contradicts (5.6), therefore, the proof of (a) is completed.
2430
+ The proof of (b) follows precisely in the same strategy as above by replacing
2431
+ T Υ,µ
2432
+ t
2433
+ (log u), log(T Υ,µ
2434
+ t
2435
+ u) and ¯dΥ(γ, η)2 by (T Υ,µ
2436
+ t
2437
+ u)α, T Υ,µ
2438
+ t
2439
+ uα and
2440
+ α
2441
+ 2(α−1)¯dΥ(γ, η)2 re-
2442
+ spectively, and noting that the dimension-free Harnack inequality holds on RCD(K, ∞)
2443
+ spaces ([Li15, Thm. 3.1]).
2444
+ The proof of (c): Note that uη
2445
+ r ∈ Lip(Υ(Br), dΥ) whenever u ∈ Lip(Υ, ¯dΥ) and
2446
+ LipdΥ(uη
2447
+ r) ≤ Lip¯dΥ(u) by Lem. 4.6. Note also that the sought conclusion of (c) can
2448
+ be rephrased as
2449
+ ˜T Υ,µ
2450
+ t
2451
+ u(γ) − ˜T Υ,µ
2452
+ t
2453
+ u(η) ≤ Lip¯dΥ(u)¯dΥ(γ, η)
2454
+ ∀γ, η ∈ Υ .
2455
+ Thus, by the same proof strategy as in (a) replacing T Υ,µ
2456
+ t
2457
+ (log u)(γ) and log(T Υ,µ
2458
+ t
2459
+ u)(η)
2460
+ with T Υ,µ
2461
+ t
2462
+ u(γ) and T Υ,µ
2463
+ t
2464
+ u(η), and noting that the Lipschitz contraction property
2465
+
2466
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
2467
+ 27
2468
+ holds on RCD spaces ([AGS14b, (iv) in Thm. 6.1]), we conclude that there exists
2469
+ Ω ⊂ Υ with µ(Ω) = 1 so that
2470
+ T Υ,µ
2471
+ t
2472
+ (γ) − T Υ,µ
2473
+ t
2474
+ (η) ≤ Lip¯dΥ(u)¯dΥ(γ, η)
2475
+ ∀γ, η ∈ Ω .
2476
+ The conclusion now follows from the McShane extension Theorem [DS21b, Lem. 2.1].
2477
+ The proof of (d) is the same as that of (c) but using the L∞-to-Lip property
2478
+ ([AGS14b, Thm. 6.5]) in RCD(K, ∞) spaces instead of [AGS14b, (iv) in Thm. 6.1]).
2479
+ The proof is complete.
2480
+
2481
+ 6. Generalisation
2482
+ We have been so far working in the case of sineβ ensemble. In this section, we
2483
+ seek to generalise the aforementioned statements to general probability measures
2484
+ on Υ = Υ(Rn) for n ∈ N. In this section, we denote by m and mr the Lebesgue
2485
+ measure on Rn and its restriction on Br(0) respectively, and we take the Euclidean
2486
+ distance d(x, y) := |x − y| for x, y ∈ Rn. Let µ be a Borel probability on Υ and
2487
+ assume that it is fully supported on Υ with respect to the vague topology τv. Let
2488
+ K(µη
2489
+ r) ⊂ N0 be defined as
2490
+ K(µη
2491
+ r) := {k ∈ N0 : µk,η
2492
+ r (Υk(Br)) > 0} .
2493
+ Assumption 6.1. Let K ∈ R and µ be a fully supported Borel probability with
2494
+ respect to the vague topology τv. Assume the following conditions:
2495
+ (a) the measure µη
2496
+ r is absolutely continuous with respect to the Poisson mea-
2497
+ sure πmr, and µk,η
2498
+ r
2499
+ is equivalent to πmr|Υk(Br) for any k ∈ K(µη
2500
+ r), µ-a.e. η and
2501
+ any r > 0;
2502
+ (b) the density
2503
+ dµk,η
2504
+ r
2505
+ dπmr|Υk(Br)
2506
+ is τv-continuous on Υk(Br), and the logarithmic density
2507
+ Ψk,η
2508
+ r
2509
+ = − log
2510
+
2511
+ dµk,η
2512
+ r
2513
+ dπmr|Υk(Br)
2514
+
2515
+ is K-geodesically convex with respect to dΥ on Υk(Br) for any k ∈ K(µη
2516
+ r),
2517
+ µ-a.e. η and any r > 0.
2518
+ Under (a) in Assumption, the local Dirichlet form (EΥ,µ, D(EΥ,µ)) is constructed
2519
+ in the same proof as in the case of the sineβ ensemble, as we have not used any partic-
2520
+ ular property of K = 0. We further show the synthetic curvature bound for the
2521
+ form (EΥ,µ, D(EΥ,µ)) and related functional inequalities.
2522
+ Theorem 6.2. Suppose that µ satisfies Assumption 6.1. Then the form (EΥ,µ, D(EΥ,µ))
2523
+ satisfies
2524
+
2525
+ 28
2526
+ K. SUZUKI
2527
+ (a) (Bakry–Émery inequality BE1(K, ∞))
2528
+ ΓΥ�
2529
+ T Υ,µ
2530
+ t
2531
+ u
2532
+ � 1
2533
+ 2 ≤ e−KtT Υ,µ
2534
+ t
2535
+
2536
+ ΓΥ(u)
2537
+ 1
2538
+ 2�
2539
+ ∀u ∈ D(EΥ,µ) ;
2540
+ (b) (Integral Bochner inequality) for every (u, ϕ) ∈ D(ΓΥ,µ
2541
+ 2
2542
+ )
2543
+ ΓΥ,µ
2544
+ 2
2545
+ (u, ϕ) ≥ K
2546
+
2547
+ Υ
2548
+ ΓΥ(u)ϕ dµ ;
2549
+ (c) (local Poincaré inequality) for u ∈ D(EΥ,µ) and t > 0,
2550
+ T Υ,µ
2551
+ t
2552
+ u2 − (T Υ,µ
2553
+ t
2554
+ u)2 ≤ 1 − e−2Kt
2555
+ K
2556
+ T Υ,µ
2557
+ t
2558
+ ΓΥ(u) ,
2559
+ T Υ,µ
2560
+ t
2561
+ u2 − (T Υ,µ
2562
+ t
2563
+ u)2 ≥ e−2Kt − 1
2564
+ K
2565
+ ΓΥ(T Υ,µ
2566
+ t
2567
+ u) ;
2568
+ (d) (local logarithmic Sobolev inequality) for non-negative u ∈ D(EΥ,µ)
2569
+ and t > 0,
2570
+ T Υ,µ
2571
+ t
2572
+ u log u − T Υ,µ
2573
+ t
2574
+ u log T Υ,µ
2575
+ t
2576
+ u ≤ 1 − e−2Kt
2577
+ 2K
2578
+ T Υ,µ
2579
+ t
2580
+ �ΓΥ(u)
2581
+ u
2582
+
2583
+ ,
2584
+ T Υ,µ
2585
+ t
2586
+ u log u − T Υ,µ
2587
+ t
2588
+ u log T Υ,µ
2589
+ t
2590
+ u ≥ e−2Kt − 1
2591
+ 2K
2592
+ ΓΥ(T Υ,µ
2593
+ t
2594
+ u)
2595
+ T Υ,µ
2596
+ t
2597
+ u
2598
+ .
2599
+ (e) (log Harnack inequality) for every non-negative u ∈ L∞(Υ, µ), t > 0,
2600
+ there exists Ω ⊂ Υ so that µ(Ω) = 1 and
2601
+ T Υ,µ
2602
+ t
2603
+ (log u)(γ) ≤ log(T Υ,µ
2604
+ t
2605
+ u)(η) +
2606
+ K
2607
+ 2(1 − e−2Kt)
2608
+ ¯dΥ(γ, η)2 ,
2609
+ ∀γ, η ∈ Ω ;
2610
+ (f) (dimension-free Harnack inequality) for every non-negative u ∈ L∞(Υ, µ),
2611
+ t > 0 and α > 1 there exists Ω ⊂ Υ so that µ(Ω) = 1 and
2612
+ (T Υ,µ
2613
+ t
2614
+ u)α(γ) ≤ T Υ,µ
2615
+ t
2616
+ uα(η) exp
2617
+
2618
+ αK
2619
+ 2(α − 1)(1 − e−2Kt)
2620
+ ¯dΥ(γ, η)2�
2621
+ ,
2622
+ ∀γ, η ∈ Ω ;
2623
+ (g) (Lipschitz contraction) For u ∈ Lip(¯dΥ, µ) and t > 0,
2624
+ T Υ,µ
2625
+ t
2626
+ u has a ¯dΥ-Lipschitz µ-modification ˜T Υ,µ
2627
+ t
2628
+ u
2629
+ and the following estimate holds:
2630
+ Lip¯dΥ( ˜T Υ,µ
2631
+ t
2632
+ u) ≤ e−KtLip¯dΥ(u) ;
2633
+ (h) (L∞-to-Lip regularisation) For u ∈ L∞(µ) and t > 0,
2634
+ T Υ,µ
2635
+ t
2636
+ u has a ¯dΥ-Lipschitz µ-modification ˜T Υ,µ
2637
+ t
2638
+ u
2639
+ and the following estimate holds:
2640
+ Lip¯dΥ( ˜T Υ,µ
2641
+ t
2642
+ u) ≤
2643
+ 1
2644
+
2645
+ 2I2K(t)
2646
+ ∥u∥L∞(µ)
2647
+ ∀t > 0 ,
2648
+ where IK(t) :=
2649
+ � t
2650
+ 0 eKr dr.
2651
+
2652
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
2653
+ 29
2654
+ Proof. Thanks to Assumption 6.1, the space (Υk(Br), dΥ, µk,η
2655
+ r ) satisfies RCD(K, ∞)
2656
+ for every k ∈ K(µη
2657
+ r) as in the same proof of Prop. 3.4. As we have not used any
2658
+ particular properties of K = 0 for the proofs in the case of sineβ, the completely
2659
+ same proofs (up to constant multiplication depending only on K) work in the case of
2660
+ general K ∈ R and general µ satisfying Assumption 6.1, which therefore concludes
2661
+ the statements of Thm. 6.2.
2662
+
2663
+ Appendix A.
2664
+ Let m and mr be the Lebesgue measure on Rn and its restriction on Br respectively.
2665
+ Set Υ = Υ(Rn).
2666
+ Lemma A.1. Let µ be a Borel probability on Υ satisfying that µη
2667
+ r is absolutely
2668
+ continuous with respect to the Poisson measure πmr for any r > 0 and µ-a.e. η. Let
2669
+ Σ ⊂ Br so that mr(Σc) = 0. Let Ω(r) := {γ ∈ Υ : γΣ = γBr}. Then,
2670
+ µ
2671
+
2672
+ Ω(r)
2673
+
2674
+ = 1
2675
+ ∀r > 0 .
2676
+ Proof. We fix r > 0 and write simply Ω = Ω(r). By the disintegration formula (2.10),
2677
+ µ(Ω) =
2678
+
2679
+ Υ
2680
+ µη
2681
+ r(Ωη
2682
+ r) dµ(η) .
2683
+ Thus, it suffices to show µη
2684
+ r(Ωη
2685
+ r) = 1 for µ-a.e. η. This is equivalent to show
2686
+ µη
2687
+ r(Ωη
2688
+ r) =
2689
+
2690
+ k∈N0
2691
+ µk,η
2692
+ r (Ωη
2693
+ r) = 1 .
2694
+ (A.1)
2695
+ As µk,η
2696
+ r
2697
+ is absolutely continuous with respect to πmr|Υk(Br), it suffices to prove
2698
+ πmr|Υk(Br)((Ωη
2699
+ r)c) = 0 for every k ∈ N0 and η ∈ Υ.
2700
+ We show that (recall the definition of symmetric product Σ⊙k in (2.3))
2701
+ Σ⊙k ⊂ Ωη
2702
+ r ∩ Υk(Br)
2703
+ ∀η ∈ Υ .
2704
+ (A.2)
2705
+ Let γ ∈ Σ⊙k. Then by the definition of Ω, it holds that γ + ηBcr ∈ Ω for any η ∈ Υ.
2706
+ Thus, by recalling the definition (2.9) of Ωη
2707
+ r, we obtain γ ∈ Ωη
2708
+ r ∩ Υk(Br). Thus,
2709
+ (A.2) holds true.
2710
+ By using (A.2), πmr|Υk(Br) = e−mr(Br)m⊙k
2711
+ r
2712
+ by (2.11) and m⊙k
2713
+ r
2714
+
2715
+ (Σ⊙k)c�
2716
+ = 0 by
2717
+ hypothesis, we conclude that for every η ∈ Υ
2718
+ πmr|Υk(Br)((Ωη
2719
+ r)c) = e−mr(Br)m⊙k
2720
+ r
2721
+ ��
2722
+ Ωη
2723
+ r ∩ Υk(Br)
2724
+ �c�
2725
+ ≤ e−mr(Br)m⊙k
2726
+ r
2727
+
2728
+ (Σ⊙k)c�
2729
+ = 0 .
2730
+ The proof is complete.
2731
+
2732
+ We recall that for η ∈ Υ, we set Υη
2733
+ r := {γ ∈ Υ : γBcr = ηBcr}.
2734
+ Lemma A.2 (disintegration lemma). Assume that there exists a measurable set
2735
+ Ξ ⊂ Υ with µ(Ξ) = 1 so that for every η ∈ Ξ, there exists a family of measurable
2736
+
2737
+ 30
2738
+ K. SUZUKI
2739
+ sets Ωη ⊂ Υ(Br) so that µη
2740
+ r(Ωη) = 1 for every η ∈ Ξ. Let Ω ⊂ Υ be the (not
2741
+ necessarily measurable) subset defined by
2742
+ Ω :=
2743
+
2744
+ η∈Ξ
2745
+ pr−1
2746
+ r (Ωη) ∩ Υη
2747
+ r .
2748
+ Assume further that there exists a measurable set Θ ⊂ Υ so that Ω ⊂ Θ. Then,
2749
+ µ(Θ) = 1.
2750
+ Caveat.
2751
+ As the set Ω is defined as uncountable union of measurable sets, the mea-
2752
+ surability of Ω is not necessarily true in general. The disintegration formula (2.10)
2753
+ is, therefore, not necessarily applicable directly to Ω, which motivates the aforemen-
2754
+ tioned lemma.
2755
+ Proof of Lem. A.2. Let Θη
2756
+ r = {γ ∈ Υ(Br) : γ + ηBcr ∈ Θ} be a section of Θ at ηBcr
2757
+ as in (2.9). Then, Ωη ⊂ Θη
2758
+ r by assumption. Thus, µη
2759
+ r(Θη
2760
+ r) ≥ µη
2761
+ r(Ωη) ≥ 1. By the
2762
+ disintegration formula in (2.10), we have that
2763
+ µ(Θ) =
2764
+
2765
+ Υ
2766
+ µη
2767
+ r(Θη
2768
+ r) dµ(η) ≥ 1 .
2769
+ The proof is completed.
2770
+
2771
+ Lemma A.3. Let µ be a Borel probability on Υ. Let Ω ⊂ Υ satisfy µ(Ω) = 1.
2772
+ Then, there exists Ω′ ⊂ Ω with µ(Ω′) = 1 and
2773
+ µη
2774
+ r(Ωη
2775
+ r) = 1 ,
2776
+ ∀η ∈ Ω′ .
2777
+ (A.3)
2778
+ Proof. By the disintegration formula (2.10),
2779
+ 1 = µ(Ω) =
2780
+
2781
+ Υ
2782
+ µη
2783
+ r(Ωη
2784
+ r) dµ(η) =
2785
+
2786
+
2787
+ µη
2788
+ r(Ωη
2789
+ r) dµ(η) ,
2790
+ by which the statement is readily concluded.
2791
+
2792
+ Lemma A.4. Let (Q, D(Q)) be a closed form on a complete separable Hilbert
2793
+ space H. Let {Tt} and (A, D(A)) be the corresponding semigroup and infinitesi-
2794
+ mal generator respectively. Suppose that there exists an algebra C ⊂ D(Q) so that
2795
+ C ⊂ H is dense and TtC ⊂ C for any t > 0. Then C is dense in D(Q).
2796
+ Proof. It holds that TtD(A) ⊂ D(A) by the general property of semigroups associ-
2797
+ ated with closed forms. Thus, combining it with the hypothesis TtC ⊂ C,
2798
+ Tt(C ∩ D(A)) ⊂ C ∩ D(A) .
2799
+ Thus, by [RS75, Thm. X.49], C ∩ D(A) is dense in the graph norm in the space
2800
+ (A, D(A)). Namely, we obtained
2801
+ (A, C ∩ D(A)) is essentially self-adjoint .
2802
+
2803
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
2804
+ 31
2805
+ The density C ⊂ D(Q) now follows by the density of C ∩ D(A) in the graph norm,
2806
+ by the density of D(A) ⊂ D(Q) due to the general property of closed forms, by the
2807
+ density of C ⊂ H and by a simple integration-by-parts argument
2808
+ −Q(u, u) = (Au, u)H ≤ ∥Au∥H∥u∥H .
2809
+ The proof is complete.
2810
+
2811
+ References
2812
+ [AGS14a] Ambrosio, L., Gigli, N., and Savaré, G. Calculus and heat flow in metric
2813
+ measure spaces and applications to spaces with Ricci bounds from below.
2814
+ Invent. Math., 395:289–391, 2014.
2815
+ [AGS14b] Ambrosio, L., Gigli, N., and Savaré, G. Metric measure spaces with
2816
+ Riemannian Ricci curvature bounded from below.
2817
+ Duke Math. J.,
2818
+ 163(7):1405–1490, 2014.
2819
+ [AGS15] Ambrosio, L., Gigli, N., and Savaré, G.
2820
+ Bakry–Émery Curvature-
2821
+ Dimension
2822
+ Condition
2823
+ and
2824
+ Riemannian
2825
+ Ricci
2826
+ Curvature
2827
+ Bounds.
2828
+ Ann. Probab., 43(1):339–404, 2015.
2829
+ [BGL14] Bakry, D., Gentil, I., and Ledoux, M. Analysis and Geometry of Markov
2830
+ Diffusion Operators, volume 348 of Grundlehren der mathematischen
2831
+ Wissenschaften. Springer, 2014.
2832
+ [BH91] Bouleau, N. and Hirsch, F.
2833
+ Dirichlet forms and analysis on Wiener
2834
+ space. De Gruyter, 1991.
2835
+ [Del21] Dello Schiavo, L. Ergodic Decomposition of Dirichlet Forms via Direct
2836
+ Integrals and Applications. Potential Anal., 2021.
2837
+ [DHLM20] Dereudre, D., Hardy, A., Leblé, T., and Maïda, M. DLR Equations and
2838
+ Rigidity for the Sine-Beta Process. Commun. Pure Appl. Math., pages
2839
+ 172–222, 2020.
2840
+ [DS21a] Dello Schiavo, L. and Suzuki, K.
2841
+ Configuration spaces over sin-
2842
+ gular spaces –I. Dirichlet-Form and Metric Measure Geometry –.
2843
+ arXiv:2109.03192v2 (version 2), 2021.
2844
+ [DS21b] Dello Schiavo, L. and Suzuki, K. On the Rademacher and Sobolev-to-
2845
+ Lipschitz Properties for Strongly Local Dirichlet Spaces. J. Func. Anal.,
2846
+ 281(11):Online first, 2021.
2847
+ [DS22] Dello Schiavo, L. and Suzuki, K. Configuration Spaces over Singular
2848
+ Spaces II – Curvature. arXiv:2205.01379, 2022.
2849
+ [EH15] Erbar, M. and Huesmann, M. Curvature bounds for configuration spaces.
2850
+ Calc. Var., 54:307–430, 2015.
2851
+ [FOT11] Fukushima, M., Oshima, Y., and Takeda, M. Dirichlet forms and sym-
2852
+ metric Markov processes, volume 19 of De Gruyter Studies in Mathe-
2853
+ matics. de Gruyter, extended edition, 2011.
2854
+
2855
+ 32
2856
+ K. SUZUKI
2857
+ [Fuk97] Fukushima, M. Distorted Brownian motions and BV functions. Trends
2858
+ in Probability and Analysis, N. Kono, N-R. Shieh, eds, pages 143–150,
2859
+ 1997.
2860
+ [Gho15] Ghosh, S. Determinantal processes and completeness of random expo-
2861
+ nentials: the critical case. Probab. Theory Relat. Fields, 163(3):643–665,
2862
+ 2015.
2863
+ [GKMS18] Galaz-García, F., Kell, M., Mondino, A., and Sosa, G. On quotients of
2864
+ spaces with Ricci curvature bounded below. J. Funct. Anal., 275:1368–
2865
+ 1446, 2018.
2866
+ [Han18] Han, B.X. New characterizations of ricci curvature on rcd metric mea-
2867
+ sure spaces.
2868
+ Discrete and Continuous Dynamical Systems. Series A,
2869
+ 38(10):4915–4927, 2018.
2870
+ [KS09] Killip, R. and Stoiciu, M. Eigenvalue statistics for cmv matrices: from
2871
+ poisson to clock via random matrix ensembles.
2872
+ Duke Math. J., 146
2873
+ (3)::361–399, 2009.
2874
+ [KT10] Katori, M. and Tanemura, H.
2875
+ Non-equilibrium dynamics of Dyson’s
2876
+ model with an infinite number of particles.
2877
+ Comm. Math. Phys.,
2878
+ 293(2):469–497, 2010.
2879
+ [Li15] H. Li. Dimension-Free Harnack Inequalities on RCD(K, ∞) Spaces. J.
2880
+ Theoret. Probab., 29:1280–1297, 2015.
2881
+ [MR85] Ma, Z.-M. and Röckner, M. Dirichlet forms-closability and change of
2882
+ speed measure, Infinite dimensional analysis and stochastic processes.
2883
+ Research Notes in Math. S. Albeverio, ed., Pitman, 124:119–144, 1985.
2884
+ [MR90] Ma, Z.-M. and Röckner, M.
2885
+ Introduction to the Theory of (Non-
2886
+ Symmetric) Dirichlet Forms. Springer, 1990.
2887
+ [MR00] Ma, Z.-M. and Röckner, M. Construction of Diffusions on Configuration
2888
+ Spaces. Osaka J. Math., 37:273–314, 2000.
2889
+ [Nak14] Nakano, F. Level statistics for one-dimensional schrödinger operators
2890
+ and gaussian beta ensemble. J. Stat. Phys., 156(1):66–93, 2014.
2891
+ [NR18] Najnundel, J. and Reda, C. Rigidity of the Sineβ process. Electron.
2892
+ Commun. Probab., 23:1–8, 2018.
2893
+ [Osa13] Osada, H. Interacting Brownian Motions in Infinite Dimensions with
2894
+ Logarithmic Interaction Potentials. Ann. Probab., 41(1):1–49, 2013.
2895
+ [RS75] Reed, M. and Simon, B. Methods of Modern Mathematical Physics II –
2896
+ Fourier Analysis, Self-Adjointness. Academic Press, New York, London,
2897
+ 1975.
2898
+ [RS80] Reed, M. and Simon, B. Methods of Modern Mathematical Physics I –
2899
+ Functional Analysis. Academic Press, New York, London, 1980.
2900
+
2901
+ CURVATURE BOUND OF DYSON BROWNIAN MOTION
2902
+ 33
2903
+ [Spo87] Spohn, H. Interacting Brownian Particles: A Study of Dyson’s Model.
2904
+ Hydrodynamic Behavior and Interacting Particle Systems, pages 151–
2905
+ 179, 1987.
2906
+ [Stu06] Sturm, K.-T. On the geometry of metric measure spaces. I. Acta Math.,
2907
+ 196:65–131, 2006.
2908
+ [Tsa16] Tsai, L.-C.
2909
+ Infinite dimensional stochastic differential equations for
2910
+ dyson’s model. Probability Theory and Related Fields, 166:801–850, 2016.
2911
+ [VG20] Von Renesse, M.. and Gyünesu, B. Molecules as metric measure spaces
2912
+ with kato-bounded ricci curvature.
2913
+ Comptes Rendus. Mathématique,
2914
+ 358:595–602, 2020.
2915
+ [VV09] Valkó, B. and Virág, B. Continuum limits of random matrices and the
2916
+ brownian carousel. Invent. math., 177(3):463–508, 2009.
2917
+ Department of Mathematical Science, Durham University, Science Laboratories,
2918
+ South Road, DH1 3LE, United Kingdom
2919
+
0NAyT4oBgHgl3EQfbfc0/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
19AyT4oBgHgl3EQfbvd_/content/2301.00269v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0011a87477c61017e21d02cff6a0842721e29c87cda988c7a5a17d30a2ae8da2
3
+ size 2910959
19AyT4oBgHgl3EQfbvd_/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dd1afaf0445042f2c21ba93eb2d8b4f801c272484912f0ba85d33e5b9305b67
3
+ size 3538989
19AyT4oBgHgl3EQfbvd_/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:290837edc72211eec9a702d175b670d8fefc327c57f985e26f445aa5cbe6ffe3
3
+ size 143870
29E0T4oBgHgl3EQfuwF7/content/tmp_files/2301.02609v1.pdf.txt ADDED
@@ -0,0 +1,819 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Hybrid Quantum-Classical Autoencoders for
2
+ End-to-End Radio Communication
3
+ Zsolt Tabi∗, Bence Bak´o∗†, D´aniel T. R. Nagy∗‡, P´eter Vaderna‡, Zs´ofia Kallus‡,
4
+ P´eter H´aga‡, Zolt´an Zimbor´as∗†
5
+ ∗E¨otv¨os Lor´and University, Budapest, Hungary
6
7
+ †Wigner Research Centre for Physics, Budapest, Hungary
8
9
+ ‡Ericsson Research, Budapest, Hungary
10
+ Email: {daniel.a.nagy, peter.vaderna, zsofia.kallus, peter.haga}@ericsson.com
11
+ Abstract—Quantum neural networks are emerging as poten-
12
+ tial candidates to leverage noisy quantum processing units for
13
+ applications. Here we introduce hybrid quantum-classical au-
14
+ toencoders for end-to-end radio communication. In the physical
15
+ layer of classical wireless systems, we study the performance
16
+ of simulated architectures for standard encoded radio signals
17
+ over a noisy channel. We implement a hybrid model, where
18
+ a quantum decoder in the receiver works with a classical
19
+ encoder in the transmitter part. Besides learning a latent space
20
+ representation of the input symbols with good robustness against
21
+ signal degradation, a generalized data re-uploading scheme for
22
+ the qubit-based circuits allows to meet inference-time constraints
23
+ of the application.
24
+ Index Terms—variational quantum algorithms, quantum ma-
25
+ chine learning, quantum autoencoder, radio communication
26
+ I. INTRODUCTION
27
+ One of the most popular Quantum Machine Learning
28
+ (QML) methods are Quantum Neural Networks (QNNs) [1],
29
+ [2]. These are special variational quantum circuits, designed as
30
+ the quantum analogues of classical neural networks. QNNs can
31
+ be optimized with gradient-based or gradient-free optimization
32
+ algorithms forming hybrid quantum-classical training loops
33
+ [3], [4]. Various QNN architectures have been proposed such
34
+ as quantum convolutional neural networks [5], generative mod-
35
+ els [6], long short-term memories [7] and autoencoders [8]–
36
+ [10]. Beside the high activity in algorithmic research within
37
+ QML, their novel benchmarking and requirement setting ap-
38
+ plications are also motivating a wide variety of works [11].
39
+ Although QML is still in a phase of basic research with
40
+ many open questions, its early implementations in wireless
41
+ communication systems spark both scientific curiosity and
42
+ commercial interest [12]. However, high-performance, near
43
+ real-time applications might impose a new set of requirements
44
+ on these solutions.
45
+ Wireless communication has undergone tremendous evolu-
46
+ tion during the last decades. The increasing adoption of AI
47
+ and ML methods is opening up new development possibilities
48
+ in various parts of the radio stack. In the design of the sixth
49
+ generation (6G) wireless networks, AI and ML technologies
50
+ are considered to be tightly integrated into the system and
51
+ smart algorithms can be applied to all aspects of network
52
+ operations and procedures [13]. Considering the improvement
53
+ of quantum computers it is envisioned that quantum algorithms
54
+ and especially QML will play a significant role in future
55
+ networks [12].
56
+ The structure of this paper is as follows. In Sec. II, we
57
+ give a high-level overview of wireless communication systems
58
+ together with an autoencoder solution used in the radio phys-
59
+ ical layer. Our novel hybrid classical-quantum autoencoder
60
+ prototype is presented in Sec. III. We discuss our result in
61
+ Sec. IV. Finally, we conclude with an outlook in Sec. V.
62
+ II. AUTOENCODER ARCHITECTURE IN END-TO-END
63
+ COMMUNICATION
64
+ I
65
+ Q
66
+ 0
67
+ 1
68
+ 2
69
+ 3
70
+ 4
71
+ 5
72
+ 6
73
+ 7
74
+ 8
75
+ 9
76
+ 10
77
+ 11
78
+ 12
79
+ 13
80
+ 14
81
+ 15
82
+ (a)
83
+ I
84
+ Q
85
+ (b)
86
+ Fig. 1: 16-QAM constellation diagrams. (a) Transmitted
87
+ symbols; (b) Received noisy signal.
88
+ s
89
+ Transmitter
90
+ Channel
91
+ Receiver
92
+ ˆs
93
+ x
94
+ y
95
+ Fig. 2: High-level representation of a communication sys-
96
+ tem. A message is transmitted through a noisy communication
97
+ channel to be recovered by the receiver.
98
+ The components of communication networks are organized
99
+ in a layered architecture where each layer is responsible for
100
+ different communication aspects [14]. The physical layer is the
101
+ arXiv:2301.02609v1 [quant-ph] 6 Jan 2023
102
+
103
+ Fig. 3: The proposed hybrid quantum-classical autoencoder embedded into the end-to-end communication architecture.
104
+ The transmitter maps each message s to a symbol, then sends it through the channel. The channel is represented by an
105
+ additive noise acting on the signal x. The receiver, realized by a quantum decoder, consists of a multi-layer QNN adapting
106
+ data re-uploading. It decodes the noisy signal and gives an estimate of the original message.
107
+ lowest layer. It provides the means of transmitting a stream of
108
+ raw bits over a data channel connecting the network elements.
109
+ The physical layer on the transmitter side converts data in
110
+ the form of bits to electromagnetic waves to be transmitted
111
+ wirelessly, while the receiver converts the electromagnetic
112
+ waves received by an antenna to binary data. The main
113
+ challenge in wireless data transmission is to overcome the
114
+ channel impairments so that the messages can be recovered
115
+ with small error rate.
116
+ The role of modulation is to convert digital data into radio
117
+ waves. It can be achieved in different ways, the informa-
118
+ tion can be encoded by varying (shifting) either amplitude,
119
+ frequency or phase of the electromagnetic wave. The more
120
+ states the modulation has, the more bits are transferred in one
121
+ symbol, resulting in higher data rate. However, with higher
122
+ order modulation the signal is more sensitive to channel errors,
123
+ so the applied modulation usually depends on the channel
124
+ quality. Fig. 1 shows a constellation diagram of the symbol
125
+ representation of 16-QAM modulation, where 4-bit strings
126
+ can be represented as complex numbers in a scheme resistant
127
+ to general noise patterns while achieving high data rate with
128
+ minimal channel uses.
129
+ Based on [15], we model a simple communications system,
130
+ shown in Fig. 2, consisting of a transmitter that modulates
131
+ message s ∈ M = {0, . . . , M − 1} into a signal x and
132
+ sends it over a noisy channel to the receiver that tries to
133
+ decode the received signal y resulting in the received message
134
+ ˆs. The transmitted signal x suffers degradation due to the
135
+ noise present on the channel. In case of transmission over a
136
+ complex channel with n discrete channel uses, the transmitter
137
+ can be represented as the transformation f : M �→ R2n,
138
+ mapping the message s to x ∈ R2n signal with certain
139
+ constraints imposed by the transmitting hardware (e.g., energy
140
+ constraint or average power constraint). The channel can be
141
+ modeled as a conditional probability density function p(y|x)
142
+ that produces the output signal y ∈ R2n given the input signal,
143
+ usually via some noise model (e.g., additive white Gaussian
144
+ noise (AWGN)). The receiver is represented as the mapping
145
+ g : R2n �→ M that recovers some estimate ˆs of the original
146
+ message from the received signal. In this work, we focus on
147
+ the case of single channel use (n = 1), however, this model
148
+ can be easily adopted to cases of n > 1. Also, the number of
149
+ transmit-receive pairs can be increased to get a Multiple-Input
150
+ and Multiple-Output (MIMO) system [15], [16].
151
+ Both of f and g transformations can be created in various
152
+ ways. In case of simple noise models applied in the channel
153
+ the transformations can be designed as explicit mathematical
154
+ formulas. In the case of complex noise scenarios that are
155
+ difficult to describe with mathematical models, a possible way
156
+ is to train deep neural networks, especially autoencoders, to
157
+ solve the encoding and decoding tasks [15].
158
+ An autoencoder is a special type of deep neural network,
159
+ with the aim to compress or denoise data [17]–[19]. Autoen-
160
+ coders consist of an encoder function f : RD �→ RL, and
161
+ a decoder function g : RL �→ RD. The encoder transforms
162
+ its input χ into a latent space representation f(χ) ∈ RL,
163
+ whereas the decoder tries to reconstruct it: ˆχ = g(f(χ)).
164
+ Usually we have L < D, i.e., the encoder produces a compact
165
+ representation of the data. f and g are typically deep neural
166
+ networks trained jointly to minimize a loss function of the
167
+ form L(χ, g(f(χ)).
168
+ In telecommunication, opposite to the general compressing
169
+ and denoising interpretation, autoencoders can be effectively
170
+ used in the presented communication system to learn how to
171
+ represent input messages as signals [15]. This model differs
172
+ from the “typical” autoencoder concept in the sense that it does
173
+ not try to remove noise from the input, instead it learns how
174
+ to represent the input in a way that is robust against a given
175
+ noisy channel acting in the latent space of the autoencoder.
176
+ As a result of the training process, the latent space (or hidden
177
+ layer) of the autoencoder contains the learned constellation of
178
+ symbols (or codebook). The learned constellation is optimized
179
+ for best mapping of the input messages to signals that can be
180
+ accurately decoded with the largest success probability for the
181
+
182
+ Quantum Decoder
183
+ Encoder
184
+ cod
185
+ Noise model p(y|x)
186
+ argmax
187
+ Message vector
188
+ Qubit encoding
189
+ Qubit encoding
190
+ Normalization
191
+ ayer
192
+ 0.1
193
+ abit readou
194
+ La
195
+ ·
196
+ 0.8
197
+ 3
198
+ 0.05
199
+ Transmitter
200
+ Receiver
201
+ hannelEncoding
202
+ First Layer
203
+ · · ·
204
+ · · ·
205
+ · · ·
206
+ · · ·
207
+ |0⟩
208
+ Rx(y1)
209
+ R(α1, β1, γ1)
210
+ |0⟩
211
+ Rx(y2)
212
+ R(α2, β2, γ2)
213
+ |0⟩
214
+ R(α3, β3, γ3)
215
+ |0⟩
216
+ R(α4, β4, γ4)
217
+ (a)
218
+ First Layer with Encoding
219
+ · · ·
220
+ · · ·
221
+ · · ·
222
+ · · ·
223
+ |0⟩
224
+ Rx(y1)
225
+ R(α1, β1, γ1)
226
+ |0⟩
227
+ Rx(y2)
228
+ R(α2, β2, γ2)
229
+ |0⟩
230
+ R(α3, β3, γ3)
231
+ |0⟩
232
+ R(α4, β4, γ4)
233
+ (b)
234
+ First Layer with Double Encoding
235
+ · · ·
236
+ · · ·
237
+ · · ·
238
+ · · ·
239
+ |0⟩
240
+ Rx(y1)
241
+ R(α1, β1, γ1)
242
+ |0⟩
243
+ Rx(y2)
244
+ R(α2, β2, γ2)
245
+ |0⟩
246
+ Rx(y1)
247
+ R(α3, β3, γ3)
248
+ |0⟩
249
+ Rx(y2)
250
+ R(α4, β4, γ4)
251
+ (c)
252
+ First Layer with Weighted Double Encoding
253
+ · · ·
254
+ · · ·
255
+ · · ·
256
+ · · ·
257
+ |0⟩
258
+ Rx(w1 · y1)
259
+ R(α1, β1, γ1)
260
+ |0⟩
261
+ Rx(w2 · y2)
262
+ R(α2, β2, γ2)
263
+ |0⟩
264
+ Rx(w3 · y1)
265
+ R(α3, β3, γ3)
266
+ |0⟩
267
+ Rx(w4 · y2)
268
+ R(α4, β4, γ4)
269
+ (d)
270
+ Fig. 4: Quantum decoder implementations with encoding schemes. Ansatz circuits with (a) simple data encoding; (b) simple
271
+ data re-uploading; (c) double data re-uploading; (d) weighted double data re-uploading.
272
+ specific channel model. Whereas the encoder learns how to
273
+ produce optimal symbols, the receiver learns how to decode
274
+ these symbols after they have been corrupted by the channel,
275
+ i.e., how to recover x after sampling from p(y|x).
276
+ III. HYBRID QUANTUM AUTOENCODER FOR RADIO
277
+ PHYSICAL LAYER
278
+ A. Hybrid quantum autoencoder overview
279
+ Quantum autoencoder architectures have previously been
280
+ proposed to compress as well as denoise quantum data [8],
281
+ [10]. Hybrid quantum-classical autoencoders enable many
282
+ variations for quantum or classical encoding/decoding or the
283
+ use of classical data. In this work, a hybrid quantum-classical
284
+ autoencoder is applied for processing classical information.
285
+ Building on the physical layer autoencoder presented in
286
+ Sec. II, we propose a hybrid quantum-classical autoencoder
287
+ with classical encoder on the transmitter side and a quantum
288
+ decoder on the receiver side – trained in an end-to-end
289
+ solution. The encoder projects the original message to a lower
290
+ dimensional representation, robust to the channel degradation
291
+ effect. Once the signal is passed to the quantum decoder, the
292
+ compressed information is mapped to a higher dimensional
293
+ Hilbert space of the qubits by a QNN that has been previously
294
+ shown to be efficient for classification tasks [20], [21].
295
+ In our model, the classical encoder consists of an embedding
296
+ followed by a normalization. A simple linear embedding is
297
+ used to produce the constellation, satisfying the average power
298
+ constraint by normalization. The decoder is realized by a
299
+ general strongly connected quantum neural network which
300
+ we refer to as a quantum decoder. By simulating increasing
301
+ levels of noise in the channel, we can present a performance
302
+ evaluation of the various neural network architectures.
303
+ B. Quantum decoder architectures
304
+ A general QNN architecture has three main components
305
+ as shown in Fig. 3: qubit encoding for embedding the input
306
+ data, the parameterized QNN layers, and the qubit readout
307
+ given as a probability distribution over the possible con-
308
+ stellation symbols obtained from suitable measurements with
309
+ high enough number of shots. To encode the output of the
310
+ channel, we choose angle embedding with parameterized Rx
311
+ rotations [22]. With this embedding, there are multiple ways
312
+ to encode two-dimensional feature vectors into four qubits. As
313
+ for the variational ansatz, we use strongly entangling layers
314
+ introduced in quantum classifiers as they are known to be
315
+ expressive reaching ‘wide corners of the Hilbert space’ [23].
316
+ The measurements are performed in the computational basis
317
+ and the obtained probability distribution over the 16 basis
318
+ states is the output of the decoder.
319
+ The simplest single-layer realization of such a QNN struc-
320
+ ture is presented in Fig. 4a. To improve this ansatz, we can
321
+ apply the data re-uploading trick recently introduced in [24].
322
+ This technique, as shown in Fig. 4b, repeats the input encoding
323
+ block before each layer of the QNN circuit. The intuition be-
324
+ hind the effectiveness of this method is that by re-introducing
325
+ the input before each layer, one can mimic the computational
326
+ structure of typical classical deep neural networks, where the
327
+ copying of the classical information is readily available, which
328
+
329
+ would be, without this trick, prohibited by the no-cloning
330
+ theorem in quantum machine learning. The expressivity of a
331
+ model can be further increased by applying the encoding on
332
+ different subsystems in parallel [25]. With this in mind, we
333
+ further enhance the ansatz by encoding the first feature into
334
+ both qubit no. 1 and no. 3 and the second input feature into
335
+ both qubit no. 2 and no. 4. This double data re-uploading
336
+ ansatz is presented in Fig. 4c. As a final improvement, we
337
+ considered the role of the number of trainable parameters. As
338
+ the expressive power of the ansatz is highly dependent on the
339
+ number of trainable parameters, one should try to include as
340
+ many parameters as possible. One way to increase the number
341
+ of parameters while keeping the circuit as shallow as possible
342
+ – to respect the limited hardware capabilities and the inference
343
+ time constraints of the application – is to introduce trainable
344
+ weights in the data re-uploading blocks, as shown in Fig. 4d.
345
+ This modification keeps the depth constant.
346
+ C. Training and fine-tuning
347
+ For our hybrid autoencoder to achieve low estimation errors,
348
+ the training of the end-to-end system requires to be further
349
+ improved via hyper-parameter tuning.
350
+ First, the training of the hybrid model is done on batches
351
+ uniformly sampled from the set of messages {0, . . . , 15}.
352
+ These are sent as two dimensional encoded symbols through
353
+ the AWGN channel with SNR of 15 dB and i.i.d. noise.
354
+ The accuracy of the model is measured by evaluating
355
+ the Symbol Error Rate (SER), a key performance indica-
356
+ tor commonly used in radio communication. The network
357
+ weight updates are calculated with the sparse categorical cross-
358
+ entropy of the distribution generated by the decoder and the
359
+ ground truth symbols. This loss function is used to calculate
360
+ gradients in a mini-batch gradient descent with batch size of 64
361
+ and Adam optimizer [26]. We simulate the hybrid autoencoder
362
+ using PennyLane [27], a quantum machine learning framework
363
+ with its TensorFlow [28] backend. Second, we evaluate the
364
+ reached model accuracy at various hyper-parameter settings.
365
+ The search is conducted by KerasTuner [29] after partitioning
366
+ the space as the simulator compute times are prohibitive of a
367
+ full grid search.
368
+ We start by first evaluating the learning rate parameter set
369
+ η ∈ {0.1, 0.01, 0.001} using the simple ansatz presented on
370
+ Fig. 4a with L = 8 layers with 1000-shot measurements and
371
+ 1000 training steps. Based on these results, the only viable
372
+ value of η = 0.1 is set for the rest of this study.
373
+ We continue with evaluating modifications to the basic
374
+ ansatz but keeping the number of layers L = 8 and 1000
375
+ training steps fixed, to minimize the overall computation time.
376
+ The results are shown in Fig. 5. For the basic circuit, the SER
377
+ fluctuates around its initial value without showing convergence
378
+ to a desirable level. A significant accuracy improvement of
379
+ roughly 40% is achieved by implementing single data re-
380
+ uploading (Fig. 4b with ansatz of 1×DR). Introducing the
381
+ double data re-uploading layer (2×DR with ansatz of Fig. 4c)
382
+ leads to another 15% improvement. Finally, we can even fur-
383
+ ther increase the performance by another 20% when using the
384
+ 0
385
+ 200
386
+ 400
387
+ 600
388
+ 800
389
+ 1000
390
+ Number of training steps
391
+ 0.1
392
+ 0.2
393
+ 0.3
394
+ 0.4
395
+ 0.5
396
+ 0.6
397
+ 0.7
398
+ 0.8
399
+ Symbol Error Rate
400
+ basic
401
+ 1x DR
402
+ 2x DR
403
+ 2x wDR
404
+ Fig. 5: Learning curves of circuit architectures. The number
405
+ of data re-uploading (none, single, double) and the weighted
406
+ data encoding have high impact on the convergence properties
407
+ of the quantum autoencoder.
408
+ weighted double data re-uploading technique (2×wDR with
409
+ ansatz Fig. 4d). Based on these results, the weighted double
410
+ data re-uploading ansatz is chosen for further experiments.
411
+ As a last step, we optimize the number of layers. The hybrid
412
+ autoencoder using the best performing ansatz is trained with
413
+ 8 to 24 layers. Increasing the number of layers clearly shows
414
+ the improvement in SER as well as in convergence time as
415
+ seen in Fig. 6.
416
+ IV. PERFORMANCE EVALUATION
417
+ A. Validation
418
+ Comparing our hybrid architecture to the classical method is
419
+ crucial to validate the solution. Based on the learning curves
420
+ presented, the shallowest network reaching accuracy similar
421
+ to the classical solution contains L = 16 layers. Further
422
+ increasing the number of layers leads to small improvements
423
+ in accuracy but it is suboptimal in terms of circuit depth.
424
+ Although the hybrid quantum autoencoder models are
425
+ trained at SNR of 15 dB we further validate the results at
426
+ different values. The evaluation is shown in Fig. 7. We see
427
+ that the trained networks generalize well on previously unseen
428
+ SNR values, and reach performance similar to the classical
429
+ baseline.
430
+ In Fig. 8, the constellation diagrams produced by autoen-
431
+ coders having different numbers of layers are shown. If the
432
+ trained autoencoder has good performance, it is expected that
433
+ the symbols are uniformly distributed in the diagram, similarly
434
+ to Fig 1. We see that increasing the number of layers leads to
435
+ a more balanced distribution of symbols in the Q − I space,
436
+ which implies that the symbols can be well separated in case
437
+ of noisy channels.
438
+ B. Time characteristics
439
+ In radio telecommunication, the latency of the data trans-
440
+ mission is also an important performance metric. In some use
441
+
442
+ 0
443
+ 200
444
+ 400
445
+ 600
446
+ 800
447
+ 1000
448
+ Number of training steps
449
+ 0.0
450
+ 0.1
451
+ 0.2
452
+ 0.3
453
+ 0.4
454
+ 0.5
455
+ 0.6
456
+ 0.7
457
+ Symbol Error Rate
458
+ L=8
459
+ L=12
460
+ L=16
461
+ L=24
462
+ classical
463
+ Fig. 6: Learning curves of classical and hybrid autoen-
464
+ coders for a set of layer numbers. We find that that
465
+ the minimal number of layers necessary to achieve results
466
+ comparable to the classical baseline is 16. Throughout these
467
+ tests, we used ansatz according to Fig. 4d.
468
+ cases it is even critical that the end-to-end delay falls below a
469
+ certain threshold. In 5G networks, it is possible to achieve
470
+ ms level latency. Hence, in addition to the accuracy it is
471
+ inevitable to investigate the time characteristics of the autoen-
472
+ coder model. After transpiling [30] the circuit ansatz to IBM
473
+ QPU backend ibmq_belem and ibmq_santiago [31] and
474
+ constructing the pulse-level scheduling, we can calculate the
475
+ theoretical execution times on both QPUs. The transpiled
476
+ circuits are deeper than the original ansatz, because we need
477
+ SWAP gates due to limited qubit connectivity and the basis
478
+ gate-set of the device can differ from the one used in Fig 4.
479
+ In Table I, we present the circuit depth and the approximate
480
+ per shot execution times of quantum decoders depending on
481
+ the number of layers. The time values in the table suggest
482
+ the following feasibility considerations for running QNN in a
483
+ real-time system. The number of shots highly determines the
484
+ reliability of the result of the inference. When the quantum
485
+ decoder is executed with 1000 shots (a level already acceptable
486
+ in current systems for this problem size), the inference time
487
+ is the order of magnitude of 100ms which is higher than the
488
+ accepted level in real-time radio systems. However, this can
489
+ be reduced to the accepted level of below 10ms because the
490
+ probability distribution is expected to be highly peaked for
491
+ well-trained autoencoders.
492
+ V. CONCLUSION AND OUTLOOK
493
+ We presented a novel hybrid implementation of a quantum-
494
+ classical autoencoder for end-to-end radio communication.
495
+ The decoder was implemented as a variational quantum circuit.
496
+ We showed that the use of advanced double re-uploading
497
+ encoding schemes allows for the inference-time constraints of
498
+ the application to be met without losing accuracy required
499
+ from the autoencoder.
500
+ By implementing a combination of parallel encodings and
501
+ weighted data re-uploading, we showed how these schemes
502
+ 5
503
+ 0
504
+ 5
505
+ 10
506
+ 15
507
+ 20
508
+ Signal-to-noise Ratio [dB]
509
+ 10
510
+ 2
511
+ 10
512
+ 1
513
+ 10
514
+ 0
515
+ Symbol Error Rate
516
+ L=8
517
+ L=12
518
+ L=16
519
+ L=24
520
+ classical
521
+ Fig. 7: Validation of inference accuracy of the trained
522
+ classical and hybrid autoencoders. With increasing SNR
523
+ values, hybrid models generalize to validation data on par with
524
+ the classical.
525
+ I
526
+ Q
527
+ 0
528
+ 1
529
+ 2
530
+ 3
531
+ 4
532
+ 5
533
+ 6
534
+ 7
535
+ 8
536
+ 9
537
+ 10
538
+ 11
539
+ 12
540
+ 13
541
+ 14
542
+ 15
543
+ (a)
544
+ I
545
+ Q
546
+ 0
547
+ 1
548
+ 2
549
+ 3
550
+ 4
551
+ 5
552
+ 6
553
+ 7
554
+ 8
555
+ 9
556
+ 10
557
+ 11
558
+ 12
559
+ 13
560
+ 14
561
+ 15
562
+ (b)
563
+ Fig. 8: Constellations (latent space representations) learned
564
+ by the hybrid autoencoder trained with SNR=15. (a) L = 8
565
+ layers (b) L = 24 layers.
566
+ TABLE I: Estimated execution times of the quantum
567
+ decoder. The circuit was run on the ibmq_belem and
568
+ ibmq_santiago depending on the number of layers, cal-
569
+ culated with Qiskit’s transpiler.
570
+ ibmq_belem
571
+ ibmq_santiago
572
+ # layers
573
+ depth
574
+ time [µs/shot]
575
+ depth
576
+ time [µs/shot]
577
+ 8
578
+ 125
579
+ 54.3
580
+ 145
581
+ 30.4
582
+ 12
583
+ 187
584
+ 78.4
585
+ 221
586
+ 43.6
587
+ 16
588
+ 260
589
+ 111.8
590
+ 297
591
+ 56.9
592
+ 20
593
+ 311
594
+ 124.2
595
+ 373
596
+ 70.12
597
+ 24
598
+ 379
599
+ 149.8
600
+ 449
601
+ 83.4
602
+ can improve not just the QNN expressivity but also the
603
+ performance of the whole autoencoder model. We expect these
604
+ quantum-enhanced models to outperform classical ones in
605
+ more complex channel noise scenarios, a direction for future
606
+ study.
607
+
608
+ ACKNOWLEDGMENT
609
+ Zsolt Tabi and Zimbor´as Zolt´an would like to thank the
610
+ support of the Hungarian National Research, Development and
611
+ Innovation Office (NKFIH) through the Quantum Information
612
+ National Laboratory of Hungary and through the Grants No.
613
+ FK 135220, K124351 and TKP2021-NVA-29.
614
+ REFERENCES
615
+ [1] M. Schuld, I. Sinayskiy, and F. Petruccione, “The quest for a quantum
616
+ neural network,” Quantum Information Processing, vol. 13, no. 11, pp.
617
+ 2567–2586, Nov 2014. https://doi.org/10.1007/s11128-014-0809-8
618
+ [2] N.
619
+ Killoran,
620
+ T.
621
+ R.
622
+ Bromley,
623
+ J.
624
+ M.
625
+ Arrazola,
626
+ M.
627
+ Schuld,
628
+ N. Quesada, and S. Lloyd, “Continuous-variable quantum neural
629
+ networks,”
630
+ Phys. Rev.
631
+ Research,
632
+ vol.
633
+ 1,
634
+ p.
635
+ 033063, Oct
636
+ 2019.
637
+ https://link.aps.org/doi/10.1103/PhysRevResearch.1.033063
638
+ [3] D. Wierichs, J. Izaac, C. Wang, and C. Y.-Y. Lin, “General parameter-
639
+ shift rules for quantum gradients,” Quantum, vol. 6, p. 677, Mar. 2022.
640
+ https://doi.org/10.22331/q-2022-03-30-677
641
+ [4] V. Bergholm, J. Izaac, M. Schuld, C. Gogolin, M. Sohaib Alam,
642
+ S. Ahmed et al., “Pennylane: Automatic differentiation of hybrid
643
+ quantum-classical computations,” arXiv e-prints, p. arXiv:1811.04968,
644
+ Nov. 2018.
645
+ [5] S. Wei, Y. Chen, Z. Zhou, and G. Long, “A quantum convolutional
646
+ neural network on nisq devices,” AAPPS Bulletin, vol. 32, no. 1, p. 2,
647
+ Jan 2022. https://doi.org/10.1007/s43673-021-00030-3
648
+ [6] S.
649
+ Lloyd
650
+ and
651
+ C.
652
+ Weedbrook,
653
+ “Quantum
654
+ generative
655
+ adversarial
656
+ learning,”
657
+ Phys.
658
+ Rev.
659
+ Lett.,
660
+ vol.
661
+ 121,
662
+ p.
663
+ 040502,
664
+ Jul
665
+ 2018.
666
+ https://link.aps.org/doi/10.1103/PhysRevLett.121.040502
667
+ [7] S. Y.-C. Chen, S. Yoo, and Y.-L. L. Fang, “Quantum long short-term
668
+ memory,” in APS March Meeting Abstracts, ser. APS Meeting Abstracts,
669
+ vol. 2021, Jan. 2021, p. V32.009.
670
+ [8] C.-J. Huang, H. Ma, Q. Yin, J.-F. Tang, D. Dong, C. Chen et al.,
671
+ “Realization of a quantum autoencoder for lossless compression
672
+ of quantum data,” Phys. Rev. A, vol. 102, p. 032412, Sep 2020.
673
+ https://link.aps.org/doi/10.1103/PhysRevA.102.032412
674
+ [9] J.
675
+ Romero,
676
+ J.
677
+ P.
678
+ Olson,
679
+ and
680
+ A.
681
+ Aspuru-Guzik,
682
+ “Quantum
683
+ autoencoders for efficient compression of quantum data,” Quantum
684
+ Science and Technology, vol. 2, no. 4, p. 045001, aug 2017.
685
+ https://doi.org/10.1088/2058-9565/aa8072
686
+ [10] D. Bondarenko and P. Feldmann, “Quantum autoencoders to denoise
687
+ quantum data,” Phys. Rev. Lett., vol. 124, p. 130502, Mar 2020.
688
+ https://link.aps.org/doi/10.1103/PhysRevLett.124.130502
689
+ [11] S. Chakrabarti, R. Krishnakumar, G. Mazzola, N. Stamatopoulos,
690
+ S. Woerner, and W. J. Zeng, “A Threshold for Quantum Advantage
691
+ in
692
+ Derivative
693
+ Pricing,”
694
+ Quantum,
695
+ vol.
696
+ 5,
697
+ p.
698
+ 463,
699
+ Jun.
700
+ 2021.
701
+ https://doi.org/10.22331/q-2021-06-01-463
702
+ [12] S. J. Nawaz, S. K. Sharma, S. Wyne, M. N. Patwary, and M. Asaduz-
703
+ zaman, “Quantum machine learning for 6G communication networks:
704
+ State-of-the-art and vision for the future,” IEEE Access, vol. 7, pp.
705
+ 46 317–46 350, 2019.
706
+ [13] “6G
707
+
708
+ connecting
709
+ a
710
+ cyber-physical
711
+ world
712
+ -
713
+ a
714
+ re-
715
+ search
716
+ outlook
717
+ toward
718
+ 2030,”
719
+ Ericsson,
720
+ Tech.
721
+ Rep.,
722
+ February 2022. https://www.ericsson.com/4927de/assets/local/reports-
723
+ papers/white-papers/6g–connecting-a-cyber-physical-world.pdf
724
+ [14] A. S. Tanenbaum and D. Wetherall, Computer networks, 5th Edition.
725
+ Pearson, 2011. https://www.worldcat.org/oclc/698581231
726
+ [15] T. O’Shea and J. Hoydis, “An introduction to deep learning for the
727
+ physical layer,” IEEE Transactions on Cognitive Communications and
728
+ Networking, vol. 3, no. 4, pp. 563–575, 2017.
729
+ [16] D. Garc´ıa, J. O. Lacruz, D. Badini, D. De Donno, and J. Widmer,
730
+ “Model-free machine learning of wireless siso/mimo communications,”
731
+ Computer
732
+ Communications,
733
+ vol.
734
+ 181,
735
+ pp.
736
+ 192–202,
737
+ 2022.
738
+ https://www.sciencedirect.com/science/article/pii/S0140366421003704
739
+ [17] G. E. Hinton and R. R. Salakhutdinov, “Reducing the dimensionality of
740
+ data with neural networks,” Science, vol. 313, no. 5786, pp. 504–507,
741
+ Jul. 2006. https://doi.org/10.1126/science.1127647
742
+ [18] I. Goodfellow, Y. Bengio, and A. Courville, Deep Learning. MIT Press,
743
+ 2016, http://www.deeplearningbook.org.
744
+ [19] H. Neji, J. Nogueras-Iso, J. Lacasta, M. Ben Halima, and A. M. Alimi,
745
+ “Adversarial autoencoders for denoising digitized historical documents:
746
+ The use case of incunabula,” in 2019 International Conference on
747
+ Document Analysis and Recognition Workshops (ICDARW), vol. 6, 2019,
748
+ pp. 31–34.
749
+ [20] E. Farhi and H. Neven, “Classification with quantum neural networks
750
+ on near term processors,” arXiv: Quantum Physics, 2018.
751
+ [21] A. Mari, T. R. Bromley, J. Izaac, M. Schuld, and N. Killoran, “Transfer
752
+ learning in hybrid classical-quantum neural networks,” Quantum, vol. 4,
753
+ p. 340, Oct. 2020.
754
+ [22] M. Schuld and F. Petruccione, Machine Learning with Quantum
755
+ Computers,
756
+ ser.
757
+ Quantum
758
+ Science
759
+ and
760
+ Technology.
761
+ Springer
762
+ International
763
+ Publishing,
764
+ 2021.
765
+ https://books.google.hu/books?id=-
766
+ N5IEAAAQBAJ
767
+ [23] M. Schuld, A. Bocharov, K. M. Svore, and N. Wiebe, “Circuit-centric
768
+ quantum classifiers,” Phys. Rev. A, vol. 101, p. 032308, Mar 2020.
769
+ https://link.aps.org/doi/10.1103/PhysRevA.101.032308
770
+ [24] A. P’erez-Salinas, A. Cervera-Lierta, E. Gil-Fuster, and J. I. Latorre,
771
+ “Data re-uploading for a universal quantum classifier,” Quantum, vol. 4,
772
+ p. 226, 2020.
773
+ [25] M.
774
+ Schuld,
775
+ R.
776
+ Sweke,
777
+ and
778
+ J.
779
+ J.
780
+ Meyer,
781
+ “Effect
782
+ of
783
+ data
784
+ encoding on the expressive power of variational quantum-machine-
785
+ learning models,” Phys. Rev. A, vol. 103, p. 032430, Mar 2021.
786
+ https://link.aps.org/doi/10.1103/PhysRevA.103.032430
787
+ [26] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,”
788
+ arXiv preprint arXiv:1412.6980, 2014.
789
+ [27] V. Bergholm, J. A. Izaac, M. Schuld, C. Gogolin, and N. Killoran,
790
+ “Pennylane: Automatic differentiation of hybrid quantum-classical com-
791
+ putations,” ArXiv, vol. abs/1811.04968, 2018.
792
+ [28] M.
793
+ Abadi,
794
+ A.
795
+ Agarwal,
796
+ P.
797
+ Barham,
798
+ E.
799
+ Brevdo,
800
+ Z.
801
+ Chen,
802
+ C.
803
+ Citro
804
+ et
805
+ al.,
806
+ “TensorFlow:
807
+ Large-scale
808
+ machine
809
+ learning
810
+ on
811
+ heterogeneous systems,” 2015, software available from tensorflow.org.
812
+ https://www.tensorflow.org/
813
+ [29] T. O’Malley, E. Bursztein, J. Long, F. Chollet, H. Jin, L. Invernizzi et al.,
814
+ “Kerastuner,” https://github.com/keras-team/keras-tuner, 2019.
815
+ [30] M. S. ANIS, Abby-Mitchell, H. Abraham, AduOffei, R. Agarwal,
816
+ G. Agliardi et al., “Qiskit: An open-source framework for quantum
817
+ computing,” 2021.
818
+ [31] “IBM quantum,” 2021. https://quantum-computing.ibm.com/
819
+
29E0T4oBgHgl3EQfuwF7/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
49E0T4oBgHgl3EQfvgE1/content/tmp_files/2301.02618v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
49E0T4oBgHgl3EQfvgE1/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4NA0T4oBgHgl3EQfNf9p/content/2301.02147v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:349964e82d781082b5293f194210ee6f951418a9fd20c09f16001c5d05c4cae9
3
+ size 1341103
4NA0T4oBgHgl3EQfNf9p/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f34bf162e6429fc49dd89605539f8a087a14047f2ea83b3daf7e00dcf708917
3
+ size 2097197
4NA0T4oBgHgl3EQfNf9p/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b385b0ba39ecfd51fb2900cfceb193ae312f1bc3a1b74c2e6d04db0d5137e87
3
+ size 78902
59E0T4oBgHgl3EQfewCK/content/tmp_files/2301.02395v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
59E0T4oBgHgl3EQfewCK/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
7dE1T4oBgHgl3EQf7QV5/content/2301.03532v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4baf42cd6e2309668019e78180d7628c615bba1de31acb32806d7d8a83ed1001
3
+ size 646838
7dE1T4oBgHgl3EQf7QV5/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c09ba1fafa3ba4609cec00a127c149fb7b72b8d9d54cf73923b1e905d1ebff4
3
+ size 129713
8dFQT4oBgHgl3EQf4jbF/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21ef11127050e390fd09148ebe3dee04d644d87ce7f765ac5e8acb2c47be7792
3
+ size 133383
99AyT4oBgHgl3EQfRPYW/content/tmp_files/2301.00060v1.pdf.txt ADDED
@@ -0,0 +1,1119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MORPHOLOGY-BASED NON-RIGID REGISTRATION OF
2
+ CORONARY COMPUTED TOMOGRAPHY AND INTRAVASCULAR
3
+ IMAGES THROUGH VIRTUAL CATHETER PATH OPTIMIZATION
4
+ Karim Kadry∗
5
+ Institute of Medical Engineering and Science
6
+ Massachusetts Institute of Technology
7
+ Cambridge, MA 02139
8
9
+ Abhishek Karmakar
10
+ Meinig School of Biomedical Engineering
11
+ Cornell University
12
+ Ithaca, NY 14850
13
14
+ Andreas Schuh
15
+ Biomedical Image Analysis Group
16
+ Imperial College London
17
+ HeartFlow, Inc., USA
18
+ London, UK
19
20
+ Kersten Peterson
21
+ HeartFlow, Inc., USA
22
+ Redwood City, CA, 94063, USA
23
24
+ Michiel Schaap
25
+ HeartFlow, Inc., USA
26
+ Redwood City, CA, 94063, USA
27
28
+ David Marlevi
29
+ Department of Molecular Medicine and Surgery
30
+ Karolinska Institute
31
+ Stockholm, Sweden
32
33
+ Charles Taylor
34
+ Department of Electrical Engineering
35
+ HeartFlow, Inc., USA
36
+ Redwood City, CA, 94063, USA
37
38
+ Elazer Edelman
39
+ Institute of Medical Engineering and Science
40
+ Massachusetts Institute of Technology
41
+ Cambridge, MA 02139
42
43
+ Farhad Nezami
44
+ Department of Surgery
45
+ Brigham and Women’s Hospital Harvard Medical School
46
+ Boston, MA 02115
47
48
+ ABSTRACT
49
+ Coronary Computed Tomography Angiography (CCTA) provides information on the presence, extent,
50
+ and severity of obstructive coronary artery disease. Large-scale clinical studies analyzing CCTA-
51
+ derived metrics typically require ground-truth validation in the form of high-fidelity 3D intravascular
52
+ imaging. However, manual rigid alignment of intravascular images to corresponding CCTA images is
53
+ both time consuming and user-dependent. Moreover, intravascular modalities suffer from several
54
+ non-rigid motion-induced distortions arising from distortions in the imaging catheter path. To address
55
+ these issues, we here present a semi-automatic segmentation-based framework for both rigid and
56
+ non-rigid matching of intravascular images to CCTA images. We formulate the problem in terms
57
+ of finding the optimal virtual catheter path that samples the CCTA data to recapitulate the coronary
58
+ artery morphology found in the intravascular image. We validate our co-registration framework on a
59
+ cohort of n = 40 patients using bifurcation landmarks as ground truth for longitudinal and rotational
60
+ registration. Our results indicate that our non-rigid registration significantly outperforms other co-
61
+ registration approaches for luminal bifurcation alignment in both longitudinal (mean mismatch: 3.3
62
+ frames) and rotational directions (mean mismatch: 28.6 degrees). By providing a differentiable
63
+ framework for automatic multi-modal intravascular data fusion, our developed co-registration modules
64
+ ∗Corresponding Author
65
+ arXiv:2301.00060v1 [cs.CV] 30 Dec 2022
66
+
67
+ arXiv Template
68
+ A PREPRINT
69
+ significantly reduces the manual effort required to conduct large-scale multi-modal clinical studies
70
+ while also providing a solid foundation for the development of machine learning-based co-registration
71
+ approaches.
72
+ 1
73
+ Introduction
74
+ Coronary computed tomography angiography (CCTA) is a three dimensional image modality that provides information
75
+ on the presence, extent and severity of obstructive coronary artery disease (CAD) (Tzimas et al. [2022]). As such, CCTA
76
+ allows for the detection of stenotic atherosclerotic sections and assists clinicians in diagnosing CAD and planning
77
+ treatment. CCTA Images can also be used to create computational models of coronary blood flow, allowing for
78
+ the non-invasive estimation of fractional flow reserve (FFR-CT); a key diagnostic parameter in assessing functional
79
+ impairment (Uzu et al. [2019]).
80
+ Albeit widespread in use, CCTA provides primary information on luminal anatomy, with limited capacity in assessing
81
+ soft-tissue intraplaque tissue components. CCTA also suffers from blooming artifacts in the presence of highly
82
+ attenuating calcium deposits (Kim et al. [2015], Budoff et al. [2008]), which, combined with comparably low image
83
+ resolution, creates difficulties in resolving highly calcified arteries. Multiple studies have also been conducted to
84
+ quantify the degree to which CCTA can accurately assess CAD-related diagnostic metrics such as luminal area (Uzu
85
+ et al. [2019]), calcium morphology (Takahashi et al. [2021]), and plaque burden (Fischer et al. [2013], De Graaf et al.
86
+ [2013], Brodoefel et al. [2009]). The majority of such studies (Takahashi et al. [2021], Fischer et al. [2013], Uzu et al.
87
+ [2019], Brodoefel et al. [2009]) validate the performance of CCTA by manually co-registering image slices taken along
88
+ the CCTA artery to intravascular imaging modalities such as intravascular ultrasound (IVUS) and optical coherence
89
+ tomography (OCT); both providing higher-fidelity visualization of the lumen and surrounding tissue. There is also an
90
+ increasing interest in validating CCTA-derived segmentation algorithms against co-registered intravascular imaging
91
+ frames, again necessitating such multimodal image assessment (Lin et al. [2021], van Assen et al. [2019]).
92
+ Manual co-registeration of CCTA and intravascular images is, however, a challenging and time consuming task.
93
+ Typically, cross-sectional frames of the artery of interest are extracted from the CCTA images which then have to be
94
+ matched with corresponding frames from an intravascular acquisition through an imaging catheter pullback procedure.
95
+ Rigid registration in the longitudinal and rotational directions is usually achieved by matching single landmarks in both
96
+ modalities, such as a large bifurcation (Takahashi et al. [2021]). However, the beating of the heart, the irregular motion
97
+ of the imaging catheter, and the rotation of the catheter about its own axis create non-rigid distortions that accumulate
98
+ along the length of the pullback (Tsiknakis et al. [2021]). Manually correcting for such artifacts is prohibitively
99
+ time-consuming, requiring a cardiologist to manually mark fiduciary points in both images and shift images such that
100
+ the annotated points sufficiently align (Carlier et al. [2014], Tu et al. [2011], Hebsgaard et al. [2015]). Although such
101
+ techniques are accurate up to rigid translation, they require time investment from a trained expert to find matching
102
+ features in both modalities, creating a need for computational algorithms that non-rigidly register CCTA images to
103
+ corresponding intravascular data in an automatic fashion.
104
+ Automatic co-registration techniques typically consist of discretely optimizing a constructed cost function over a set of
105
+ longitudinal or rotational image shifts, where the cost function varies depending on the modalities being registered.
106
+ Some proposed cost functions include metrics such as lumen diameters (Qin et al. [2021]), lumen contours (Molony
107
+ et al. [2016], Karmakar et al. [2020]), calcium thickness (Gharaibeh et al. [2020], Molony et al. [2016]), and image
108
+ pixel intensities (Tsiknakis et al. [2021]). Similarly, rigid rotational registration for intravascular pullbacks has also
109
+ been based on extracted features such as luminal contours (Karmakar et al. [2020]), and calcium angle (Molony et al.
110
+ [2016]). However, the registration accuracy of all rigid registration methods is compromised by inconsistent motor
111
+ pullback speeds and rotational drift, which introduce non-rigid longitudinal and rotational distortions that misalign
112
+ image features such as diseased plaque and bifurcations.
113
+ To compensate for the longitudinal, rotational, and transverse motion of the catheter, several non-rigid registration
114
+ approaches have been proposed, typically to be employed after initial rigid alignment. Currently, non-rigid registration
115
+ of multiple intravascular imaging datasets has been predominantly performed through Dynamic Time Warping (DTW)
116
+ and Dynamic Programming (DP) (Tsiknakis et al. [2021], Molony et al. [2016]). However, DTW introduces non-
117
+ physiological assumptions into the registration process by discretely skipping or repeating intravascular frames, assumed
118
+ to be evenly spaced along the longitudinal direction. As a result, DTW is not well suited for use for intravascular images,
119
+ with pullback acquisitions sometimes rendering up to 10 repeated intravascular imaging frames at a time (Molony et al.
120
+ [2016]). On the contrary, continuous non-rigid registration methods have been developed to model the longitudinal
121
+ stretch and rotational drift between intravascular imaging frames using affine transforms and spline interpolation (Zhang
122
+ et al. [2014], Uzu et al. [2019]). While such continuous non-rigid methods are more realistic, they extensively rely on
123
+ manual annotations of all bifurcation zones for image registration, severely limiting their scalability. As such, there is
124
+ 2
125
+
126
+ arXiv Template
127
+ A PREPRINT
128
+ no continuous non-rigid registration method as of yet that does not explicitly require fiduciary landmarks for rotational
129
+ and longitudinal alignment. Further, there has been an increasing interest in machine learning approaches to image
130
+ co-registration in which a neural network is trained to predict a spatial transform that maps a moving image onto a
131
+ static target image (Balakrishnan et al. [2019], Fu et al. [2020]). Such approaches critically rely on a differentiable and
132
+ continuous spatial transform allowing for back-propagation of gradients to adjust the neural network weights (Jaderberg
133
+ et al. [2015]). While such continuous and differential spatial transforms are available for co-registration of 3D and 2D
134
+ medical images, a similar framework that accounts for the unique variation in intravascular catheter motion has not
135
+ been developed.
136
+ Given the previous limitations noted in prior co-registration algorithms, we here propose a novel semi-automatic
137
+ framework that takes as input an intravascular imaging pullback and a CCTA 3D image and aligns each intravascular
138
+ image frame along the artery to the equivalent frame in the CCTA image. The proposed continuous registration
139
+ methodology does not require manual matching of landmarks, with the only manual effort being the selection of viable
140
+ intravascular imaging frames and the provision of a rough centerline within the CT image. Specifically, we explore the
141
+ problem of reconstructing the path of a virtual catheter moving through and sampling from a 3D CCTA image such that
142
+ the set of frames produced by the motion of the catheter optimally reflect the equivalent target intravascular pullback.
143
+ Key contributions of this framework include:
144
+ • We present the first continuous co-registration framework for rigid and non-rigid matching of CCTA images
145
+ and intravascular images up to pixelwise alignment, with segmentations of the lumen and vessel wall as sole
146
+ input.
147
+ • We introduce a rigid registration approach that consists of our published longitudinal rigid registration
148
+ algorithm, which uses lumen area in a multi-step decision process, and a rotational registration step that
149
+ leverages the segmentation of the vessel wall to produce an initial rotational configuration for subsequent
150
+ registration.
151
+ • We introduce a novel non-rigid registration step, based only on the lumen segmentation, which is robust to
152
+ physiological catheter motions. The registration is formulated in terms of finding the path of a virtual catheter,
153
+ which translates the CCTA image into an intravascular-like image by sampling the segmentation along the
154
+ virtual catheter path. The virtual catheter path is reconstructed by spatially deforming the CCTA centerline by
155
+ B-spline deformations formulated in the longitudinal, rotational, and transverse directions, ensuring a smooth
156
+ and physiological reconstruction of catheter motion.
157
+ • Our non-rigid registration module being both continuous and differentiable, allows for easy integration into
158
+ future machine-learning-based approaches for intravascular image registration.
159
+ • We validate in a direct clinical setting, evaluating performance across a multimodal cohort of cardiac
160
+ patients(n = 40) and benchmarking performance against previously developed state-of-the-art approaches.
161
+ 2
162
+ Methodology
163
+ An overview of the co-registration pipeline is detailed in Figure 1. In brief, bi-modality images are processed to produce
164
+ binary segmentations of the lumen and vessel wall (section 2.1.1), which are first used in a rigid registration step,
165
+ involving both longitudinal and rotational alignment (section 2.1.2). The rigid registration is then used as an initial
166
+ estimate of a virtual catheter path forming the basis for a non-rigid registration (section 2.1.3). The virtual catheter
167
+ path initially samples the geometry of the CT lumen to produce a virtual imaging pullback that is then compared to a
168
+ Signed Distance Field (SDF) derived from the intravascular equivalent. A non-rigid transformation for the longitudinal,
169
+ rotational, and transverse motion distortions is applied on the virtual catheter path and optimized to align the SDF’s in
170
+ both modalities. The performance of our proposed co-registration algorithm is then validated on a clinical cohort of
171
+ relevant cardiac patients (section 2.2.2)
172
+ 2.1
173
+ Co-registration framework
174
+ 2.1.1
175
+ Preprocessing
176
+ As the basis for our co-registration pipeline, luminal segmentations from the two different image modalities are provided.
177
+ Starting with the intravascular image set, luminal frame-by-frame segmentations are used to produce an SDF using
178
+ a fast marching method (Treister and Haber [2016]), clamped to only have negative values (indicating that a pixel is
179
+ inside the lumen). Further, the SDF is smoothed in the axial direction with a Gaussian convolutional kernel of size 3
180
+ and standard deviation 0.1 in order to regularize the optimization process.
181
+ 3
182
+
183
+ arXiv Template
184
+ A PREPRINT
185
+ OCT image
186
+ CT image
187
+ Rigid registration
188
+ Non-rigid registration
189
+ 0
190
+ 20
191
+ 40
192
+ 60
193
+ 80
194
+ 100
195
+ 120
196
+ 140
197
+ 160
198
+ Frame number
199
+ 2
200
+ 4
201
+ 6
202
+ 8
203
+ 10
204
+ 12
205
+ 14
206
+ 16
207
+ Area (mm^2)
208
+ CT rigid
209
+ OCT
210
+ OCT
211
+ CT
212
+ Aligned frames
213
+ 0
214
+ 20
215
+ 40
216
+ 60
217
+ 80
218
+ 100
219
+ 120
220
+ 140
221
+ 160
222
+ Frame number
223
+ 2
224
+ 4
225
+ 6
226
+ 8
227
+ 10
228
+ 12
229
+ 14
230
+ 16
231
+ Area (mm^2)
232
+ CT non-rigid
233
+ OCT
234
+ Figure 1: Overview of the proposed registration pipeline. The imaging modalities are rigidly co-registered in the
235
+ longitudinal and rotational directions, serving as the basis for the initialization of the virtual pullback trajectory. The
236
+ virtual pullback trajectory is then used to sample a CT lumen signed distance field (SDF), used in direct comparison to
237
+ the equivalent OCT SDF.
238
+ Arc angle (degrees)
239
+ Rigid longitudinal registration
240
+ OCT image
241
+ Lumen
242
+ CT image
243
+ Rigid rotational registration
244
+ Vessel thickness (pixels)
245
+ 0
246
+ 50
247
+ 100
248
+ 150
249
+ 200
250
+ 250
251
+ 300
252
+ 350
253
+ 1
254
+ 2
255
+ 3
256
+ 4
257
+ 5
258
+ 6
259
+ 7
260
+ Lumen
261
+ Vessel
262
+ Vessel
263
+ 0
264
+ 20
265
+ 40
266
+ 60
267
+ 80
268
+ 100
269
+ 120
270
+ 140
271
+ 160
272
+ Frame number
273
+ 2
274
+ 4
275
+ 6
276
+ 8
277
+ 10
278
+ 12
279
+ 14
280
+ 16
281
+ Area (mm^2)
282
+ CT rigid
283
+ OCT
284
+ CT rigid
285
+ OCT
286
+ Figure 2: Overview of the proposed rigid registration pipeline. The lumen segmentation area vectors from both
287
+ modalities are used to rigidly register the modalities in the longitudinal direction using a sliding window approach. The
288
+ longitudinal registration is then used to match each equivalent frame for the rotational registration. The vessel wall
289
+ segmentations are then converted to vessel thickness-arc angle plots and are used to determine an optimal rigid rotation.
290
+ Coupled to the intravascular image set, a corresponding 3D SDF from the CCTA images is generated. Although several
291
+ methods could potentially be applied for such, a convenient approach is to derive the SDF from a computational mesh
292
+ of the coronary tree. Herein, to create an SDF a narrow band is defined within the object mesh boundary, subsequently
293
+ used to compute exact Euclidean distances from each voxel center to the boundary. Outside the object boundary,
294
+ the distance field values are then set to zero. Corresponding binary segmentations can then be produced by simple
295
+ thresholding operations. Using these computational meshes, vessel centerlines are obtained using VMTK (Antiga et al.
296
+ [2008]), generating an array ¯r representing n spatial positions with an axial spacing of 0.2mm. A spatial derivative
297
+ is then applied to the centerline points ¯r, defining a tangent vector T for each point. The two vectors U and V that
298
+ are orthogonal to the tangent vector can then be obtained through the parallel transport method (Guo et al. [2013]),
299
+ ensuring that the vectors V and U remain stable between frames placed along the axial direction. The centerline points
300
+ and the orthogonal vectors hence define a set of frames(¯r,T,U,and V) in 3D space that are used to sample the CCTA
301
+ SDF along an equivalent virtual catheter pullback, with dimensions equalling the intravascular dataset (in our case:
302
+ 96x96xNframes with an in-plane resolution of 80 micrometers), all using a curved-planar reformation procedure
303
+ (Kanitsar et al. [2002]). The resulting SDF is then smoothed in the axial direction with a Gaussian convolutional
304
+ kernel of size 3 and standard deviation of 0.1. Through this method, virtual pullbacks of both the lumen and vessel
305
+ segmentations were produced.
306
+ 4
307
+
308
+ 0
309
+ 20
310
+ 40
311
+ 60
312
+ 80
313
+ 0
314
+ 20
315
+ 40
316
+ 60
317
+ 800
318
+ 20
319
+ 40
320
+ 60
321
+ 80
322
+ 0
323
+ 20
324
+ 40
325
+ 60
326
+ 800
327
+ 20
328
+ 40
329
+ 60
330
+ 80
331
+ 0
332
+ 20
333
+ 40
334
+ 60
335
+ 800
336
+ 20
337
+ 40
338
+ 60
339
+ 80
340
+ 0
341
+ 20
342
+ 40
343
+ 60
344
+ 80arXiv Template
345
+ A PREPRINT
346
+ 2.1.2
347
+ Rigid registration
348
+ An overview of the rigid registration step can be seen in Figure 2. For the rigid longitudinal registration, the processed
349
+ lumen segmentations are used to create an area vector of equal lengths, sampling the CT virtual pullback to correspond
350
+ to the acquired intravascular set. Here, we leverage our previous work to rigidly align the pullbacks using a multi-step
351
+ sliding window method, minimizing the difference in area vectors (for details see (Karmakar et al. [2020])). Before
352
+ registration, continuous segments of the OCT pullback with poor lumen segmentations due to residual blood or catheter
353
+ housing were manually excluded.
354
+ For rigid rotational registration, the luminal profiles were deemed unreliable for producing good alignment. Therefore,
355
+ the vessel border segmentations were instead used for rotationally aligning the pullbacks. For each CT and intravascular
356
+ frame, respectively, a thickness-arc angle vector is extracted by tracing a set of radial rays from the centroid of the
357
+ vessel segmentation in increments of 12 degrees. The thickness vectors are then matched according to the result of the
358
+ longitudinal registration, with non-overlapping frames subsequently cropped. The optimal rigid rotation angle is then
359
+ obtained by sliding the set of CT thickness vectors over each equivalent intravascular image vector and minimizing the
360
+ mean squared error across all frames.
361
+ 2.1.3
362
+ Non-rigid registration
363
+ The non-rigid registration process (Figure 3) consists of optimizing a set of frame variables (¯r, T, U, and V) representing
364
+ a virtual catheter path moving through the CCTA image. The loss function to be optimized is defined as the mean squared
365
+ error between the 3D SDF generated from the two image sets, with the CCTA-SDF sampled along the aforementioned
366
+ virtual catheter path. The virtual pullback is initialized as the centerline that was calculated from the CCTA 3D model
367
+ and longitudinally cropped and rotated according to the output of the rigid registration. After rigid registration a spline
368
+ is defined based on the centerline points ¯r where the centerline points are fully described by their arclength values ¯s
369
+ along the spline. Accordingly, every i-th frame can be manipulated by 4 variables, representing the arclength along
370
+ the virtual catheter path si, the rotation angle of the frame θi about the catheter path T, and the in-plane transverse
371
+ displacements du and dv along the frame vectors U and V respectively (see Figure 3).
372
+ To regularize the motion of the virtual catheter to be smooth and physiological, the 4 frame manipulation variable
373
+ sets are parametrized by a sparse set of control points controlling a B-spline deformation (Rueckert et al. [1999])
374
+ independently acting on 4 nx1 vectors representing the frame manipulation variables ¯s, ¯θ, ¯du, and ¯dv. Thus, for a 1D
375
+ control point grid of size N, the relation between a frame manipulation variable v and the control points p can be
376
+ described by:
377
+ v(s) =
378
+ N
379
+
380
+ i=0
381
+ Bi(s)pi,
382
+ (1)
383
+ where Bi(s) is a polynomial basis function of order 2. In matrix form, the same can be represented by:
384
+ V = BP,
385
+ (2)
386
+ in which V ∈ Rn×1, B ∈ Rn×N, P ∈ RN×1 where n is the number of frames and N is the number of control points. B
387
+ is the univariate B-spline tensor and can be pre-computed from the initial frame manipulation variable vectors, while P
388
+ is the deformed control point grid vector that is optimized during co-registration.
389
+ Instead of directly optimizing for the set of Ns = 30 control points P s controlling the arclength variables s for each
390
+ frame, the control point deformations ∆P s
391
+ i can be parametrized by a deformation vector Xs of size Ns − 1, dictating
392
+ the relative displacement of each control point from its proximal neighbor, with the most proximal control point being
393
+ fixed. This is done to account for the cumulative effect of catheter motor speed variations on the rest of the pullback.
394
+ Therefore, the deformation of each control point can be defined as the cumulative sum of the relative deformations
395
+ along the proximal control points. Moreover, to regularize the catheter motion and prevent backwards movement, the
396
+ relative deformation of each control point is limited to a fraction (0.35) of the distance between control points.
397
+ ∆P s
398
+ i = Xs
399
+ i +
400
+ i−1
401
+
402
+ j=0
403
+ Xs
404
+ j
405
+ (3)
406
+ Once the control points are deformed into a new configuration, the new arclength values for each frame ¯s is calculated
407
+ through equation 2 and the frame vectors (T,U, and V) are then recalculated.
408
+ ¯s = BsP s
409
+ (4)
410
+ 5
411
+
412
+ arXiv Template
413
+ A PREPRINT
414
+ Rigid initialization
415
+ Non-rigid transform
416
+ Rotational transform
417
+ Longitudinal transform
418
+ Transverse transform
419
+ 0
420
+ 20
421
+ 40
422
+ 60
423
+ 80
424
+ 100
425
+ Frame number
426
+ 0
427
+ 5
428
+ 10
429
+ 15
430
+ 20
431
+ 25
432
+ 30
433
+ 35
434
+ 40
435
+ Arclength (mm)
436
+ Rigid
437
+ Non Rigid
438
+ 0
439
+ 20
440
+ 40
441
+ 60
442
+ 80
443
+ 100
444
+ Frame number
445
+ 60
446
+ 50
447
+ 40
448
+ 30
449
+ 20
450
+ 10
451
+ Theta (degrees)
452
+ Rigid
453
+ Non Rigid
454
+ 0
455
+ 20
456
+ 40
457
+ 60
458
+ 80
459
+ 100
460
+ Frame number
461
+ 1.5
462
+ 1.0
463
+ 0.5
464
+ 0.0
465
+ 0.5
466
+ 1.0
467
+ 1.5
468
+ Displacement (mm)
469
+ U-displacement
470
+ V-displacement
471
+ OCT: Target
472
+ Loss
473
+ CT: Moving
474
+ Figure 3: Overview of the spatial deformation acting on the virtual catheter path. The longitudinal transform stretches
475
+ and compresses the space between adjacent frames, at which point the frame vectors (T,U, and V) are recalculated. The
476
+ rotational transform is then applied to the frame vectors orthogonal to the tangent (U and V) about T, and the transverse
477
+ transform is then applied to shift the centerline points in the direction of the new frame vectors (U and V).
478
+ 2.1.4
479
+ Non-rigid rotational registration
480
+ Similar to the longitudinal registration, the set of Nθ = 20 control points P θ controlling the rotation of each frame
481
+ about the catheter axis can be parameterized by a relative rotation vector Xθ of size Nθ. The rotation value for each
482
+ control points is defined by:
483
+ ∆P θ
484
+ i = Xθ
485
+ i +
486
+ i−1
487
+
488
+ j=0
489
+ Xθ
490
+ j
491
+ (5)
492
+ The rotation correction for each frame is applied after the non-rigid longitudinal transformation but before the non-rigid
493
+ transverse transformation. Once the control points are deformed into a new configuration, the new rotation values for
494
+ each frame ¯θ can be calculated through equation 2 and used to rotate frame vectors U and V about the tangent vectors T.
495
+ ¯θ = BθP θ
496
+ (6)
497
+ 2.1.5
498
+ Non-rigid transverse registration
499
+ The virtual catheter was biased to stay close to the centerline by optimizing the Nd = 60 control points determining the
500
+ in-plane transverse displacements du and dv directly. Thus the 2 orthogonal transverse displacements for each frame
501
+ was calculated from the matrix relation:
502
+ ¯d = BdP d
503
+ (7)
504
+ Where for each frame the displacements along the vectors U and V were applied as a final step after the non-rigid
505
+ longitudinal and rotational transforms.
506
+ 2.2
507
+ Performance evaluation
508
+ 2.2.1
509
+ Image data
510
+ To evaluate our proposed co-registration framework, a dataset consisting of n = 40 matched OCT and CT image pairs
511
+ from 5 different clinical centers was selected, all originating from the Precise Percutaneous Coronary Intervention Plan
512
+ (P3) study (Nagumo et al. [2021]). As each OCT pullback image consisted of 375 frames, the intravascular imaging
513
+ dataset comprised approximately 15,000 image frames. The OCT lumen in every frame was manually annotated by
514
+ 6
515
+
516
+ arXiv Template
517
+ A PREPRINT
518
+ trained cardiologists. Further, the vessel wall borders in every OCT frame were segmented using a convolutional neural
519
+ network, using the previously published U-net as base architecture (Ronneberger et al. [2015]). Details of the network,
520
+ training, and validation can be found in Supplementary Material A. The lumen and vessel wall segmentations were then
521
+ re-sampled to represent a 3D image of dimensions 96x96xNframes with an in-frame resolution of 80 micrometers
522
+ and an out-of-frame resolution of 0.4 mm. All utilized intravascular pullbacks were manually deemed as of sufficient
523
+ image quality, with appropriate quality lumen segmentations. For the CCTA data, a 3D model of the coronary tree for
524
+ each patient was produced by HeartFlow using the CCTA image (Sonck et al. [2022]). The 3D model was then used to
525
+ produce a 3D SDF with a resolution of 0.25mm along each axis with an image dimension of 768x768x482.
526
+ 2.2.2
527
+ Co-registration accuracy
528
+ In order to evaluate the performance of the non-rigid registration, 114 bifurcations were manually marked in the OCT
529
+ pullback as well as in the rigid and non-rigid virtual pullback segmentations generated from the CCTA data. Bifurcations
530
+ were defined as the last image frame before a visual coronary artery split into two branches could be seen. Bifurcations
531
+ that were common to both modalities had their frame numbers recorded for validation of the non-rigid registration
532
+ algorithm. Longitudinal validation was conducted by comparing the frame number of a bifurcation in the OCT data
533
+ with the equivalent bifurcation frame number in the virtual pullback before and after non-rigid registration. In order to
534
+ validate the non-rigid rotational registration, the bifurcation angle difference between OCT pullback and the virtual
535
+ pullback was compared before and after rotational registration. As the bifurcation angle between bifurcation sections
536
+ that were not longitudinally matched is expected to be uncorrelated, a separate analysis was conducted to characterize
537
+ how angular mismatch varies when the bifurcations are longitudinally matched. Furthermore, only bifurcations that had
538
+ a frame mismatch below 6 frames were considered for extensive analysis of rotational accuracy.
539
+ 2.2.3
540
+ Comparison to alternative approaches
541
+ The most common co-registration methodology employed for coronary artery registration has been discrete optimization
542
+ approaches such as DTW and Dynamic Programming. Therefore, in order to evaluate the performance of our
543
+ longitudinal and rotational co-registration framework against state-of-the-art discrete approaches, we applied the
544
+ methodology described in Karmakar et. al (Karmakar et al. [2022]) on the same dataset used in this study. The approach
545
+ utilizes DTW to longitudinally align two coronary imaging modalities and Dynamic Programming to rotationally align
546
+ each frame. We utilized a window length of 4 (0.8mm) as implemented in the previous study and recorded identical
547
+ alignment metrics for 114 matched bifurcations in the dataset. The non-rigid registration algorithm was applied after
548
+ the rigid longitudinal registration step described in section 2.1.2. A substudy was also conducted in which the angular
549
+ alignment of all bifurcations was compared to the angular alignment of longitudinally matched bifurcations.
550
+ 2.2.4
551
+ Optimization details
552
+ The gradient descent-based optimization procedure was implemented in PyTorch with the Adam optimizer (Kingma
553
+ and Ba [2014]). A learning rate of 0.001 was used for the non-rigid longitudinal parameters and a rate of 0.01 was
554
+ used for both the rotational and transverse parameters. Each co-registration procedure was run for a minimum of 200
555
+ iterations to ensure convergence.
556
+ 3
557
+ Results
558
+ 3.1
559
+ Longitudinal Registration
560
+ Longitudinal registration plots in Figures 4 and 7A1-2 show that using rigid registration alone (Figure 7A1), few
561
+ bifurcations were longitudinally aligned within 6 (dotted line), 4 (dashed line), or 2 (solid line) frame distances.
562
+ However, after non-rigid alignment (Figure 7A2), distinct improvement can be observed with a majority of bifurcations
563
+ aligned within 6 frames. These results are visualized by the longitudinal mismatch plot (Figure 5A), revealing
564
+ that after rigid alignment, the percentage of bifurcations matched within 2, 4, and 6 frames are 26.3, 42.1, and 57.9%,
565
+ respectively, while after non-rigid alignment, these values increase to 60.5, 78.9, and 86.8%. Moreover, the scatterplot
566
+ for non-rigid registration (A2) demonstrates that the majority of bifurcations (86% shown in green) were enhanced
567
+ in terms of frame alignment, while a negligible number of bifurcations had slightly (11.4 % shown in orange) or
568
+ significantly (2.6 % shown in red) worse alignment after non-rigid registration. Table 2 further demonstrates the effect
569
+ of non-rigid registration, in which the mean frame difference after rigid registration was 7.9 frames and subsequently
570
+ decreased to 3.3 frames after non-rigid registration.
571
+ 7
572
+
573
+ arXiv Template
574
+ A PREPRINT
575
+ A1
576
+ B1
577
+ C1
578
+ D1
579
+ E1
580
+ F1
581
+ G1
582
+ A2
583
+ B2
584
+ C2
585
+ D2
586
+ E2
587
+ F2
588
+ G2
589
+ A3
590
+ B3
591
+ C3
592
+ D3
593
+ E3
594
+ F3
595
+ G3
596
+ Bifurcation Frames
597
+ 0
598
+ 20
599
+ 40
600
+ 60
601
+ 80
602
+ 100
603
+ 120
604
+ 140
605
+ 160
606
+ Frame number
607
+ 0
608
+ 2
609
+ 4
610
+ 6
611
+ 8
612
+ 10
613
+ 12
614
+ 14
615
+ 16
616
+ 18
617
+ Area (mm^2)
618
+ CT
619
+ OCT
620
+ A
621
+ B
622
+ C D
623
+ E F
624
+ G
625
+ Figure 4: Qualitative results for a single co-registered case. Top row shows the area plot along the artery for the non-rigidly
626
+ registered CT (green) and the OCT images. The bifurcation zones (Sections A-G) are marked and labeled for further
627
+ analysis. Bifurcation frames from the CT, OCT, and overlapped segmentation maps are presented in the bottom row for
628
+ qualitative analysis of the rotational and transverse co-registration.
629
+ A
630
+ B
631
+ 0
632
+ 5
633
+ 10
634
+ 15
635
+ 20
636
+ 25
637
+ 30
638
+ 35
639
+ 40
640
+ Maximum frame mismatch
641
+ 0.0
642
+ 0.2
643
+ 0.4
644
+ 0.6
645
+ 0.8
646
+ 1.0
647
+ Matched bifucations (%)
648
+ Rigid
649
+ Rigid+Non-rigid
650
+ 0
651
+ 20
652
+ 40
653
+ 60
654
+ 80
655
+ 100
656
+ 120
657
+ 140
658
+ 160
659
+ 180
660
+ Maximum angular mismatch (degrees)
661
+ 0.0
662
+ 0.2
663
+ 0.4
664
+ 0.6
665
+ 0.8
666
+ 1.0
667
+ Matched bifucations (%)
668
+ Rigid
669
+ Rigid+Non-rigid
670
+ Figure 5: Quantitative results comparing the quality of
671
+ rigid and non-rigid co-registration in longitudinal and ro-
672
+ tational directions with varying degrees of misalignment.
673
+ The mismatch plots exhibit the % of matched bifurca-
674
+ tions with increasing longitudinal (A) and rotational (B)
675
+ alignment mismatch criteria (x-axis).
676
+ ~10 degrees
677
+ ~20 degrees
678
+ ~30 degrees
679
+ Angular Mismatch
680
+ Figure 6: Grid plot showing multiple aligned bifurcation
681
+ segmentations using an SDF-based loss. Angular mis-
682
+ matches up to 10, 20, and 30 degrees are shown in the
683
+ first, second, and third columns respectively.
684
+ 3.2
685
+ Rotational Registration
686
+ Examination of the individual bifurcating frames in figure 4 for the CT (row 1) and OCT (row 2) frames indicates
687
+ excellent rotational and transverse alignment between both imaging modalities as evident from the raw images and the
688
+ overlapped segmentations (row 3). Rotational registration plots in figure 7B1-2 demonstrate that few bifurcations are
689
+ rotationally aligned within 30 (dotted line), 20 (dashed line), or 10 (solid line) degrees after rigid alignment (B1). After
690
+ non-rigid alignment (Figure 7B2), a majority of bifurcations were aligned within 30 degrees, with a significant amount
691
+ aligned within 20 and 10 degrees. Examination of the rotational mismatch plot (Figure 5B) quantitatively demonstrates
692
+ an increase in the percentage of bifurcations aligned up to an angular mismatch of 10, 20, and 30 degrees from %
693
+ values of 25.3, 40.4, and 52.3 to 51.5, 69.7, and 79.8% respectively. Similarly, the non-rigid registration scatterplot
694
+ 8
695
+
696
+ arXiv Template
697
+ A PREPRINT
698
+ A1
699
+ A2
700
+ B1
701
+ B2
702
+ Bifurcation number
703
+ 0
704
+ 5
705
+ 10
706
+ 15
707
+ 20
708
+ 25
709
+ 30
710
+ 35
711
+ 40
712
+ Longitudinal misalignment
713
+ Bifurcation number
714
+ 0
715
+ 5
716
+ 10
717
+ 15
718
+ 20
719
+ 25
720
+ 30
721
+ 35
722
+ 40
723
+ Longitudinal misalignment
724
+ Non-rigid<0
725
+ Non-rigid<2
726
+ Non-rigid>=2
727
+ Bifurcation number
728
+ 0
729
+ 25
730
+ 50
731
+ 75
732
+ 100
733
+ 125
734
+ 150
735
+ 175
736
+ 200
737
+ Angular misalignment
738
+ Non-rigid<0
739
+ Non-rigid<20
740
+ Non-rigid>=20
741
+ Bifurcation number
742
+ 0
743
+ 25
744
+ 50
745
+ 75
746
+ 100
747
+ 125
748
+ 150
749
+ 175
750
+ 200
751
+ Angular misalignment
752
+ Figure 7: Quantitative results comparing the quality of rigid and non-rigid co-registration in longitudinal and rotational
753
+ directions. The first row compares bifurcation frame mismatch before (A1) and after (A2) non-rigid registration in
754
+ the form of scatterplots. The second row compares bifurcation angular mismatch before (B1) and after (B2) non-rigid
755
+ registration in the form of scatterplots. The scatterplot for the longitudinal and rotational non-rigid registration (A2 and
756
+ B2) are color-coded to exhibit the change in alignment metric after non-rigid registration, where green represents an
757
+ increase in alignment, orange represents a mild decrease in alignment, and red represents a strong decrease in alignment.
758
+ Only bifurcations that were longitudinally matched within 6 OCT frames were analyzed for rotational alignment.
759
+ demonstrates that the majority of bifurcations had their angular mismatch decreased after non-rigid alignment (66%
760
+ shown in green) and only a minority had their angular mismatch values slightly (22% shown in orange) or significantly
761
+ (12% shown in red) increased. The mean value of the angular mismatch before and after non-rigid alignment is reported
762
+ in Table 2, in which the mean angular mismatch decreases from 36.0 to 28.6 degrees.
763
+ 3.3
764
+ Comparison with previous approaches
765
+ A direct comparison of the virtual catheter method with state-of-the-art discrete optimization approaches can be seen in
766
+ Tables 2 and 3. Comparing the virtual catheter method to a discrete optimization approach for longitudinal registration,
767
+ it was shown that DTW produces significantly poorer results in longitudinal registration, with the longitudinal mismatch
768
+ of 11.7 frames being higher than rigid longitudinal registration average of 7.9 frames. Comparing the virtual catheter
769
+ method to using Dynamic Programming for rotational registration, it was shown that such discrete optimization
770
+ algorithms exhibit poor performance for CT-OCT rotational registration (angular mismatch of 77.9 degrees) which
771
+ is higher than the angular mismatch after rigid rotational registration alone. Table 3 quantifies the angular mismatch
772
+ in the case where non-rigid longitudinal registration is successful. For bifurcations with a maximum frame mismatch
773
+ of 6 after non-rigid registration, the angular mismatch decreases from 77.9 to 65.2 for the Dynamic Programming
774
+ approach, while for the virtual catheter method, the angular mismatch decreases from 28.6 to 24.8. In contrast, the
775
+ angular mismatch for the rigid registration is unchanged after excluding non-matching bifurcations.
776
+ 9
777
+
778
+ arXiv Template
779
+ A PREPRINT
780
+ Figure 8: Qualitative results comparing the alignment of calcium annotations between OCT (first row) and CT (third
781
+ row) for selected frames with good luminal alignment. The middle row shows the calcium annotations for OCT (red)
782
+ and CT (green) superimposed on each other.
783
+ Table 1: Accuracy of alternative co-registration approaches, proposed for intravascular-intravascular image registration.
784
+ Data is presented from left to right including evaluated co-registered modalities, dataset size, and overall methodological
785
+ approach. Further, average errors are presented in both longitudinal (frames) and rotational (degree) directions.
786
+ Ref.
787
+ Modalities
788
+ Dataset Size
789
+ Methodology
790
+ Longitudinal mismatch
791
+ Angular mismatch
792
+ Karmakar et al. [2022]
793
+ OCT-OCT
794
+ 9 patients
795
+ DTW + Dynamic Programming
796
+ 0.9 ± 0.8
797
+ 7.7 ± 6.7
798
+ Tsiknakis et al. [2023]
799
+ OCT-OCT
800
+ 21 patients
801
+ DTW + Harmony Search
802
+ 5.6 ± 6.7
803
+ 1.2 ± 0.81
804
+ Karmakar et al. [2022]
805
+ OCT-IVUS
806
+ 7 patients
807
+ DTW + Dynamic Programming
808
+ 1.45 ± 0.7
809
+ 29.1 ± 23.2
810
+ Molony et al. [2016]
811
+ OCT-IVUS
812
+ 12 patients
813
+ DTW + Dynamic Programming
814
+ 5.0 ± 6.2
815
+ 17.8 ± 21.9
816
+ Table 2: Accuracy of co-registration approaches applied to CT-OCT image registration. Data is presented from left to
817
+ right including evaluated co-registered modalities, dataset size, and overall methodological approach. Further, average
818
+ errors are presented in both longitudinal (frames) and rotational (degree) directions. All approaches in this table have
819
+ been evaluated on the same dataset.
820
+ Ref.
821
+ Modalities
822
+ Dataset Size
823
+ Methodology
824
+ Longitudinal mismatch
825
+ Angular mismatch
826
+ Karmakar et al. [2022]
827
+ CT-OCT
828
+ 40 patients
829
+ DTW + Dynamic Programming
830
+ 11.7 ± 12.1
831
+ 77.9 ± 61.0
832
+ Ours (Rigid)
833
+ CT-OCT
834
+ 40 patients
835
+ Virtual Catheter Method
836
+ 7.9 ± 7.1
837
+ 36.0 ± 31.9
838
+ Ours (Rigid+Non-rigid)
839
+ CT-OCT
840
+ 40 patients
841
+ Virtual Catheter Method
842
+ 3.3 ± 3.9
843
+ 28.6 ± 40.9
844
+ Table 3: Accuracy of co-registration approaches applied to CT-OCT image registration for bifurcations that are
845
+ longitudinally matched. Data is presented from left to right including methodological approach and number of
846
+ longitudinally matched bifurcations. Bifurcations are considered longitudinally matched when they have a maximum
847
+ frame difference of 6 after non-rigid longitudinal registration. Further, average errors are presented for rotational
848
+ direction in degrees.
849
+ Ref.
850
+ Methodology
851
+ Matched Bifurcations
852
+ Angular Mismatch
853
+ Karmakar et al. [2022]
854
+ DTW + Dynamic Programming
855
+ 52/114
856
+ 65.2 ± 72.9
857
+ Ours (Rigid)
858
+ Virtual Catheter Method
859
+ 99/114
860
+ 36.0 ± 33.0
861
+ Ours (Rigid+Non-rigid)
862
+ Virtual Catheter Method
863
+ 99/114
864
+ 24.8 ± 39.0
865
+ 10
866
+
867
+ OUarXiv Template
868
+ A PREPRINT
869
+ 4
870
+ Discussion
871
+ The aim of the current study was to develop a fully automatic registration algorithm to align CCTA and intravascular
872
+ images. Specifically, we propose a novel registration process finding the optimal rigid and non-rigid spatial transforms
873
+ using a virtual catheter path in the CCTA data, aligning the non-invasive modality to its invasive counterpart. Our results
874
+ indicate excellent co-registration accuracy, showing strong agreement with reference manual landmark annotations
875
+ (Figure 4). Further, our results underline the critical importance of a non-rigid registration step, with significant
876
+ enhancement in both longitudinal and rotational alignments observed when comparing rigid vs. non-rigid alignments in
877
+ Table 2. We demonstrate that for the vast majority of bifurcations, our framework is able to improve the longitudinal
878
+ and rotational alignment of common bifurcations within the CT and OCT images. Lastly, we demonstrate the added
879
+ value of our approach as compared to state-of-the-art alternatives, with a head-to-head comparison to previously
880
+ developed discrete optimization alignment algorithms (Table 1). A head-to-head comparison demonstrates that discrete
881
+ optimization approaches for longitudinal and rotational alignment suffer a significant drop in alignment quality when
882
+ applied for the task of CT-OCT co-registration. Meanwhile, our approach maintains performance accuracy in line with
883
+ simpler tasks such as intravascular-intravascular image registration.
884
+ 4.1
885
+ Related work
886
+ Currently, a majority of CCTA studies that validate their CT findings with intravascular images have used rigid manual
887
+ registration based on fiduciary landmarks such as bifurcations or large calcifications (Carlier et al. [2014], Tu et al.
888
+ [2011], Hebsgaard et al. [2015]). One of the few studies that attempted to align CT and intravascular data up to a non-
889
+ rigid level is by Uzu et al. [2019] in which a B-spline deformation model was used to optimize the alignment of manually
890
+ annotated bifurcation landmarks. Though powerful, such an approach is time-consuming due to the significant amount
891
+ of manual processing required to process the OCT images, rigidly align the geometric models, and mark bifurcations
892
+ within every artery. In comparison, our approach implicitly matches nearby bifurcations using longitudinally smoothed
893
+ SDFs representing the CT and OCT lumens, respectively. Other approaches that register intravascular-to-intravascular
894
+ modalities have in the past relied on DTW (Molony et al. [2016], Karmakar et al. [2022]), discretely optimizing the
895
+ frame-wise progression of one intravascular pullback to maximize longitudinal and rotational alignment with another.
896
+ Such methods, nevertheless, attempt to recapitulate the continuous motion distortions introduced by the catheter path
897
+ with discrete non-physiological frames or repeats (Molony et al. [2016]). However, skipping or repeating several frames
898
+ that the catheter motion is not smooth or continuous, which is an unrealistic assumption about the catheter path.
899
+ Direct numerical comparison of reported co-registration accuracy across published approaches is inherently difficult
900
+ as co-registration accuracy is highly dependent on the specific dataset explored as well as which modalities are being
901
+ coregistered. For example, the simplest co-registration task would be represented by the alignment of same-modality
902
+ images such as OCT-OCT image pairs. For such tasks, our previously published DTW and dynamic programming
903
+ approach (Karmakar et al. [2022]) exhibits similar performance compared to other state-of-the-art algorithms (Tsiknakis
904
+ et al. [2023]) (See Table 1). When applied to multimodality datasets, such as IVUS-OCT image pairs, our previously
905
+ developed approach (Karmakar et al. [2022]) suffers distinctive drops in both longitudinal and rotational accuracy (see
906
+ Table 1), however, still maintains comparative performance to similar Dynamic Programming approaches (Molony et al.
907
+ [2016]). Thus, in order to facilitate a head-to-head comparison on the more challenging task of registering CT-OCT
908
+ image pairs, we applied our previously developed discrete optimization algorithm (Karmakar et al. [2022]) on our
909
+ multi-modal dataset of 40 patients. Doing so, we found that our previous approach produced significantly worse
910
+ longitudinal and rotational alignment compared to the virtual catheter method, with both longitudinal and angular
911
+ alignment being worse than simple rigid registration (Table 2). In contrast, our developed methodology achieves
912
+ competitive results with even intravascular-intravascular registration studies (Table 1 and Table 2).
913
+ 4.2
914
+ Methodological Adaptations
915
+ From the above it can be seen that the task of co-registering CT and OCT images presents several unique difficulties for
916
+ discrete registration algorithms. Our framework has several features that were designed to mitigate such challenges.
917
+ First, the low resolution of CT images induces a circular bias in the lumen segmentations (see Figure 4), as well as a
918
+ tendency to miss small bifurcations. Such circularly symmetric regions hence create zones of longitudinal and rotational
919
+ ambiguity along the pullback. Our approach tries to minimize the dependency of such by formulating the longitudinal
920
+ and rotational transforms acting on the virtual path in terms of a regularized and smooth B-spline deformation. As
921
+ such, the optimization procedure is mainly dominated by the alignment of prominent non-symmetric features such
922
+ as bifurcations, rather than the circularly symmetric lumen segments. This ensures that the rotational alignment of
923
+ all non-bifurcating lumen frames that are in proximity to matched bifurcations are properly matched due to B-spline
924
+ interpolation (Figure 4). Another significant issue faced in previous rotational co-registration algorithms (Karmakar
925
+ 11
926
+
927
+ arXiv Template
928
+ A PREPRINT
929
+ et al. [2022], Molony et al. [2016]) is that lumen bifurcations are only able to contribute to rotational alignment if they
930
+ exist within the same frame. As such, poor longitudinal alignment of bifurcations was a significant contributing factor
931
+ to the poor performance of our previously developed dynamic programming algorithm for rotational co-registration
932
+ (Table 2). Our framework, in contrast, minimizes this dependency through the use of a Gaussian smoothing kernel
933
+ applied longitudinally over the SDF. Longitudinal smoothing allows single-frame bifurcations to appear in adjacent
934
+ frames and smooths the loss surface such that bifurcations in the different modalities can be better aligned (Figures 4
935
+ and 7). Another design choice that was found to increase training stability and co-registration quality was the use of
936
+ SDF’s to determine alignment, as opposed to using a segmentation loss such as cross-entropy or Dice. This was due to
937
+ the fact that when the lumen segmentations were fully overlapping, multiple rotational and transverse configurations
938
+ contribute equally to a segmentation loss function, preventing the algorithm from making fine adjustments in the spatial
939
+ transform. Figure 6 further demonstrates how the quality of rotational registration varies with angular mismatch, where
940
+ the angular mismatch tends to occur due to the limitations of local optimization of pixel alignment. At less than 10
941
+ degrees mismatch, the difference in alignment is minimal, while under 30 degrees, the difference in alignment can
942
+ be attributed to differing lumen bifurcation shapes in the CT and OCT data. Lastly, many co-registration methods
943
+ normalize the position of the lumen by the artery centroid (Uzu et al. [2019], Karmakar et al. [2020, 2022], Molony
944
+ et al. [2016]). While such an approach manages to align CT and OCT frames with circularly symmetric lumen, it fails
945
+ to effectively align equivalent frames with bifurcations as the segmentation can have different maximum diameters
946
+ between the modalities and thus different centroids. Moreover, centering the image around the lumen centroids can
947
+ cause the algorithm to align bifurcations 180-degrees from the correct orientation. It was empirically found that this
948
+ phenomenon was a significant contributing factor to the degradation of the co-registration performance of
949
+ our previously developed discrete co-registration algorithm. In this framework, we instead choose to jointly optimize
950
+ for the transverse displacements of the virtual path in addition to the longitudinal and rotational displacements, which
951
+ allows for the bifurcations in both modalities to be anchored around the OCT catheter location and enables near
952
+ pixelwise alignment of the lumen (Figures 4,6) and plaque constituents such as calcium (Figure 8).
953
+ 4.3
954
+ Limitations
955
+ Though very promising for clinical applications, our developed approach has a number of limitations. First, the
956
+ non-rigid spatial transform acting on the virtual catheter path is found through gradient-based optimization, requiring
957
+ that landmarks lie sufficiently close such that proper matching is ensured. For example, common bifurcations that
958
+ have a frame mismatch of more than 6 frames (corresponding to the longitudinal smoothing kernel) are expected to
959
+ be uncorrelated in terms of orientation. This issue can be mitigated by integrating deep learning networks which can
960
+ accurately predict the spatial transform needed to align the two modalities. Another limitation is the dependence of
961
+ non-rigid registration on the lumen. The lumen estimation is expected to be accurate for both modalities and as such,
962
+ ensures good registration accuracy for regions that include many bifurcations. However, due to the poor resolution of
963
+ CCTA images, the lumen estimation tends to be highly circular. Accordingly, it is expected that rotational co-registration
964
+ certainty increases with bifurcation proximity but decreases in stenotic regions that contain highly circular CT luminal
965
+ profiles. In the future, co-registration accuracy can likely be improved by including contextual information relating
966
+ to the vessel wall such as lesion content and morphology as a supervisory signal in the loss function. Third, the use
967
+ of a pixel-wise loss as a surrogate for luminal alignment may not necessarily result in optimal alignment of lumen
968
+ bifurcations. As seen in Figure 6, a pixel-wise loss function can occasionally bias the spatial transform to align the
969
+ central lumen body over aligning the bifurcation in scenarios where the bifurcation shapes are not perfectly matching.
970
+ In the future, this issue can be mitigated by introducing an orientation loss to bias the spatial transform to rotationally
971
+ align bifurcations. Lastly, regularizing the spatial transform and smoothing the SDFs can create difficulties in localizing
972
+ landmarks up to frame-wise precision. This can be seen in the area curve in Figure 4 section B with the slightly
973
+ mismatched bifurcation and in Figure 7 A2 and B2 with a small number of bifurcations with increased frame and
974
+ angular mismatch values. The localization capabilities of the algorithm can be improved by introducing multi-scale
975
+ deformation steps where finer control point grids can be recursively used as the basis for the spatial transform.
976
+ 4.4
977
+ Translational Benefits
978
+ The development of automatic frame-wise matching algorithms for CT-OCT data fusion would enable the development
979
+ of several research-based applications. First, intravascular imaging data can act as ground truth to validate the reliability
980
+ of CT in delineating several morphological metrics of atherosclerosis, such as luminal area, lipid content, and calcium
981
+ volume. Understanding when CT-derived morphological metrics are reliable is critical for both therapy planning and
982
+ deciding when to rely on intravascular imaging. For example, studying the interaction between calcium blooming and
983
+ measured lumen size in CT images necessitates that ground truth lumen measurements be available, which can only be
984
+ provided by frame-wise co-registration algorithms. Figure 8 demonstrates that such a frame-by-frame comparison can
985
+ be done provided that longitudinal and rotational co-registration is of sufficient quality. Second, multi-modal data fusion
986
+ 12
987
+
988
+ arXiv Template
989
+ A PREPRINT
990
+ would allow for the enhanced generation of patient-specific digital twins from coronary images. There has been an
991
+ increasing interest in the use of intravascular images to create computational digital twins for the prediction of coronary
992
+ pathophysiology and clinical decision-making (Kadry et al. [2021]). However, intravascular images, while providing
993
+ excellent resolution within the imaging frame, do not provide sufficient information to create a fully physiological
994
+ artery model. Intravascular images typically suffer from intra-frame motion drift artifacts in the longitudinal and
995
+ rotational directions and cannot capture information on the three-dimensional centerline of the artery. On the other
996
+ hand, CT suffers from poor resolution but is able to capture the three-dimensional nature of the artery with high
997
+ accuracy. Combining both modalities would allow researchers to investigate the importance of longitudinal and
998
+ rotational distortions, as well as modeling arterial tortuosity.
999
+ References
1000
+ Georgios Tzimas, Gaurav S Gulsin, Hidenobu Takagi, Niya Mileva, Jeroen Sonck, Olivier Muller, Jonathon A Leipsic,
1001
+ and Carlos Collet. Coronary ct angiography to guide percutaneous coronary intervention. Radiology: Cardiothoracic
1002
+ Imaging, 4(1):e210171, 2022.
1003
+ Kenzo Uzu, Hiromasa Otake, Gilwoo Choi, Takayoshi Toba, Hyun Jin Kim, Arjun Roy, Michiel Schaap, Leo Grady,
1004
+ Masahito Kawata, Toshiro Shinke, et al. Lumen boundaries extracted from coronary computed tomography angiogra-
1005
+ phy on computed fractional flow reserve (ffrct): validation with optical coherence tomography. Eurointervention:
1006
+ Journal of Europcr in Collaboration with the Working Group on Interventional Cardiology of the European Society
1007
+ of Cardiology, 14(15):e1609–e1618, 2019.
1008
+ Choongki Kim, Sung-Jin Hong, Dong-Ho Shin, Jung-Sun Kim, Byeong-Keuk Kim, Young-Guk Ko, Donghoon Choi,
1009
+ Yangsoo Jang, and Myeong-Ki Hong. Limitations of coronary computed tomographic angiography for delineating the
1010
+ lumen and vessel contours of coronary arteries in patients with stable angina. European Heart Journal-Cardiovascular
1011
+ Imaging, 16(12):1358–1365, 2015.
1012
+ Matthew J Budoff, David Dowe, James G Jollis, Michael Gitter, John Sutherland, Edward Halamert, Markus Scherer,
1013
+ Raye Bellinger, Arthur Martin, Robert Benton, et al. Diagnostic performance of 64-multidetector row coronary
1014
+ computed tomographic angiography for evaluation of coronary artery stenosis in individuals without known coronary
1015
+ artery disease: results from the prospective multicenter accuracy (assessment by coronary computed tomographic
1016
+ angiography of individuals undergoing invasive coronary angiography) trial. Journal of the American College of
1017
+ Cardiology, 52(21):1724–1732, 2008.
1018
+ Yu Takahashi, Takayoshi Toba, Hiromasa Otake, Yusuke Fukuyama, Shinsuke Nakano, Yoichiro Matsuoka, Kosuke
1019
+ Tanimura, Yu Izawa, Hiroyuki Kawamori, Atsushi K Kono, et al. Feasibility of morphological assessment of coronary
1020
+ artery calcification with electrocardiography-gated non-contrast computed tomography: a comparative study with
1021
+ optical coherence tomography. The International Journal of Cardiovascular Imaging, 37(4):1445–1453, 2021.
1022
+ Collin Fischer, Edward Hulten, Pallavi Belur, Ryan Smith, Szilard Voros, and Todd C Villines. Coronary ct angiography
1023
+ versus intravascular ultrasound for estimation of coronary stenosis and atherosclerotic plaque burden: a meta-analysis.
1024
+ Journal of cardiovascular computed tomography, 7(4):256–266, 2013.
1025
+ Michiel A De Graaf, Alexander Broersen, Pieter H Kitslaar, Cornelis J Roos, Jouke Dijkstra, Boudewijn PF Lelieveldt,
1026
+ J Wouter Jukema, Martin J Schalij, Victoria Delgado, Jeroen J Bax, et al. Automatic quantification and characterization
1027
+ of coronary atherosclerosis with computed tomography coronary angiography: cross-correlation with intravascular
1028
+ ultrasound virtual histology. The international journal of cardiovascular imaging, 29(5):1177–1190, 2013.
1029
+ H Brodoefel, C Burgstahler, M Heuschmid, A Reimann, F Khosa, A Kopp, S Schroeder, CD Claussen, and ME Clouse.
1030
+ Accuracy of dual-source ct in the characterisation of non-calcified plaque: use of a colour-coded analysis compared
1031
+ with virtual histology intravascular ultrasound. The British journal of radiology, 82(982):805–812, 2009.
1032
+ A Lin, N Manral, P McElhinney, A Killekar, H Matsumoto, S Cadet, S Achenbach, SJ Nicholls, DT Wong, D Berman,
1033
+ et al. Deep learning-based plaque quantification from coronary computed tomography angiography: external
1034
+ validation and comparison with intravascular ultrasound. European Heart Journal, 42(Supplement_1):ehab724–0161,
1035
+ 2021.
1036
+ Marly van Assen, Akos Varga-Szemes, U Joseph Schoepf, Taylor M Duguay, H Todd Hudson, Svetlana Egorova,
1037
+ Kjell Johnson, Samantha St Pierre, Beatrice Zaki, Matthijs Oudkerk, et al. Automated plaque analysis for the
1038
+ prognostication of major adverse cardiac events. European Journal of Radiology, 116:76–83, 2019.
1039
+ Nikos Tsiknakis, Constantinos Spanakis, Panagiota Tsompou, Georgia Karanasiou, Gianna Karanasiou, Antonis
1040
+ Sakellarios, George Rigas, Savvas Kyriakidis, Michael Papafaklis, Sotirios Nikopoulos, et al. Ivus longitudinal and
1041
+ axial registration for atherosclerosis progression evaluation. Diagnostics, 11(8):1513, 2021.
1042
+ 13
1043
+
1044
+ arXiv Template
1045
+ A PREPRINT
1046
+ Stéphane Carlier, Rich Didday, Tristan Slots, Peter Kayaert, Jeroen Sonck, Mike El-Mourad, Nicolas Preumont,
1047
+ Dany Schoors, and Guy Van Camp. A new method for real-time co-registration of 3d coronary angiography and
1048
+ intravascular ultrasound or optical coherence tomography. Cardiovascular Revascularization Medicine, 15(4):
1049
+ 226–232, 2014.
1050
+ Shengxian Tu, Niels R Holm, Gerhard Koning, Zheng Huang, and Johan HC Reiber. Fusion of 3d qca and ivus/oct.
1051
+ The international journal of cardiovascular imaging, 27(2):197–207, 2011.
1052
+ Lasse Hebsgaard, Troels Munck Nielsen, Shengxian Tu, Lars Romer Krusell, Michael Maeng, Karsten Tange Veien,
1053
+ Bent Raungaard, Christian Juhl Terkelsen, Anne Kaltoft, Johan HC Reiber, et al. Co-registration of optical coherence
1054
+ tomography and x-ray angiography in percutaneous coronary intervention. the does optical coherence tomography
1055
+ optimize revascularization (doctor) fusion study. International journal of cardiology, 182:272–278, 2015.
1056
+ Hui Qin, Chunming Li, Yingguang Li, Jiayue Huang, Fan Yang, Takashi Kubo, Takashi Akasaka, Changyan Xiao,
1057
+ Juan Luis Gutiérrez-Chico, and Shengxian Tu.
1058
+ Automatic coregistration between coronary angiography and
1059
+ intravascular optical coherence tomography: Feasibility and accuracy. JACC: Asia, 1(2):274–278, 2021.
1060
+ David S Molony, Lucas H Timmins, Emad Rasoul-Arzrumly, Habib Samady, and Don P Giddens. Evaluation of a
1061
+ framework for the co-registration of intravascular ultrasound and optical coherence tomography coronary artery
1062
+ pullbacks. Journal of biomechanics, 49(16):4048–4056, 2016.
1063
+ Abhishek Karmakar, Max L Olender, Farhad Rikhtegar Nezami, David Marlevi, Evan Shlofmitz, Richard A Shlofmitz,
1064
+ and Elazer R Edelman. Detailed investigation of lumen-based tomographic co-registration. In 2020 IEEE International
1065
+ Conference on Bioinformatics and Biomedicine (BIBM), pages 1038–1042. IEEE, 2020.
1066
+ Yazan Gharaibeh, Juhwan Lee, David Prabhu, Pengfei Dong, Vladislav N Zimin, Luis A Dallan, Hiram Bezerra, Linxia
1067
+ Gu, and David Wilson. Co-registration of pre-and post-stent intravascular oct images for validation of finite element
1068
+ model simulation of stent expansion. In Medical Imaging 2020: Biomedical Applications in Molecular, Structural,
1069
+ and Functional Imaging, volume 11317, pages 306–316. SPIE, 2020.
1070
+ Ling Zhang, Richard Downe, Zhi Chen, Shanhui Sun, T Masiarov, Tomas Kovarnik, John Lopez, Milan Sonka, and
1071
+ Andreas Wahle. Side-branch guided registration of intravascular ultrasound pullbacks in coronary arteries. In
1072
+ MICCAI Workshop in Computing and Visualization for IntraVascular Imaging and Computer Assisted Stenting
1073
+ (CVII-STENT), pages 44–51, 2014.
1074
+ Guha Balakrishnan, Amy Zhao, Mert R Sabuncu, John Guttag, and Adrian V Dalca. Voxelmorph: a learning framework
1075
+ for deformable medical image registration. IEEE transactions on medical imaging, 38(8):1788–1800, 2019.
1076
+ Yabo Fu, Yang Lei, Tonghe Wang, Walter J Curran, Tian Liu, and Xiaofeng Yang. Deep learning in medical image
1077
+ registration: a review. Physics in Medicine & Biology, 65(20):20TR01, 2020.
1078
+ Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. Advances in neural
1079
+ information processing systems, 28, 2015.
1080
+ Eran Treister and Eldad Haber. A fast marching algorithm for the factored eikonal equation. Journal of Computational
1081
+ physics, 324:210–225, 2016.
1082
+ Luca Antiga, Marina Piccinelli, Lorenzo Botti, Bogdan Ene-Iordache, Andrea Remuzzi, and David A Steinman.
1083
+ An image-based modeling framework for patient-specific computational hemodynamics. Medical & biological
1084
+ engineering & computing, 46(11):1097–1112, 2008.
1085
+ Jixiang Guo, Shun Li, Yim Pan Chui, Jing Qin, and Pheng Ann Heng. Mesh quality oriented 3d geometric vascular
1086
+ modeling based on parallel transport frame. Computers in Biology and Medicine, 43(7):879–888, 2013.
1087
+ Armin Kanitsar, Dominik Fleischmann, Rainer Wegenkittl, Petr Felkel, and Eduard Groller. CPR-curved planar
1088
+ reformation. IEEE, 2002.
1089
+ Daniel Rueckert, Luke I Sonoda, Carmel Hayes, Derek LG Hill, Martin O Leach, and David J Hawkes. Nonrigid
1090
+ registration using free-form deformations: application to breast mr images. IEEE transactions on medical imaging,
1091
+ 18(8):712–721, 1999.
1092
+ Sakura Nagumo, Carlos Collet, Bjarne L Norgaard, Hiromasa Otake, Brian Ko, Bon-kwon Koo, Jonathon Leipsic,
1093
+ Daniele Andreini, Ward Heggermont, Jesper M Jensen, et al. Rationale and design of the precise percutaneous
1094
+ coronary intervention plan (p3) study: Prospective evaluation of a virtual computed tomography-based percutaneous
1095
+ intervention planner. Clinical cardiology, 44(4):446–454, 2021.
1096
+ Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmenta-
1097
+ tion. In International Conference on Medical image computing and computer-assisted intervention, pages 234–241.
1098
+ Springer, 2015.
1099
+ 14
1100
+
1101
+ arXiv Template
1102
+ A PREPRINT
1103
+ Jeroen Sonck, Sakura Nagumo, Bjarne L Norgaard, Hiromasa Otake, Brian Ko, Jinlong Zhang, Takuya Mizukami,
1104
+ Michael Maeng, Daniele Andreini, Yu Takahashi, et al. Clinical validation of a virtual planner for coronary
1105
+ interventions based on coronary ct angiography. Cardiovascular Imaging, 15(7):1242–1255, 2022.
1106
+ Abhishek Karmakar, Max L Olender, David Marlevi, Evan Shlofmitz, Richard A Shlofmitz, Elazer R Edelman, and
1107
+ Farhad R Nezami. Framework for lumen-based nonrigid tomographic coregistration of intravascular images. Journal
1108
+ of Medical Imaging, 9(4):044006, 2022.
1109
+ Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980,
1110
+ 2014.
1111
+ Nikos Tsiknakis, Constantinos Spanakis, Panagiota Tsoumpou, Georgia Karanasiou, Gianna Karanasiou, Antonis
1112
+ Sakellarios, George Rigas, Savvas Kyriakidis, Michail I Papafaklis, Sotirios Nikopoulos, et al. Oct sequence
1113
+ registration before and after percutaneous coronary intervention (stent implantation). Biomedical Signal Processing
1114
+ and Control, 79:104251, 2023.
1115
+ Karim Kadry, Max L Olender, David Marlevi, Elazer R Edelman, and Farhad R Nezami. A platform for high-fidelity
1116
+ patient-specific structural modelling of atherosclerotic arteries: from intravascular imaging to three-dimensional
1117
+ stress distributions. Journal of the Royal Society Interface, 18(182):20210436, 2021.
1118
+ 15
1119
+
99AyT4oBgHgl3EQfRPYW/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
9NAzT4oBgHgl3EQfSft5/content/tmp_files/2301.01233v1.pdf.txt ADDED
@@ -0,0 +1,1415 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 1
2
+ Transferable Energy Storage Bidder
3
+ Yousuf Baker, Ningkun Zheng, Student Member, IEEE, Bolun Xu, Member, IEEE
4
+ Abstract—Energy storage resources must consider both price
5
+ uncertainties and their physical operating characteristics when
6
+ participating in wholesale electricity markets. This is a challeng-
7
+ ing problem as electricity prices are highly volatile, and energy
8
+ storage has efficiency losses, power, and energy constraints. This
9
+ paper presents a novel, versatile, and transferable approach
10
+ combining model-based optimization with a convolutional long
11
+ short-term memory network for energy storage to respond to
12
+ or bid into wholesale electricity markets. We apply transfer
13
+ learning to the ConvLSTM network to quickly adapt the trained
14
+ bidding model to new market environments. We test our proposed
15
+ approach using historical prices from New York State, showing
16
+ it achieves state-of-the-art results, achieving between 70% to
17
+ near 90% profit ratio compared to perfect foresight cases,
18
+ in both price response and wholesale market bidding setting
19
+ with various energy storage durations. We also test a transfer
20
+ learning approach by pre-training the bidding model using
21
+ New York data and applying it to arbitrage in Queensland,
22
+ Australia. The results show that transfer learning achieves exceptional
23
+ arbitrage profitability with as little as three days of local training
24
+ data, demonstrating its significant advantage over training from
25
+ scratch in scenarios with very limited data availability.
26
+ Index Terms—Energy storage; Deep learning; Transfer learn-
27
+ ing; Power system economics.
28
+ I. INTRODUCTION
29
+ Successful participation of energy storage resources in com-
30
+ petitive electricity markets benefits storage investors and social
31
+ welfare. Ancillary services such as frequency regulation have
32
+ been the primary sources of profit for energy storage owners,
33
+ but these markets have quickly saturated due to surging storage
34
+ deployments and small market size [1]. In the meantime, the
35
+ share of storage arbitraging in wholesale markets has tripled
36
+ from a little less than 20% in 2016 to almost 60% in 2021 [1].
37
+ Thus price arbitrage in wholesale markets will be the main
38
+ focus for future grid-scale energy storage projects.
39
+ Energy storage arbitrages price differences and earns rev-
40
+ enues in wholesale energy markets, i.e., charging during low-
41
+ price periods and discharging during high-price periods. At
42
+ the same time, arbitrage from energy storage helps to reduce
43
+ renewable curtailments, meet peak demands, mitigate extreme
44
+ events, and reduce the cost of electricity [2], [3]. As countries
45
+ and regions ramp up decarbonization efforts, energy storage
46
+ resources are taking on an increasingly important role in future
47
+ electricity markets and are becoming a cornerstone for cost-
48
+ effective decarbonization [4], [5]. Thus, both energy storage
49
+ owners and market organizers have significant economic and
50
+ welfare drivers to evolve models and algorithms for energy
51
+ storage arbitraging robustly and profitably.
52
+ However, energy storage arbitrage is non-trivial due to
53
+ highly volatile electricity prices and limited storage capacity.
54
+ Y. Baker, N. Zheng, and B. Xu are with Columbia University, NY, USA
55
+ (e-mail: {ykb2105, nz2343, bx2177}@columbia.edu).
56
+ Various methods have been proposed in the literature to ad-
57
+ dress energy storage participation in wholesale markets based
58
+ on different theories, but they require dedicated location-specific
59
+ tuning and excessive computing power to achieve competitive
60
+ arbitrage performance [6]. This paper proposes a novel end-to-
61
+ end system for opportunity value calculation, prediction, and
62
+ control, combining model-based dynamic programming with
63
+ neural networks. Our approach innovates and provides several
64
+ advantages as follows:
65
+ • Our approach has reliable performance as it uses model-
66
+ based dynamic programming to address physical con-
67
+ straints in both training and control stages;
68
+ • Our approach is extremely computation efficient as it uses
69
+ dynamic programming to pre-process the training data,
70
+ reducing the complexity of the learning module;
71
+ • Our approach is transferable to different market en-
72
+ vironments while maintaining competitive performance
73
+ because of the integration of transfer learning;
74
+ • Our approach is founded on dynamic programming value
75
+ functions and adapts to different storage market designs
76
+ and participation scenarios, including price response and
77
+ market economic bidding;
78
+ • Our approach achieves state-of-the-art arbitrage perfor-
79
+ mance, achieving 70% to near 90% profit ratio compared
80
+ to perfect foresight with various storage durations when
81
+ tested using price data from New York, US, and Queens-
82
+ land, Australia.
83
+ The rest of the paper is organized as follows: Section II
84
+ summarizes energy storage market participation and previous
85
+ work using the learning method, Section III and IV elaborates
86
+ on the arbitrage formulation and solution method, Section V
87
+ presents the case study for price response and economic bid
88
+ market rules in New York and the application of transfer
89
+ learning for Queensland, and Section VII concludes the paper.
90
+ II. LITERATURE REVIEW
91
+ A. Energy Storage Price Response and Self-Schedule
92
+ Energy storage price response assumes the storage partici-
93
+ pant can observe the real-time price realization first and then
94
+ decide on the operation privately without informing the system
95
+ operator. The price response participation option primarily
96
+ applies to small-scale behind-the-meter (BTM) storage re-
97
+ sources (< 1 MW) [7]. Plenty of prior works have investigated
98
+ energy storage price response using a variety of methods,
99
+ including model-predictive control (MPC) [8], stochastic pro-
100
+ gramming [9], approximate dynamic programming [10], and
101
+ reinforcement learning [11]. Price response is comparably an
102
+ easier problem than economic bids as the storage operator is
103
+ not limited to market clearing models and can act after observ-
104
+ ing new price signals. However, since price response mostly
105
+ arXiv:2301.01233v1 [cs.LG] 2 Jan 2023
106
+
107
+ 2
108
+ applies to small BTM storage projects, the revenue generated
109
+ from arbitrage will unlikely justify any specialized computing
110
+ hardware investments. Hence the arbitrage algorithm must be
111
+ slim and efficient to minimize the computation cost.
112
+ Alternatively, some markets allow energy storage operators
113
+ to self-schedule and submit the operational schedule to the
114
+ market operator. Still, this option is less frequently used in
115
+ practice compared to participating by economic bids [12].
116
+ Self-scheduled storage cannot update the operation based on
117
+ the system clearing price, a key difference compared to price-
118
+ response or economic bids, which often causes the storage to
119
+ miss price spike opportunities and deliver fewer market profits.
120
+ B. Energy Storage Economic Bids
121
+ FERC Order 841, issued in 2018, ordered all system oper-
122
+ ators in the US must allow storage to submit bids and cleared
123
+ in spot markets [13]. In this case, the storage participant must
124
+ submit charge and discharge bids to the system operator at a
125
+ specific time period ahead of the market clearing, usually one
126
+ hour (also called hour-ahead bidding). The storage participant
127
+ must follow market clearing results to charge or discharge,
128
+ unlike in the price response case in which the storage can
129
+ privately decide the control decision after observing the price.
130
+ The bid design adds another layer of complexity in arbitrag-
131
+ ing, as optimal bid design requires mathematical tools due to
132
+ storage SoC constraints. Wang et al. [14] formulate the energy
133
+ storage look-ahead profit maximization problem as a bi-level
134
+ optimization problem. A second approach for energy storage
135
+ arbitrage control is backward dynamic programming [15],
136
+ and then the evolution is approximate-dynamic programming.
137
+ Jiang and Powell outline a general approximate-dynamic pro-
138
+ gramming framework for policy generation for energy storage
139
+ operating with a stochastic generation source in response to
140
+ stochastic demand [16], and further introduce a “distribution-
141
+ free” variant of the previous algorithm that does not make any
142
+ assumption on the price process[10]. However, all of these
143
+ methods are held back by large computational costs that make
144
+ them hard to implement in real-world applications of arbitrage.
145
+ There are other algorithms for energy storage real-time
146
+ arbitrage control: Wang and Zhang [17] solve the arbitrage
147
+ problem using reinforcement learning to come to an optimal
148
+ arbitrage policy, and Zheng et al. [18] outline a computa-
149
+ tionally efficient analytical stochastic dynamic programming
150
+ algorithm (SDP) for the problem of real-time price arbitrage
151
+ of energy storage. Krishnamurthy et al. [19] also propose an
152
+ SDP algorithm for arbitrage under day-ahead and real-time
153
+ price uncertainties. However, none of the methods outlined
154
+ above demonstrate or address transferability between different
155
+ ISO zones and geographic locations, or the hour-ahead bid
156
+ submission requirements in most real-time markets.
157
+ C. Machine Learning for Storage Arbitrage
158
+ Recent efforts to apply machine learning for storage ar-
159
+ bitrage can be grouped into two thrusts: the first is to use
160
+ machine learning to generate price predictions and then in-
161
+ tegrate them with MPC. In this case, the learning module
162
+ is independent of the storage model. Sarafraz et al. [20]
163
+ and Nwulu and Fahrioglu [21] outline two machine learning
164
+ approaches for predicting locational marginal price (LMP)
165
+ prediction using neuro-fuzzy logic and soft computing re-
166
+ spectively, and Chaweewat and Singh [22] propose a residual
167
+ neural network approach to price interval prediction. The
168
+ main difficulty in combining price prediction with storage
169
+ optimization is storage arbitrage requires a look-ahead of at
170
+ least 24 hours to capture the daily price cycles [8], while most
171
+ real-time prediction methods may only accurately generate
172
+ predictions a few steps ahead of time. To this end, existing MPC approaches
173
+ rely on pre-scheduling storage using day-ahead prices but have
174
+ to neglect the real-time price variability, which is significantly
175
+ higher than in day-ahead prices [9].
176
+ The second approach is to directly use machine learning,
177
+ mainly reinforcement learning (RL), to learn the optimal
178
+ control policy for storage arbitrage directly. Wang et al. [11]
179
+ developed the first RL approach to arbitrage storage in real-
180
+ time markets. Cao et al. [23] propose a deep reinforcement
181
+ learning approach to learn an optimal control policy for energy
182
+ storage arbitrage with consideration of battery degradation.
183
+ Kwon et al. [24] demonstrated RL could optimize more
184
+ sophisticated storage models in arbitrage by integrating battery
185
+ degradation into the model. Yet, a common disadvantage of
186
+ RL-based approaches is transferability, as the model must
187
+ undergo time-consuming training to be adapted to a new price
188
+ zone or market environment. Transferability is a crucial aspect
189
+ of storage arbitrage due to spatial and temporal variations: a
190
+ typical system consists of hundreds of price nodes, and system
191
+ price behaviors evolve with changes in system resource mix
192
+ and ambient climate conditions. While previous efforts have
193
+ looked into combining transfer learning with RL [25] and its
194
+ application in selected energy-related issues, including demand
195
+ response prediction [26], event identification [27], and battery
196
+ health forecast [28]. Yet, the transferability of the storage
197
+ arbitrage model has not been previously studied.
198
+ III. PROBLEM STATEMENT AND SYSTEM OUTLINE
199
+ Our algorithm aims to predict the opportunity value at the
200
+ current state of charge (SoC) of energy storage to maximize
201
+ the price arbitrage profit. Our system is composed of three
202
+ components: valuation, forecasting, and arbitrage. We will
203
+ first present our methods for valuation and arbitrage and then
204
+ combine them with our forecasting model to form our bidding
205
+ algorithm. We define Qt(e) as the opportunity value function
206
+ representing the monetary value of the SoC e at time step
207
+ t. The problem formulation is adapted from
208
+ [29], [30], in
209
+ which the solution is formulated using dynamic programming
210
+ as follows:
211
+ $$\max_{b_t, p_t, e_t \,\in\, \mathcal{E}(e_{t-1})} \;\; \lambda_t(p_t - b_t) - c\,p_t + \hat{Q}\big(e_t \mid \theta, X\big) \tag{1a}$$
218
+ where the first term is arbitrage revenue which is the product of
219
+ the real-time market price λt and the energy storage dispatch
220
+ decision (pt − bt), where pt is the discharge power and bt
221
+ is the charge power. The second term is the discharge cost,
222
+ where c is the marginal discharge cost. The third term ˆQ is
223
+ the predicted storage opportunity value function with respect
224
+
225
+ 3
226
+ Fig. 1. The proposed structure of training opportunity value function prediction model.
227
+ to SoC et. The dynamic programming approach evaluates the
228
+ energy storage by back-propagation, which is not viable in the
229
+ real-time market where we do not have price realization ahead
230
+ of time. Thus, we need to directly predict the value function
231
+ ˆQ using historical (and current) price data. ˆQ is dependent
232
+ on the prediction model parameters θ and the prediction input
233
+ features X over a look-back period.
234
+ We denote that the storage charge and discharge power and
235
+ the final storage SoC belong to a feasibility set E(et−1) which
236
+ is dependent on the storage starting SoC et−1 at the start of
237
+ time period t (same as by the end of time period t−1). E(et−1)
238
+ is described with the following constraints:
239
+ 0 ≤ bt ≤ P, 0 ≤ pt ≤ P
240
+ (1b)
241
+ pt = 0 if λt < 0
242
+ (1c)
243
+ et − et−1 = −pt/η + btη
244
+ (1d)
245
+ 0 ≤ et ≤ E
246
+ (1e)
247
+ where (1b) models the upper bound, P, and lower bound, 0,
248
+ constraints on the storage charge and discharge power. (1c) is a
249
+ relaxed form of the constraint that enforces the energy storage
250
+ not charging and discharging simultaneously. Negative price
251
+ is the necessary condition for storage to charge and discharge
252
+ simultaneously in price arbitrage, hence by enforcing the stor-
253
+ age to not discharge when the price is negative we eliminate
254
+ simultaneous charging and discharging [29]. (1d) models the
255
+ energy storage SoC evolution constraint with efficiency η and
256
+ (1e) models the upper bound E and lower bound (we assume
257
+ as 0) of the storage SoC level.
258
+ Creating our proposed system amounts to solving the
259
+ problem of optimizing the prediction model parameters θ
260
+ to maximize storage arbitrage profit over a set of training
261
+ price data and physical storage parameters. Intuitively, this
262
+ problem can be formulated as a bi-level problem in which
263
+ the upper level maximizes the total profit over the entire
264
+ training time horizon. At the same time, the lower-level
265
+ enforces a non-anticipatory decision-making process in which
266
+ the storage dispatch decision only depends on the current
267
+ price and the predicted value function as in (1). However, this
268
+ problem quickly becomes computationally intractable since
269
+ the prediction model is embedded in the lower-level problem,
270
+ formulated as a constrained optimization problem. Therefore,
271
+ strong duality is required to convert the bi-level problem into a
272
+ single-level equivalent problem or to derive partial derivatives
273
+ and calculate the back-propagation gradients for gradient-
274
+ based approaches. However, gradient-based approaches are
275
+ complicated by the inclusion of SoC constraints [31]. In
276
+ either case, the computational complexity quickly becomes
277
+ overwhelming as the lower-level can include thousands of
278
+ problems representing the arbitrage over a particular price data
279
+ point.
280
+ Problem Statement. We consider an alternative two-stage
281
+ training approach in which we first generate the optimal
282
+ opportunity value function and then train the learning model
283
+ to predict the generated value function. This is formulated as
284
+ $$\min_{\theta} \; \sum_{e \in \mathcal{S}} \Big\| \hat{q}_t\big(e \mid \theta, X\big) - q_t(e) \Big\|_2^2 \tag{2a}$$
297
+ subject to
298
+ $$q_t(e) = \frac{\partial}{\partial e} Q_t(e) \tag{2b}$$
301
+ $$Q_{t-1}(e_{t-1}) = \max_{b_t, p_t, e_t \,\in\, \mathcal{E}(e_{t-1})} \; \lambda_t(p_t - b_t) - c\,p_t + Q_t(e_t) \tag{2c}$$
307
+ Note that (2c) is also subject to the storage operation con-
308
+ straint set E(et−1) as described in (1b)–(1e). (2c) is a dynamic
309
+ programming energy storage price arbitrage formulation in
310
+ which the storage opportunity value is defined recursively
311
+ as the maximized storage arbitrage profit including the profit
312
+ from the current time step and the future opportunity values.
313
+ This formulation fits a piece-wise linear approximation of
314
+ the value function qt(e) based on the first order derivative
315
+ of the optimal value function Qt, and e is from the set of
316
+ SoC segments S. Note that in this formulation the prediction
317
+ model parameters θ are not involved in (2c), hence this is a
318
+ two-stage model in which we solve (2c) first and obtain all
319
+ optimal value function results from Qt, and more specifically,
320
+ their derivatives qt. We are then able to use (2a) to solve for
321
+ the optimal value function at each time step, which we use to
322
+ train the prediction model.
323
+ IV. SOLUTION AND SYSTEM SETUP
324
+ Our approach includes three steps: first, we use the deter-
325
+ ministic price arbitrage dynamic programming approach from
326
+ the previous section to generate the optimal storage opportu-
327
+ nity value function segments using historical price data. We
328
+ then train a learning model to predict the optimal storage
329
+ opportunity value segments from past price data. Finally, we
330
+
331
+ qe(e)
332
+ Analytical Dynamic
333
+ Programming
334
+ Algorithm (model
335
+ based)
336
+ {X, Y} = {[ΛDAP|ΛRTP], Q}
337
+ Model-Based
338
+ Arbitrage
339
+ CNN-LSTM
340
+ Price Pre-
341
+ Processing
342
+ Opportu
343
+ qt (el0, X) - qt(e4
344
+ test the learned model over unseen (future) price datasets.
345
+ The system structure is shown in Fig. 1, which includes
346
+ the dynamic programming solution and training method, with
347
+ specifics on the data engineering in Section IV-A.
348
+ A. Feature and Label Formatting
349
+ In general, the spot price for energy exhibits long-term
350
+ and short-term cycles according to cycling demand: the daily
351
+ cycling between peak and non-peak hours and the long-term
352
+ seasonal cycles; though events and the stochastic nature of
353
+ price create differences in between. Thus we chose to use
354
+ a convolutional long short-term memory (ConvLSTM) neural
355
+ net, which can learn patterns in time series data. For learning
356
+ timestep t, our network input/target pair could be [λt, qt] (or
357
+ qt+hr, where +hr represents an hour time shift for the HA
358
+ case). However, to better capture daily cycling, we elaborate
359
+ our single-step input-output pair by constructing the following
360
+ input-output matrices:
361
+ {X, Y} = {[ΛDAP|ΛRTP], Q}
362
+ $$\Lambda_{\mathrm{DAP}} =
+ \begin{bmatrix}
+ \lambda_{\mathrm{DAP},\,t-m} & \lambda_{\mathrm{DAP},\,t-m+1} & \dots & \lambda_{\mathrm{DAP},\,t} \\
+ \lambda_{\mathrm{DAP},\,t-m-1} & \lambda_{\mathrm{DAP},\,t-m} & \dots & \lambda_{\mathrm{DAP},\,t-1} \\
+ \vdots & \vdots & \ddots & \vdots \\
+ \lambda_{\mathrm{DAP},\,t-m-5hr} & \lambda_{\mathrm{DAP},\,t-m-5hr+1} & \dots & \lambda_{\mathrm{DAP},\,t-5hr}
+ \end{bmatrix}$$
+ $$\Lambda_{\mathrm{RTP}} =
+ \begin{bmatrix}
+ \lambda_{\mathrm{RTP},\,t-n} & \lambda_{\mathrm{RTP},\,t-n+1} & \dots & \lambda_{\mathrm{RTP},\,t} \\
+ \lambda_{\mathrm{RTP},\,t-n-1} & \lambda_{\mathrm{RTP},\,t-n} & \dots & \lambda_{\mathrm{RTP},\,t-1} \\
+ \vdots & \vdots & \ddots & \vdots \\
+ \lambda_{\mathrm{RTP},\,t-n-5hr} & \lambda_{\mathrm{RTP},\,t-n-5hr+1} & \dots & \lambda_{\mathrm{RTP},\,t-5hr}
+ \end{bmatrix}$$
+ $$Q = \begin{bmatrix} q_t \\ q_{t-1} \\ \vdots \\ q_{t-5hr} \end{bmatrix}$$
411
+ where ΛDAP and ΛRTP are matrices made up of our day ahead
412
+ and real-time price data, and m, n are a lookback window for
413
+ the day ahead and real-time prices respectively, and 5hr is the
414
+ number of timesteps that make up five hours in a given market
415
+ resolution (60 in a 5 min resolution market). This allows the
416
+ network to capture not only the information on past prices for
417
+ the current value function but also the relationship between
418
+ past value functions in a 5-hour lookback. We chose five hours
419
+ here as it is long enough to capture cycles within a single day
420
+ (e.g. peak vs non-peak demand and the transition between
421
+ them). The inclusion of the day ahead price here serves as
422
+ a more stable price reference for the corresponding hour’s
423
+ spot price. Also of note is the cyclic symmetry of the price
424
+ matrices along the diagonal, which allows the network to learn
425
+ better the equivariant properties of the dataset [32]. Finally, the
426
+ choice of a ConvLSTM, as opposed to a traditional LSTM, is
427
+ to allow the network to capture the “vertical” temporal relation
428
+ between the five hours of data in each data block.
429
+ Note that for DAP, the shift applied to t across rows
430
+ corresponds to a step shift in the resolution of the real-time
431
+ market (RTM). Meaning that if it is a 5-minute resolution
432
+ RTM, the first 12 rows of ΛDAP will be the same since the
433
+ day-ahead market (DAM) is hourly resolution.
434
+ B. Model Selection and Transfer Learning
435
+ The focus of this paper is to demonstrate the robustness
436
+ of the approach across different market conditions and bat-
437
+ tery durations and to show its transferability between zones.
438
+ Thus we chose one general network architecture for testing.
439
+ However, initial experimentation showed minimal gain-loss
440
+ in network performance on minor parameter changes across
441
+ cases. Further, to guarantee that the training converges to a
442
+ well-performing set of weights, multiple networks were trained
443
+ for each case. The weights achieving the most consistent and
444
+ low validation error were saved for evaluation. Of those, the
445
+ best model was chosen by the highest arbitrage profit. The
446
+ network is trained over 100 epochs in the case where it is
447
+ trained from scratch, and 25 epochs for the transfer learning
448
+ training, with a learning rate of 10−3. Further, we use a
449
+ callback function that saves the model weights only when the
450
+ validation error improves, ensuring that the weights loaded for
451
+ training are not overfitted. This callback also allows us to set
452
+ our epochs with significant overhead to ensure convergence
453
+ without over-fitting in all cases.
454
+ Furthermore, we apply transfer learning to quickly adapt a
455
+ trained model from one price zone to another. Our transfer
456
+ learning approach freezes all model layers except the output
457
+ layer and retraining on the dataset of the task to be transferred
458
+ to [33]. The underlying assumption is that the output layer is
459
+ more sensitive to data variability while the rest of the network
460
+ captures persistent patterns in the data.
461
+ C. Full Algorithm
462
+ We lay out our workflow, which is a sequence of three
463
+ algorithms. As a prerequisite for model training, we generate
464
+ all value functions Q according to the dynamic programming
465
+ solution in VII-A. After this, we construct our data set and
466
+ train our LSTM prediction model according to Algorithm 1,
467
+ which produces our trained model weights θ.
468
+ Algorithm 1 Value Function Prediction Model Training
469
+ 1: Dataset Preparation: Pre-Process data according to IV-A
470
+ 2: Initialization: Initialize model parameters θ using random
471
+ seed.
472
+ 3: i ← 0
473
+ 4: while stop criteria not true do
474
+ 5:
475
+ for t ∈ [1, t] do
476
+ 6:
477
+ x ← [ΛDAP|ΛRTP]
478
+ 7:
479
+ y ← Q
480
+ 8:
481
+ Calculate Loss Components by Eq. (2a)
482
+ 9:
483
+ Update θ by backpropagation
484
+ 10:
485
+ end for
486
+ 11:
487
+ i ← i + 1
488
+ 12: end while
489
+ 13: return θ
490
+ ▷ Parameters of the prediction model
491
+ After this, if the trained LSTM model produced by Algo-
492
+ rithm 1 is to then be used by another zone, it can be retrained
493
+ using the transfer learning approach outlined in Algorithm
494
+ 2. This is largely the same as the workflow of algorithm
495
+ 1, save that the training dataset is of the new zone and the
496
+
497
+ 5
498
+ newly trained model weights are denoted θ∗. We differentiate
499
+ between the two sets of model weights since we compare
500
+ the two approaches of transferring (transfer learning, applying
501
+ the model on new zones without retraining) later in the
502
+ paper. Finally, algorithm 3 outlines the process of simulating
503
+ Algorithm 2 Transfer Learning
504
+ 1: Initialization: Initialize model parameters θ∗ using ran-
505
+ dom seed
506
+ 2: θ∗ ← θ (trained model parameters)
507
+ 3: Freeze all parameters except output layer parameters
508
+ 4: Repeat training loop using new region’s data set $\{[\Lambda^{*}_{\mathrm{DAP}} \mid \Lambda^{*}_{\mathrm{RTP}}],\, Q^{*}\}$
519
+ 5: return θ∗
520
+ ▷ Parameters of the prediction model
521
+ arbitrage using our prediction model. The arbitrage simulation
522
+ is as follows: use the prediction model trained in algorithm
523
+ 1 and/or algorithm 2 to predict value functions using the
524
+ current real-time price and a look-back window (including the
525
+ day ahead look-back) and then generate the bids according
526
+ to VII-B. Once the bids are generated, use them to simulate
527
+ arbitrage and market clearing as outlined in VII-C.
528
+ Algorithm 3 Arbitrage with Value Function Prediction
529
+ 1: Initialization:
530
+ 2: Set energy storage parameters c, P, ηp, ηb, E.
531
+ 3: Initialize et−1 ← e0.
532
+ 4: for t ∈ [1, T] do
533
+ 5:
534
+ Predict $\hat{v}\big(e_t \mid \theta, x\big)$
538
+ 6:
539
+ Solve single-period optimization (1)
540
+ 7:
541
+ Return et, pt, bt
542
+ 8: end for
543
+ V. CASE STUDY SET-UPS
544
+ A. Market Participation Setting and Storage Parameters
545
+ We consider the following four market designs and par-
546
+ ticipation settings to demonstrate that our proposed approach
547
+ fits a wide range of storage participation options and market
548
+ designs:
549
+ • HA-1 Energy storage owner submits single-segment bids
550
+ one hour-ahead to real-time markets. This represents the
551
+ current storage bidding model in most wholesale real-
552
+ time markets in the US [10], [34] where energy storage
553
+ submits one charge bid and one discharge bid one hour
554
+ ahead of the market clearing. The storage can update its
555
+ bid for each hour, but the bids must stay the same within
556
+ each hour for multiple market clearings (for example,
557
+ real-time markets clear every five minutes in NYISO, so
558
+ one hour includes 12 real-time clearings).
559
+ • HA-10 Same as HA-1 except the storage submits 10-
560
+ segment SoC-dependent charge and discharge bids. This
561
+ is a new market design proposed by CAISO to econom-
562
+ ically manage storage SoC in real-time [35], [36].
563
+ • PR-10 The storage conducts price response in real-time,
564
+ deciding the storage control after observing the published
565
+ real-time price, instead of submitting bids [37]. The price
566
+ response option is limited to behind-the-meter storage
567
+ in which the associated demand is cleared in real-time
568
+ market prices. In this case, the storage is not limited to
569
+ any bidding models and can use any decision-making
570
+ models. Yet, we assume the storage uses a 10-segment
571
+ approximation of its opportunity value as it provides a
572
+ good enough approximation to the actual value function.
573
+ This also enables us to benchmark HA-10 and PR-10
574
+ cases to demonstrate the economic cost of the hour-ahead
575
+ bidding requirement.
576
+ • PR-1 Same as PR-10 except the storage uses the average
577
+ opportunity value (i.e., one segment approximation) for
578
+ arbitrage control. This is not a realistic case as there is no
579
+ motivation for the storage operator to limit itself to using
580
+ a single-segment, less accurate approximation of its value
581
+ function to conduct arbitrage. However, we include this
582
+ case with the sole purpose to benchmark against the HA-
583
+ 1 case and PR-10 case.
584
+ In all case studies, we consider storage with a 90% one-
585
+ way efficiency and a 10$/MWh cost of discharge (excluding
586
+ the opportunity cost), unless otherwise specified. We consider
587
+ three storage durations including 2-hour, 4-hour, and 12-hour.
588
+ Further, we adapt our base prediction model to predict the hour
589
+ ahead case by adding an hour time shift to our ground truth
590
+ training target value function, which corresponds to 12-time
591
+ steps in 5-minute price resolution.
592
+ We conduct the majority of our case studies over price
593
+ data from New York ISO (NYISO) [38] for four price zones:
594
+ NYC (Zone J), LONGIL (Zone K), NORTH (Zone D), and
595
+ WEST (Zone A). We also use data from the Australian Energy
596
+ Market Operator (AEMO) for Queensland to demonstrate the
597
+ transferability of our approach using transfer learning [39].
598
+ B. Market and Price Data
599
+ We observe differences in price statistics and generation
600
+ mix across zones from the same ISO, and in between zones
601
+ from ISOs in other states and even countries, summarized in
602
+ Table I. In New York zones, these differences can be attributed
603
+ to significant transmission congestion when comparing the
604
+ two main zone groups in NY [40]. QUEENSLAND has the
605
+ highest price volatility, which can potentially be attributed to
606
+ the absence of a day-ahead market. Further, we see a clear
607
+ tie between penetration rates of renewables into the zones and
608
+ the price volatility [41]. We also see the highest occurrence
609
+ of negative prices in NORTH (NY), which is due to the
610
+ significantly higher penetration of wind when compared to
611
+ NYC and LONGIL.
612
+ TABLE I
613
+ PRICE DATA STATISTICS
614
+ ,
615
+ Zone
616
+ Negative Price #
617
+ STD
618
+ Renewable %
619
+ NYC (NY)
620
+ 208
621
+ 28.82
622
+ 0.93
623
+ LONGIL (NY)
624
+ 190
625
+ 50.17
626
+ 0.93
627
+ NORTH (NY)
628
+ 6334
629
+ 40.25
630
+ 13.06
631
+ WEST (NY)
632
+ 633
633
+ 37.55
634
+ 13.06
635
+ QNSLND (AUS)
636
+ 522
637
+ 243.00
638
+ 13.19
639
+
640
+ 6
641
+ Fig. 2. Accumulated Profit over 2019 test set for NYISO Zones
642
+ All code for valuation, network training, and arbitrage are
643
+ written in python with Jupyter notebook and is available on
644
+ GitHub1. All trials are run on a desktop computer with AMD
645
+ Ryzen 9 processor and Nvidia GPU on Tensorflow 2.9.1 and
646
+ with cuDNN and CUDA versions 8.1 and 11.2, respectively.
647
+ All case studies using price data from NYISO were trained
648
+ using data from 2017 to 2018, and tested over 2019 data.
649
+ Each year of price data for each price zone has 8760 day-
650
+ ahead price data points (hourly resolution) and 105,120 real-
651
+ time price points (5-minute resolution). The look-back price
652
+ window includes the last 36 real-time prices (3 hours) and
653
+ 24-day-ahead prices (one day). The maximum training time
654
+ over two years of training price data, including the generation
655
+ of historical optimal value functions and training of the neural
656
+ network, is 390 seconds, a bit more than five minutes. The net-
657
+ work consists of a Convolutional Block with three sequential
658
+ time-distributed Convolutions+MaxPool layers, then an LSTM
659
+ block with two sets of bi-directional LSTM+drop out layers,
660
+ and then finally a Dense layer at the output end. The specific
661
+ model hyperparameters and details can be found on GitHub.
662
+ VI. RESULTS
663
+ A. Benchmark with Competing Methods
664
+ We first benchmark our proposed approach with other
665
+ competing energy storage price arbitrage methods in a price
666
+ response setting, in which storage can observe price first
667
+ and act accordingly, without having to bid ahead into mar-
668
+ kets. We benchmark the proposed method (DP-ConvLSTM)
669
+ with a reinforcement learning method (RL) [17], a modified
670
+ stochastic dynamic programming with day-ahead price updates
671
+ (SDP) [37], the proposed method but implemented with a
672
+ multilayer perceptron (DP-MLP) network [42], and perfect
673
+ price predictions which provide the highest profit possible.
674
+ In RL, we have 11 actions, 103 price states, and 121 SoC
675
+ states, which takes more than 1 hour to train for 5-min
676
+ resolution arbitrage. The RL approach uses a Markov decision
677
+ process (MDP) model by discretizing the storage SoC, and it
678
+ only works with perfect efficiency (100%). To provide a fair
679
+ comparison, all methods in this case consider storage with
680
+ perfect efficiency.
681
+ 1https://github.com/ybaker661/LSTM-Value-Prediction
682
+ Fig. 2 shows the comparison result when trained using price
683
+ data from 2017-2018 and tested in 2019 at price zones in NY-
684
+ ISO. The result shows DP-ConvLSTM has a clear advantage
685
+ over other methods in terms of profitability. Notably, the DP-
686
+ ConvLSTM approach performs exceptionally well in capturing
687
+ low-frequency extreme events, such as the surge in profits
688
+ around June in LONGIL and WEST, where the ConvLSTM
689
+ captures profit spikes that the RL benchmark misses, and the
690
+ difference between the ConvLSTM profit value and the perfect
691
+ prediction comes from the difference in arbitrage decision as
692
+ a result of numerical saturation. In this context, numerical
693
+ saturation means that the network learns to predict numerical
694
+ values in the range of data it most frequently sees (value
695
+ functions of stable prices), and so when it predicts on anomaly
696
+ data (price spike value functions) that are numerically much
697
+ larger, the network prediction saturates at the largest common
698
+ numerical value it sees.
699
+ B. Price Response
700
+ In this subsection, we compare the price response arbitrage
701
+ performance (PR-1 and PR-10) with different storage dura-
702
+ tions. Table III shows the arbitrage profit ratio results.
703
+ Overall, the result shows stable performance over the four
704
+ price zones and three storage durations. In comparison, our
705
+ previous work using SDP [37] and DP-MLP [42] have worse
706
+ performance in LONGIL (more frequent price spikes) and
707
+ NORTH (more frequent negative prices). The comparison
708
+ between PR-1 and PR-10 shows that increasing the value
709
+ function approximation from one to ten segments increased
710
+ the profit ratio by around 3%. Considering different storage
711
+ durations, the profit ratio is lower for long-duration energy
712
+ storage (12hr), as the longer storage duration leads to a
713
+ longer temporal correlation into the future, leading to higher
714
+ prediction difficulties. Still, our method achieved around 75%
715
+ profit ratio (HA-10) in the worst-case scenario in the NORTH
716
+ zone.
717
+ C. Hour-ahead Bidding
718
+ We now investigate hour-ahead bidding which is the most
719
+ common market design for energy storage owners operating in
720
+ the real-time market, where the storage submits bids an hour
721
+
722
+ NYC
723
+ LONGIL
724
+ NORTH
725
+ WEST
726
+ 16
727
+ ... Perfect Prediction
728
+ .... Perfect Prediction
729
+ ..... Perfect Prediction
730
+ .... Perfect Prediction
731
+ DP+LSTM
732
+ DP+LSTM
733
+ DP+LSTM
734
+ DP+LSTM
735
+ 14
736
+ 14
737
+ DP+MLP
738
+ DP+MLP
739
+ DP+MLP
740
+ DP+MLP
741
+ -- SDP
742
+ 25
743
+ -- SDP
744
+ --- SDP
745
+ ---- SDP
746
+ - RL
747
+ RL
748
+ RL
749
+ 2
750
+ 20
751
+ 20
752
+ Profit
753
+ Cumulative F
754
+ 6
755
+ 8
756
+ 10
757
+ 12
758
+ 0
759
+ 2
760
+ 6
761
+ 10
762
+ 12
763
+ 2
764
+ 6
765
+ 8
766
+ 10
767
+ 12
768
+ 0
769
+ 2
770
+ 6
771
+ 8
772
+ 10
773
+ 12
774
+ months
775
+ months
776
+ months
777
+ months7
778
+ TABLE II
779
+ PROFIT RATIO FOR HA PREDICTION QUEENSLAND AUS WITH DIFFERENT AMOUNTS OF TRAINING DATA
780
+ HA-1
781
+ HA-10
782
+ Duration
783
+ Training
784
+ No Data
785
+ 3 Days
786
+ 1 Week
787
+ 1 Month
788
+ 1 Year
789
+ No Data
790
+ 3 Days
791
+ 1 Week
792
+ 1 Month
793
+ 1 Year
794
+ 2hr
795
+ T.L.
796
+ 77.24
797
+ 82.76
798
+ 82.85
799
+ 81.30
800
+ 85.35
801
+ 78.90
802
+ 84.00
803
+ 81.54
804
+ 79.93
805
+ 83.42
806
+ No T.L.
807
+ X
808
+ 48.22
809
+ 51.36
810
+ 78.59
811
+ 83.88
812
+ X
813
+ 44.59
814
+ 44.88
815
+ 78.10
816
+ 86.95
817
+ 4hr
818
+ T.L.
819
+ 81.21
820
+ 81.29
821
+ 81.31
822
+ 78.11
823
+ 79.12
824
+ 84.06
825
+ 87.44
826
+ 84.34
827
+ 77.79
828
+ 82.65
829
+ No T.L.
830
+ X
831
+ 62.40
832
+ 65.45
833
+ 74.92
834
+ 80.42
835
+ X
836
+ 55.99
837
+ 55.99
838
+ 74.36
839
+ 83.11
840
+ 12hr
841
+ T.L.
842
+ 92.43
843
+ 83.96
844
+ 81.32
845
+ 78.74
846
+ 82.73
847
+ 90.69
848
+ 80.78
849
+ 79.11
850
+ 78.84
851
+ 81.35
852
+ No T.L
853
+ X
854
+ 74.79
855
+ 75.53
856
+ 74.39
857
+ 75.43
858
+ X
859
+ 73.65
860
+ 73.59
861
+ 74.56
862
+ 82.36
863
+ TABLE III
864
+ CAPTURED PROFIT RATIOS: PRICE RESPONSE
865
+ Zone
866
+ PR-1
867
+ PR-10
868
+ 2hr
869
+ 4hr
870
+ 12hr
871
+ 2hr
872
+ 4hr
873
+ 12hr
874
+ NYC
875
+ 80.83
876
+ 79.96
877
+ 73.54
878
+ 83.69
879
+ 83.63
880
+ 75.67
881
+ LONGIL
882
+ 82.33
883
+ 80.61
884
+ 79.10
885
+ 82.98
886
+ 83.94
887
+ 82.38
888
+ NORTH
889
+ 78.24
890
+ 75.87
891
+ 71.02
892
+ 79.52
893
+ 79.96
894
+ 74.29
895
+ WEST
896
+ 84.43
897
+ 80.37
898
+ 83.65
899
+ 87.97
900
+ 87.44
901
+ 84.43
902
+ TABLE IV
903
+ CAPTURED PROFIT RATIOS: HOUR AHEAD
904
+ Zone
905
+ HA-1
906
+ HA-10
907
+ 2hr
908
+ 4hr
909
+ 12hr
910
+ 2hr
911
+ 4hr
912
+ 12hr
913
+ NYC
914
+ 73.99
915
+ 74.82
916
+ 77.00
917
+ 78.79
918
+ 80.61
919
+ 74.47
920
+ LONGIL
921
+ 74.26
922
+ 76.56
923
+ 82.01
924
+ 75.30
925
+ 79.63
926
+ 81.89
927
+ NORTH
928
+ 73.22
929
+ 71.71
930
+ 70.17
931
+ 75.83
932
+ 77.21
933
+ 73.16
934
+ WEST
935
+ 78.79
936
+ 80.13
937
+ 84.17
938
+ 83.12
939
+ 83.94
940
+ 84.60
941
+ ahead of time. Table IV shows the hour-ahead bidding profit
942
+ ratio in the NYC case study. The profit ratio is lower than the
943
+ price response as the storage owner must decide on the bids
944
+ one hour before the actual time of arbitrage. The short-duration
945
+ storage (2hr) cases have higher profit ratio reductions (up to
946
+ 7%) as the value function is more sensitive to recent market
947
+ prices due to it’s shorter duration. On the other hand, the long-
948
+ duration storage (12hr) is more resilient and the hour-ahead
949
+ bidding has little impact on the profit ratio.
950
+ Hour-ahead bidding results also restate our observation from
951
+ the price response case, that multi-segment SoC bids are
952
+ more beneficial for short-duration storage to better manage
953
+ their SoC, but the improvement is not obvious for long-
954
+ duration storage. Overall, our approach achieved a higher
955
+ than 70% profit ratio in all hour-ahead cases, showing ro-
956
+ bust performance under different market designs and storage
957
+ technologies.
958
+ D. Transfer Learning in AEMO
959
+ We now demonstrate the effectiveness of applying transfer
960
+ learning to quickly adapt a pre-trained value function predic-
961
+ tion model from one market to a new market. In this case
962
+ study, we pre-train the prediction model using NYC price
963
+ data from 2017-2018 and conduct arbitrage in Queensland,
964
+ Australia. In Queensland, we use selected data from 2019
965
+ for training and the first 6 months of 2021 for evaluation.
966
+ We skipped the year 2020 because of COVID-19’s impact.
967
+ To present the sensitivity of transfer learning over a limited
968
+ amount of data, we consider various durations of training
969
+ datasets ranging from 3 days to one year. We present a
970
+ sensitivity analysis comparing the performance of transfer
971
+ learning versus training a model from scratch for the situations
972
+ where we have access to training data for only 3 days, 1 week,
973
+ 1 month, and 1 year of data for the target zone. Thus this case
974
+ study has the following steps:
975
+ 1) Use a pre-trained network (transfer learning) or a ran-
976
+ domly initialized network (training from scratch).
977
+ 2) Use a limited duration of Queensland price data from
978
+ 2019, ranging from 3 days to 1 year, to train the model
979
+ using transfer learning as outlined in algorithm 2, or
980
+ normal training outlined in algorithm 1.
981
+ 3) Test the arbitrage performance to arbitrage, as outlined
982
+ in algorithm 3, using the first six-month of data in
983
+ Queensland, 2021.
984
+ Table II shows the arbitrage profit ratio results for Queens-
985
+ land. The main finding is that in the data-scarce scenarios,
986
+ the transfer learning approach vastly outperforms training a
987
+ model from scratch. We also see that adding more data to
988
+ the transfer learning case does not necessarily increase perfor-
989
+ mance, whereas training the model from scratch only becomes
990
+ a viable option once a certain amount of data is available.
991
+ For the 2-hour storage, training from scratch becomes viable
992
+ around the point where you have 1 month of data available for
993
+ the target zone. For the 4-hour storage, the model still needs
994
+ about 1 month of data to reasonably perform when trained
995
+ from scratch, though the model is able to capture higher profit
996
+ ratios for 3 and 1 week of data than the 2-hour case. Compared
997
+ to both of these, the 12-hour storage seems to be the easiest for
998
+ the model to learn, only needing 3 days of data when training
999
+ from scratch to achieve reasonable performance; however, the
1000
+ 12-hour storage shows that the transfer learning approach
1001
+ outperforms training from scratch for all data scenarios for 1
1002
+ and 10 segments. However, since the 12-hr storage has lower
1003
+ opportunity cost and less significant change in the opportunity
1004
+ cost between sequential time steps, predicting the opportunity
1005
+ value might not be an effective method.
1006
+ The takeaway is that transfer learning beats out training the
1007
+ model from scratch when data scarcity is an issue. However,
1008
+ when the dataset size increases to a general size of 1 month,
1009
+ training from scratch becomes a viable option. Additionally,
1010
+ adding extra data, past 3 days or 1 week for transfer learning
1011
+ and past 6 months for training from scratch, does not necessar-
1012
+ ily yield better performance. As such, it is more useful to focus
1013
+ on stabilizing ConvLSTM’s volatile and initialization-sensitive
1014
+ training as well as other changes to the training process. We
1015
+
1016
+ 8
1017
+ see that in almost all cases, using the model trained on NY
1018
+ data without any retraining performs comparably or even better
1019
+ than transfer learning and even training a model from scratch.
1020
+ This indicates statistical robustness and generality in the NY
1021
+ zone data, and it also points to a unified generating distribution
1022
+ behind the price data/opportunity value of zones. However, we
1023
+ cannot conclude this for certain without further analysis of
1024
+ other permutations of transfer learning, and on testing different
1025
+ data duration permutations (including different data scarcity
1026
+ scenarios in the training of the base model along with in
1027
+ transfer learning).
1028
+ VII. CONCLUSION
1029
+ In this paper, we propose a computation-efficient, versatile,
1030
+ and transferable energy storage arbitrage model that fits both
1031
+ price response and market bidding. Our proposed approach
1032
+ achieves state-of-the-art profits compared to other methods and
1033
+ is both computation and data-efficient. We also demonstrate
1034
+ that by incorporating transfer learning, we can quickly adapt
1035
+ our bidding model to a new location with very limited training
1036
+ data. Our model suits a variety of arbitrage settings, including
1037
+ behind-the-meter price response and economic bids for utility-
1038
+ scale storage, and can be implemented using non-proprietary
1039
+ software and regular computing hardware. Our work would fa-
1040
+ cilitate storage participation in electricity markets and promote
1041
+ economic decarbonization of the electric power system.
1042
+ REFERENCES
1043
+ [1] G. McGrath and O. Comstock, “Battery systems on the u.s. power
1044
+ grid are increasingly used to respond to price,” Jul 2022. [Online].
1045
+ Available: https://www.eia.gov/todayinenergy/detail.php?id=53199
1046
+ [2] N. Srianandarajah, S. J. Wilson, and A. C. Chapman, “From green to
1047
+ amber: is australia’s national electricity market signalling a financial
1048
+ warning for wind and solar
1049
+ power?” Energy Policy, vol. 167,
1050
+ p. 113052, 2022. [Online]. Available: https://www.sciencedirect.com/
1051
+ science/article/pii/S0301421522002774
1052
+ [3] X. Yan, C. Gu, F. Li, and Z. Wang, “Lmp-based pricing for energy
1053
+ storage in local market to facilitate pv penetration,” IEEE Transactions
1054
+ on Power Systems, vol. PP, pp. 1–1, 12 2017.
1055
+ [4] W. H. B. Room, “Fact sheet: President biden sets 2030 greenhouse gas
1056
+ pollution reduction target aimed at creating good-paying union jobs and
1057
+ securing us leadership on clean energy technologies,” The White House,
1058
+ 2021.
1059
+ [5] “2030
1060
+ climate
1061
+ &
1062
+ energy
1063
+ framework.”
1064
+ [Online].
1065
+ Avail-
1066
+ able:
1067
+ https://climate.ec.europa.eu/eu-action/climate-strategies-targets/
1068
+ 2030-climate-energy-framework en
1069
+ [6] R. Sioshansi, P. Denholm, J. Arteaga, S. Awara, S. Bhattacharjee, A. Bot-
1070
+ terud, W. Cole, A. Cort´es, A. De Queiroz, J. DeCarolis et al., “Energy-
1071
+ storage modeling: State-of-the-art and future research directions,” IEEE
1072
+ Transactions on Power Systems, vol. 37, no. 2, pp. 860–875, 2021.
1073
+ [7] M. Roozbehani, M. A. Dahleh, and S. K. Mitter, “Volatility of power
1074
+ grids under real-time pricing,” IEEE Transactions on Power Systems,
1075
+ vol. 27, no. 4, pp. 1926–1940, 2012.
1076
+ [8] K. Abdulla, J. De Hoog, V. Muenzel, F. Suits, K. Steer, A. Wirth,
1077
+ and S. Halgamuge, “Optimal operation of energy storage systems
1078
+ considering forecasts and battery degradation,” IEEE Transactions on
1079
+ Smart Grid, vol. 9, no. 3, pp. 2086–2096, 2016.
1080
+ [9] D. Krishnamurthy, C. Uckun, Z. Zhou, P. R. Thimmapuram, and
1081
+ A. Botterud, “Energy storage arbitrage under day-ahead and real-time
1082
+ price uncertainty,” IEEE Transactions on Power Systems, vol. 33, no. 1,
1083
+ pp. 84–93, 2017.
1084
+ [10] D. R. Jiang and W. B. Powell, “Optimal hour-ahead bidding in the real-
1085
+ time electricity market with battery storage using approximate dynamic
1086
+ programming,” INFORMS Journal on Computing, vol. 27, no. 3, pp.
1087
+ 525–543, 2015.
1088
+ [11] H. Wang and B. Zhang, “Energy storage arbitrage in real-time markets
1089
+ via reinforcement learning,” in 2018 IEEE Power & Energy Society
1090
+ General Meeting (PESGM).
1091
+ IEEE, 2018, pp. 1–5.
1092
+ [12] H. Mohsenian-Rad, “Optimal bidding, scheduling, and deployment of
1093
+ battery systems in california day-ahead energy market,” IEEE Transac-
1094
+ tions on Power Systems, vol. 31, no. 1, pp. 442–453, 2016.
1095
+ [13] S. Bhattacharjee, R. Sioshansi, and H. Zareipour, “Energy storage
1096
+ participation in wholesale markets: The impact of state-of-energy man-
1097
+ agement,” IEEE Open Access Journal of Power and Energy, vol. 9, pp.
1098
+ 173–182, 2022.
1099
+ [14] Y. Wang, Y. Dvorkin, R. Fernandez-Blanco, B. Xu, T. Qiu, and D. S.
1100
+ Kirschen, “Look-ahead bidding strategy for energy storage,” IEEE
1101
+ Transactions on Sustainable Energy, vol. 8, no. 3, pp. 1106–1117, 2017.
1102
+ [15] M. L. Puterman, Markov decision processes: discrete stochastic dynamic
1103
+ programming.
1104
+ John Wiley & Sons, 2014.
1105
+ [16] D. R. Jiang and W. B. Powell, “An approximate dynamic programming
1106
+ algorithm for monotone value functions,” 2014. [Online]. Available:
1107
+ https://arxiv.org/abs/1401.1590
1108
+ [17] H. Wang and B. Zhang, “Energy storage arbitrage in real-time
1109
+ markets via reinforcement learning,” CoRR, vol. abs/1711.03127, 2017.
1110
+ [Online]. Available: http://arxiv.org/abs/1711.03127
1111
+ [18] N. Zheng, J. Jaworski, and B. Xu, “Arbitraging variable efficiency
1112
+ energy storage using analytical stochastic dynamic programming,” IEEE
1113
+ Transactions on Power Systems, vol. 37, no. 6, pp. 4785–4795, 2022.
1114
+ [19] D. Krishnamurthy, C. Uckun, Z. Zhou, P. R. Thimmapuram, and
1115
+ A. Botterud, “Energy storage arbitrage under day-ahead and real-time
1116
+ price uncertainty,” IEEE Transactions on Power Systems, vol. 33, no. 1,
1117
+ pp. 84–93, 2018.
1118
+ [20] F. Sarafraz, H. Ghasemi, and H. Monsef, “Locational marginal price
1119
+ forecasting by locally linear neuro-fuzzy model,” in 2011 10th Inter-
1120
+ national Conference on Environment and Electrical Engineering, 2011,
1121
+ pp. 1–4.
1122
+ [21] N. I. Nwulu and M. Fahrioglu, “A soft computing approach to
1123
+ projecting locational marginal price,” NEURAL COMPUTING &
1124
+ APPLICATIONS, p. 1115–1124, 2013. [Online]. Available: https:
1125
+ //hdl.handle.net/11511/64998
1126
+ [22] P. Chaweewat and J. G. Singh, “An electricity price interval forecasting
1127
+ by using residual neural network,” International Transactions on Electri-
1128
+ cal Energy Systems, vol. 30, no. 9, p. e12506, 2020. [Online]. Available:
1129
+ https://onlinelibrary.wiley.com/doi/abs/10.1002/2050-7038.12506
1130
+ [23] J. Cao, D. Harrold, Z. Fan, T. Morstyn, D. Healey, and K. Li, “Deep
1131
+ reinforcement learning-based energy storage arbitrage with accurate
1132
+ lithium-ion battery degradation model,” IEEE Transactions on Smart
1133
+ Grid, vol. 11, no. 5, pp. 4513–4521, 2020.
1134
+ [24] K.-b. Kwon and H. Zhu, “Reinforcement learning based optimal battery
1135
+ control under cycle-based degradation cost,” IEEE Transactions on
1136
+ Smart Grid, 2022.
1137
+ [25] M. E. Taylor and P. Stone, “Transfer learning for reinforcement learning
1138
+ domains: A survey.” Journal of Machine Learning Research, vol. 10,
1139
+ no. 7, 2009.
1140
+ [26] T. Peirelinck, H. Kazmi, B. V. Mbuwir, C. Hermans, F. Spiessens,
1141
+ J. Suykens, and G. Deconinck, “Transfer learning in demand response:
1142
+ A review of algorithms for data-efficient modelling and control,”
1143
+ Energy
1144
+ and
1145
+ AI,
1146
+ vol.
1147
+ 7,
1148
+ p.
1149
+ 100126,
1150
+ 2022.
1151
+ [Online].
1152
+ Available:
1153
+ https://www.sciencedirect.com/science/article/pii/S2666546821000732
1154
+ [27] H. Li, Z. Ma, and Y. Weng, “A transfer learning framework for power
1155
+ system event identification,” IEEE Transactions on Power Systems,
1156
+ vol. 37, no. 6, pp. 4424–4435, 2022.
1157
+ [28] S. Kim, Y. Y. Choi, K. J. Kim, and J.-I. Choi, “Forecasting
1158
+ state-of-health of lithium-ion batteries using variational long short-term
1159
+ memory with transfer learning,” Journal of Energy Storage, vol. 41,
1160
+ p. 102893, 2021. [Online]. Available: https://www.sciencedirect.com/
1161
+ science/article/pii/S2352152X21006101
1162
+ [29] B. Xu, M. Korp˚as, and A. Botterud, “Operational valuation of energy
1163
+ storage under multi-stage price uncertainties,” in 2020 59th IEEE
1164
+ Conference on Decision and Control (CDC).
1165
+ IEEE, 2020, pp. 55–60.
1166
+ [30] N. Zheng and B. Xu, “Impact of bidding and dispatch models over
1167
+ energy storage utilization in bulk power systems,” IREP Symposium on
1168
+ Bulk Power System Dynamics and Control 2022, 2022.
1169
+ [31] H. Cui, F. Li, X. Fang, H. Chen, and H. Wang, “Bilevel arbitrage
1170
+ potential evaluation for grid-scale energy storage considering wind
1171
+ power and lmp smoothing effect,” IEEE Transactions on Sustainable
1172
+ Energy, vol. 9, no. 2, pp. 707–718, 2017.
1173
+ [32] S. Dieleman, J. De Fauw, and K. Kavukcuoglu, “Exploiting cyclic
1174
+ symmetry in convolutional neural networks,” 2016. [Online]. Available:
1175
+ https://arxiv.org/abs/1602.02660
1176
+
1177
+ 9
1178
+ [33] F. Chollet, “Transfer learning & fine-tuning,” https://keras.io/guides/
1179
+ transfer learning/, 2020.
1180
+ [34] A. Sakti, A. Botterud, and F. O’Sullivan, “Review of wholesale markets
1181
+ and regulations for advanced energy storage services in the united states:
1182
+ Current status and path forward,” Energy policy, vol. 120, pp. 569–579,
1183
+ 2018.
1184
+ [35] N. Zheng, X. Qin, D. Wu, G. Murtaugh, and B. Xu, “Energy storage
1185
+ state-of-charge market model,” arXiv preprint arXiv:2207.07221, 2022.
1186
+ [36] C.
1187
+ Chen
1188
+ and
1189
+ L.
1190
+ Tong,
1191
+ “Convexifying
1192
+ market
1193
+ clearing
1194
+ of
1195
+ soc-
1196
+ dependent bids from merchant storage participants,” arXiv preprint
1197
+ arXiv:2209.02107, 2022.
1198
+ [37] N. Zheng, J. J. Jaworski, and B. Xu, “Arbitraging variable efficiency
1199
+ energy storage using analytical stochastic dynamic programming,” IEEE
1200
+ Transactions on Power Systems, 2022.
1201
+ [38] “Energy
1202
+ market
1203
+ &
1204
+ operational
1205
+ data.”
1206
+ [Online].
1207
+ Available:
1208
+ https:
1209
+ //www.nyiso.com/energy-market-operational-data
1210
+ [39] “Engineering precinct battery.” [Online]. Available: http://dashboards.
1211
+ sustainability.uq.edu.au/engineering-precinct-battery/interactive/#/
1212
+ [40] D. B. Patton, P. LeeVanSchaick, J. Chen, and M. M. Unit, “2014 state
1213
+ of the market report for the new york iso markets,” Potomac Economics,
1214
+ 2016.
1215
+ [41] M. Waite and V. Modi, “Electricity load implications of space heating
1216
+ decarbonization pathways,” Joule, vol. 4, no. 2, pp. 376–394, 2020.
1217
+ [42] N. Zheng, X. Liu, B. Xu, and Y. Shi, “Energy storage price ar-
1218
+ bitrage via opportunity value function prediction,” arXiv preprint
1219
+ arXiv:2211.07797, 2022.
1220
+ APPENDIX
1221
+ A. Dynamic Programming Solution Algorithm
1222
+ We first solve the dynamic programming problem as listed
1223
+ in (2c) subject to constraints (1b)–(1e). We use results from our
1224
+ prior work [29] to solve the dynamic programming problem
1225
+ (2c) and obtain the full piece-wise linear approximation of the
1226
+ opportunity value function Qt for all time periods (i.e., one
1227
+ value function for each time step for an entire year, 105120
1228
+ for 5 min price resolution and 35040 for 20 min price resolution).
1229
+ We start by defining qt as the derivative of storage opportunity
1230
+ value function Qt, which represents the marginal opportunity
1231
+ value of energy stored in the storage. Then we can use an
1232
+ analytical formulation to calculate the opportunity value qt(e)
1233
+ at any given energy storage SoC level.
1234
+ Our prior work proved qt−1 can be recursively calculated
1235
+ with next period value function qt, power rating P, and effi-
1236
+ ciency η. The value function calculated using the deterministic
1237
+ formulation is thus
1238
+ q_{t-1}(e) =
+ \begin{cases}
+ q_t(e + P\eta) & \text{if } \lambda_t \le q_t(e + P\eta)\eta \\
+ \lambda_t/\eta & \text{if } q_t(e + P\eta)\eta < \lambda_t \le q_t(e)\eta \\
+ q_t(e) & \text{if } q_t(e)\eta < \lambda_t \le [q_t(e)/\eta + c]^+ \\
+ (\lambda_t - c)\eta & \text{if } [q_t(e)/\eta + c]^+ < \lambda_t \le [q_t(e - P/\eta)/\eta + c]^+ \\
+ q_t(e - P/\eta) & \text{if } \lambda_t > [q_t(e - P/\eta)/\eta + c]^+
+ \end{cases}
+ \tag{3}
1270
+ and calculates the opportunity value function assuming the
1271
+ price follows a recursive computation framework. This deter-
1272
+ ministic formulation is what we will use in our investigation,
1273
+ and from this we are able to calculate opportunity value
1274
+ function qt(e) at any time step using backwards recursion by
1275
+ defining an end period value function qT . We then discretize qt
1276
+ by splitting the energy storage SoC level e into small equally
1277
+ spaced segments, which must be far smaller than power rating
1278
+ P. For any SoC level et, we can find the nearest segment and
1279
+ return the corresponding value.
1280
+ B. Bid generation
1281
+ We now design discharge and charge bids using the oppor-
1282
+ tunity valuation results based on our prior work [30], [35]. We
1283
+ consider generating time-varying SoC-dependent bids with a
1284
+ total number of J segments for charge bids Bt,j and discharge
1285
+ bids Ct,j. Note that these bids represent the combination of
1286
+ the discharge cost and the change in the opportunity value. We
1287
+ assume each bid segment j is associated with an SoC range
1288
+ Ej−1 to Ej. The discharge bids are thus calculated based on
1289
+ the average value function between the internal Ej−1 and Ej
1290
+ C_{t,j} = \frac{1}{E} \int_{E_{j-1}}^{E_j} \frac{\partial}{\partial p_t}\big(c\,p_t - Q_t(e_{t-1} - p_t/\eta + b_t\eta)\big)\,\mathrm{d}e_{t-1}
+ = c + \frac{1}{E} \int_{E_{j-1}}^{E_j} q_t(e_{t-1} - p_t/\eta + b_t\eta)\,\mathrm{d}e_{t-1}/\eta
+ \approx c + \frac{1}{\eta E} \int_{E_{j-1}}^{E_j} q_t(e)\,\mathrm{d}e
1307
+ Similarly for charge bids
1308
+ B_{t,j} = \frac{1}{E} \int_{E_{j-1}}^{E_j} \frac{\partial}{\partial b_t}\big(c\,p_t - Q_t(e_{t-1} - p_t/\eta + b_t\eta)\big)\,\mathrm{d}e_{t-1}
+ = \frac{1}{E} \int_{E_{j-1}}^{E_j} q_t(e_{t-1} - p_t/\eta + b_t\eta)\,\mathrm{d}e_{t-1}\,\eta
+ \approx \frac{\eta}{E} \int_{E_{j-1}}^{E_j} q_t(e)\,\mathrm{d}e
1325
+ In the special case of one segment, i.e., bids are not
1326
+ dependent on SoC (the current energy storage bidding model
1327
+ in most wholesale markets), Ej−1 is zero or the lowest allowed
1328
+ SoC and Ej is the highest allowed SoC value or the energy
1329
+ capacity. In this case the bids are simply based on the average
1330
+ marginal opportunity value ¯qt
1331
+ \bar{q}_t = \frac{1}{E} \int_0^E q_t(e)\,\mathrm{d}e
+ \tag{4}
1337
+ and the discharge bid is c + ¯qt/η, and the charge bid is ¯qtη.
1338
+ C. Real-time market clearing and arbitrage simulation
1339
+ We consider the following simplified real-time market clear-
1340
+ ing model with a generalized multi-segment energy storage
1341
+ bids
1342
+ \min_{p_{t,j,s},\, d_{t,j,s}} \; J_t(g_t) + \sum_s \sum_j \big(C_{t,j,s}\,d_{t,j,s} - B_{t,j,s}\,b_{t,j,s}\big) \tag{5a}
+ \text{s.t.} \quad e_{t,j,s} - e_{t-1,j,s} = b_{t,j,s}\eta - p_{t,j,s}/\eta \tag{5b}
+ 0 \le e_{t,j,s} \le E_{j,s} - E_{j-1,s} \tag{5c}
+ g_t + \sum_s \sum_j p_{t,j,s} = D_t + \sum_s \sum_j b_{t,j,s} \;:\; \lambda_t \tag{5d}
1367
+ where (5a) is the objective function minimizing total bidding
1368
+ costs. Note that we use aggregated generator supply curve
1369
+ Jt(gt) and total generation gt instead of modeling the bids
1370
+ from each individual generator for simplicity to focus on
1371
+ energy storage. The second term of the objective is the
1372
+ discharge bids and charge bids for each energy storage s and
1373
+
1374
+ 10
1375
+ each SoC segment j. (5b) models the SoC evolution under
1376
+ single-trip efficiency η for each SoC segment. (5c) models
1377
+ the upper and lower energy limit for each SoC segment, note
1378
+ that the minimum energy is always zero while the maximum
1379
+ energy for each segment is the difference between the upper
1380
+ and lower SoC range Ej,s − Ej−1,s. Finally, (5d) is the
1381
+ power balance constraint enforcing the sum of generation and
1382
+ storage charge/discharge equals to the total demand Dt over
1383
+ time period t, the associated dual variable is thus the market
1384
+ clearing price λt.
1385
+ Now in price-taker analysis, we use historical price data to
1386
+ simulate how the energy storage would have been cleared in
1387
+ the market. In this case, we perform a Lagrangian relaxation
1388
+ of (5d) and move it to the objective. This decomposes the
1389
+ optimization into independent sub-problems for each energy
1390
+ storage, and for each storage, the price-taker market clearing
1391
+ problem is equivalent to the following price arbitrage problem
1392
+ \max_{p_{t,j},\, d_{t,j}} \; \lambda_t \sum_j (d_{t,j} - b_{t,j}) - \sum_j \big(C_{t,j}\,d_{t,j} - B_{t,j}\,b_{t,j}\big)
+ \tag{6}
1401
+ subject to the same storage unit constraints (5b) and (5c).
1402
+ Note that for this problem we omit the storage unit index
1403
+ s as the problem formulation is the same for each storage.
1404
+ Hence, price-taker market clearing simulation is equivalent to
1405
+ arbitrage using the same bidding cost model. While we did
1406
+ not consider the network model in this formulation, the price-
1407
+ taker market clearing model is the same should we use nodal
1408
+ prices.
1409
+ Note that the formulation in (6) applies to both price-taker
1410
+ market bidding (HA-1 and HA-10) and price response (PR-1
1411
+ and PR-10). The difference is that in HA cases, storage has
1412
+ to decide the bids (Ct,j and Bt,j) one hour before the market
1413
+ clearing period t, while in PR cases storage updates bids at
1414
+ the same time when observing the price.
1415
+
9NAzT4oBgHgl3EQfSft5/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
9NFRT4oBgHgl3EQfqTc6/content/tmp_files/2301.13616v1.pdf.txt ADDED
@@ -0,0 +1,1725 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Anti-Exploration by Random Network Distillation
2
+ Alexander Nikulin 1 Vladislav Kurenkov 1 Denis Tarasov 1 Sergey Kolesnikov 1
3
+ Abstract
4
+ Despite the success of Random Network Distilla-
5
+ tion (RND) in various domains, it was shown as
6
+ not discriminative enough to be used as an uncer-
7
+ tainty estimator for penalizing out-of-distribution
8
+ actions in offline reinforcement learning. In this
9
+ paper, we revisit these results and show that, with
10
+ a naive choice of conditioning for the RND prior,
11
+ it becomes infeasible for the actor to effectively
12
+ minimize the anti-exploration bonus and discrim-
13
+ inativity is not an issue. We show that this lim-
14
+ itation can be avoided with conditioning based
15
+ on Feature-wise Linear Modulation (FiLM), re-
16
+ sulting in a simple and efficient ensemble-free
17
+ algorithm based on Soft Actor-Critic. We eval-
18
+ uate it on the D4RL benchmark, showing that it
19
+ is capable of achieving performance comparable
20
+ to ensemble-based methods and outperforming
21
+ ensemble-free approaches by a wide margin. 1
22
+ 1. Introduction
23
+ In recent years, significant success has been achieved in ap-
24
+ plying Reinforcement Learning (RL) to challenging and
25
+ large-scale tasks such as Atari (Badia et al., 2020), Go
26
+ (Schrittwieser et al., 2020), Dota 2 (Berner et al., 2019),
27
+ and Minecraft (Baker et al., 2022). However, the online na-
28
+ ture of such RL algorithms makes it difficult to apply them
29
+ in the real world, where online collection of large amounts
30
+ of exploratory data may not be feasible for safety or fi-
31
+ nancial reasons. Offline Reinforcement Learning (Levine
32
+ et al., 2020) promises a more controllable and data-driven
33
+ approach, focusing on algorithms that can learn from a fixed,
34
+ pre-recorded dataset without requiring additional environ-
35
+ ment interactions.
36
+ The use of ensembles for uncertainty-based penalization has
37
+ proven to be one of the most effective approaches for offline
38
+ RL. Ensemble-based algorithms, such as SAC-N, EDAC
39
+ (An et al., 2021), and MSG (Ghasemipour et al., 2022)
40
+ 1Tinkoff, Moscow, Russia. Correspondence to: Alexander
41
+ Nikulin <[email protected]>.
42
+ 1Our implementation is available at https://github.
43
+ com/tinkoff-ai/sac-rnd
44
+ 0.0
45
+ 0.5
46
+ 1.0
47
+ 1.5
48
+ 2.0
49
+ 2.5
50
+ 3.0
51
+ training steps
52
+ 1e6
53
+ 0
54
+ 20
55
+ 40
56
+ 60
57
+ 80
58
+ 100
59
+ average normalized score
60
+ CQL (ensemble-free)
61
+ SAC-N (ensemble-based)
62
+ SAC-RND (Naive)
63
+ SAC-RND (Ours)
64
+ Figure 1. Mean performance of SAC-RND variants on walker and
65
+ hopper medium-* datasets, each averaged over 3 seeds. We plot
66
+ performance for the naive version, which uses concatenation con-
67
+ ditioning, and our final version, which is described in Section 5.
68
+ We also plot the final scores for the ensemble-free CQL (Kumar
69
+ et al., 2020) and the ensemble-based SAC-N (An et al., 2021). It
70
+ can be seen that our version is a significant improvement over the
71
+ naive version, achieving performance comparable to ensembles.
72
+ currently achieve state-of-the-art results on most D4RL (Fu
73
+ et al., 2020) datasets, outperforming ensemble-free methods
74
+ by a wide margin. Unfortunately, in order to achieve the best
75
+ performance, these algorithms may require tens or hundreds
76
+ of ensemble members, leading to significant computational
77
+ and memory overhead, as well as extended training duration
78
+ (Nikulin et al., 2022).
79
+ Recent research (Yang et al., 2022) has successfully reduced
80
+ the ensemble size to tens of Q-networks in the worst-case
81
+ scenarios. However, given the general trend for model scal-
82
+ ing in offline RL (Kumar et al., 2022; Reed et al., 2022; Lee
83
+ et al., 2022), efficiently training even ten Q-networks with
84
+ 80 million parameters each is not feasible. Furthermore,
85
+ Ghasemipour et al. (2022) showed that methods for efficient
86
+ ensemble training found in supervised learning literature
87
+ do not deliver performance comparable to naive ensembles
88
+ and can even worsen the results. Thus, further research
89
+ on efficient uncertainty estimation for offline RL is needed,
90
+ with the goal of reducing the size of the ensemble as much
91
+ as possible or even fully removing it.
92
+ In this work, we move away from ensembles and take an
93
+ alternative approach to uncertainty estimation, proposing an
94
+ arXiv:2301.13616v1 [cs.LG] 31 Jan 2023
95
+
96
+ Anti-Exploration by Random Network Distillation
97
+ efficient offline RL method with ensemble-free uncertainty
98
+ estimation via Random Network Distillation (RND) (Burda
99
+ et al., 2018). RND, a simple and fast ensemble competitor
100
+ for epistemic uncertainty estimation (Ciosek et al., 2019),
101
+ is an attractive choice for offline RL. However, previous
102
+ research (Rezaeifar et al., 2022) found RND to be insuffi-
103
+ ciently discriminative for good results.
104
+ In our preliminary experiment (Section 3), we show that
105
+ RND is discriminative enough to detect OOD actions, which
106
+ contradicts the previous study (Rezaeifar et al., 2022). Nev-
107
+ ertheless, our results show that the naive application of RND
108
+ does indeed not lead to good results (see Figure 1). Building
109
+ upon these findings, we further simplify the problem and
110
+ analyze the reasons for this issue (Section 4). We discover
111
+ that a naive choice of conditioning for the RND prior can
112
+ hinder the minimization of the anti-exploration bonus by
113
+ the actor, and that conditioning based on Feature-wise Lin-
114
+ ear Modulation (FiLM) (Perez et al., 2018) is particularly
115
+ effective in solving this problem.
116
+ Based on our findings, we propose a new ensemble-free of-
117
+ fline RL algorithm called SAC-RND (Section 5). We eval-
118
+ uate our method on the D4RL (Fu et al., 2020) benchmark
119
+ (Section 6), and show that SAC-RND achieves performance
120
+ comparable to ensemble-based methods while outperform-
121
+ ing ensemble-free approaches.
122
+ 2. Background
123
+ Offline Reinforcement Learning. Reinforcement learning
124
+ problem can be described as a Markov Decision Process
125
+ (MDP) defined by the {S, A, P, R, γ} tuple with state space
126
+ S ⊂ RN, action space A ⊂ RM, transition dynamics P :
127
+ S × A → S, reward function R : S × A → R, and a
128
+ discount factor γ. The goal of reinforcement learning in
129
+ an infinite horizon setting is to produce a policy π(a|s)
130
+ that maximizes the expected cumulative discounted return
131
+ Eπ[�∞
132
+ t=0 γtr(st, at)].
133
+ In offline reinforcement learning, a policy must be learned
134
+ from a fixed dataset D collected under a different policy or
135
+ mixture of policies, without any environment interaction.
136
+ This setting poses unique fundamental challenges (Levine
137
+ et al., 2020), since the learning policy is unable to explore
138
+ and has to deal with distributional shift and extrapolation
139
+ errors (Fujimoto et al., 2019) for actions not represented in
140
+ the training dataset.
141
+ Offline RL as Anti-Exploration. There are numerous ap-
142
+ proaches for offline RL, a substantial part of which constrain
143
+ the learned policy to stay within the support of the train-
144
+ ing dataset, thus reducing (Kumar et al., 2020) or avoiding
145
+ (Kostrikov et al., 2021) extrapolation errors. For our work,
146
+ it is essential to understand how such a constraint can be
147
+ framed as anti-exploration (Rezaeifar et al., 2022).
148
+ Similarly to online RL, where novelty bonuses are used as
149
+ additive intrinsic rewards for effective exploration, in offline
150
+ RL, novelty bonuses can induce conservatism, reducing the
151
+ reward in unseen state-action pairs. Hence the name anti-
152
+ exploration, since the same approaches from exploration
153
+ can be used, but a bonus is subtracted from the extrinsic
154
+ reward instead of being added to it.
155
+ However, unlike online RL, subtracting a bonus from the
156
+ raw reward would not be as useful, since the novelty bonus
157
+ is, by design, close to zero for in-dataset state-action pairs.
158
+ Therefore, it is more effective to apply it where the overesti-
159
+ mation for OOD actions emerges — the temporal difference
160
+ learning target:
161
+ r + γEa′∼π(·|s′)[Q(s′, a′) − b(s′, a′)]
162
+ (1)
163
+ where the actor is trained to maximize the expected Q-value,
164
+ as is usually done in off-policy actor-critic algorithms (Lil-
165
+ licrap et al., 2015; Haarnoja et al., 2018). It can be shown
166
+ that, theoretically, these approaches are equivalent, but the
167
+ latter is more suited for use in offline RL (Rezaeifar et al.,
168
+ 2022).
169
+ An illustrative example of how such framing can be effective
170
+ are ensemble-based approaches such as SAC-N & EDAC
171
+ (An et al., 2021) and MSG (Ghasemipour et al., 2022),
172
+ which currently outperform their ensemble-free counterparts
173
+ by a large margin on most D4RL (Fu et al., 2020) benchmark
174
+ datasets. For the anti-exploration bonus, these methods use
175
+ ensemble disagreement as a proxy for epistemic uncertainty.
176
+ However, a large number of ensemble members is usually
177
+ required for a competitive result.
178
+ Random Network Distillation. Random network distilla-
179
+ tion (RND) was first proposed in online RL (Burda et al.,
180
+ 2018) as a simple and effective exploration bonus. To this
181
+ day, RND is still considered a strong baseline for explo-
182
+ ration that can work well even in stochastic environments,
183
+ contrary to some more modern approaches (Jarrett et al.,
184
+ 2022).
185
+ RND consists of two neural networks: a fixed and randomly
186
+ initialized prior network ¯f ¯
187
+ ψ, and a predictor network fψ
188
+ which learns to predict the prior outputs on the training data:
189
+ ∥fψ(s) − ¯f ¯
190
+ ψ(s)∥2
191
+ 2
192
+ (2)
193
+ Both networks map states to embeddings in RK, and the
194
+ gradient through prior is disabled. The interpretation of
195
+ the novelty is straightforward: with the sufficiently diverse
196
+ prior, the predictor must learn to match embeddings on data
197
+ points similar to the training dataset, while failing to predict
198
+ on new examples. A bonus in such a case may simply be a
199
+ prediction error, as in Equation (2).
200
+
201
+ Anti-Exploration by Random Network Distillation
202
+ In a subsequent work, Ciosek et al. (2019) analyses the
203
+ success of RND in a supervised setting, and shows that
204
+ fitting random priors can be a competitive alternative to
205
+ ensembles for estimating epistemic uncertainty.
206
+ Note that in practice, the choice of predictor and prior having
207
+ the same architecture and the estimation of novelty from
208
+ states only are very common, but arbitrary. Moreover, for
209
+ offline RL, we are interested in estimating the novelty of an
210
+ action conditioned on the state, which is why in our work
211
+ RND depends on both: fψ(s, a).
212
+ Multiplicative Interactions. The most common way to
213
+ fuse two different streams of information is feature con-
214
+ catenation, which is straightforward but can be suboptimal
215
+ (Dumoulin et al., 2018). Jayakumar et al. (2020) shows that
216
+ multiplicative interactions provide a powerful inductive bias
217
+ for fusing or conditioning from multiple streams and are
218
+ superior in practice. We provide a brief review of those used
219
+ in our work (excluding concatenation): gating, bilinear, and
220
+ feature-wise linear modulation (FiLM).
221
+ Gating. Simple conditioning with two linear layers and
222
+ pointwise multiplication of the resulting features (Srivastava
223
+ et al., 2019).
224
+ $f(a, s) = \tanh(W_1 a + b_1) \odot \sigma(W_2 s + b_2)$
225
+ Bilinear. Bilinear layer in its most general form, as pro-
226
+ posed by Jayakumar et al. (2020).
227
+ $f(a, s) = s^\top W a + s^\top U + V a + b$
228
+ where W is a 3D tensor, U, V are regular matrices and b
229
+ is a vector. However, in our work, we also use the imple-
230
+ mentation as in PyTorch, which does not learn U, V by
231
+ default.
232
+ FiLM. Special case of a bilinear layer with low-rank weight
233
+ matrices (Perez et al., 2018).
234
+ $f(h, s) = \gamma(s) \odot h + \beta(s)$
235
+ Usually, FiLM operates on hidden activations h before non-
236
+ linearity between layers. Thus, the main network takes a as
237
+ an input.
238
+ 3. Random Network Distillation is
239
+ Discriminative Enough
240
+ To better understand the possible difficulties of applying
241
+ RND to offline RL, we first reproduce the main experiment
242
+ from Rezaeifar et al. (2022), which showed that RND is not
243
+ discriminative enough to be used as a novelty bonus. For
244
+ convenience, we provide the original figure from Rezaeifar
245
+ et al. (2022) in the Appendix A. We also compare RND with
246
+ 0.0
247
+ 2.5
248
+ 5.0
249
+ 7.5
250
+ 10.0
251
+ standard deviation
252
+ 0
253
+ 20000
254
+ 40000
255
+ Q-Ensemble
256
+ 0.0
257
+ 2.5
258
+ 5.0
259
+ 7.5
260
+ 10.0
261
+ prediction error
262
+ RND
263
+ dataset
264
+ uniform
265
+ dataset + noise (std .25)
266
+ dataset + noise (std .5)
267
+ Figure 2. Anti-exploration bonus (Rezaeifar et al., 2022) on the
268
+ walker2d-medium dataset for trained SAC-N (An et al., 2021),
269
+ Q-ensemble (N = 25) and RND. Bonus is computed for state-
270
+ action pairs from the original dataset and different perturbations of
271
+ actions: random actions, dataset actions to which Gaussian noise
272
+ is added with different scales. Both RND networks use simple
273
+ state-action concatenation. The result is strikingly different from a
274
+ similar figure in the Rezaeifar et al. (2022) (we provide the original
275
+ figure in the Appendix A for convenience). Contrary to previous
276
+ research, it can be seen that RND is capable of distinguishing ID
277
+ from OOD actions and is comparable to a trained Q-ensemble.
278
+ a trained Q-ensemble (N = 25) from the SAC-N algorithm
279
+ (An et al., 2021). Similarly to Rezaeifar et al. (2022), we
280
+ use simple state-action concatenation. Predictor and prior
281
+ share the identical architecture of 4-layer MLPs.
282
+ The goal of the experiment (see Figure 2) is to visually
283
+ plot the anti-exploration bonus for ID state-action pairs
284
+ and different perturbations of actions to model OOD data:
285
+ random actions sampled from a uniform distribution and
286
+ dataset actions to which Gaussian noise with different scales
287
+ is added.
288
+ To our surprise, the result on Figure 2 is strikingly different
289
+ from previous work. It shows that RND is able to discrim-
290
+ inate between ID and OOD actions with varying degrees
291
+ of distributional shift and is comparable to a trained Q-
292
+ ensemble. In contrast, Rezaeifar et al. (2022) hypothesizes
293
+ that RND can only work well out of the box for discrete ac-
294
+ tion spaces and visual features, and concludes that extending
295
+ it to continuous action spaces is not straightforward.
296
+ After further investigation of the open-sourced codebase2 in
297
+ search of discrepancies with our implementation, we found
298
+ that the only difference is that, contrary to the advice of
299
+ Ciosek et al. (2019), Rezaeifar et al. (2022) sets the predictor
300
+ smaller than prior by two layers during RND pretraining. It
301
+ is important to make the predictor larger or comparable in
302
+ capacity to the prior so that it can minimize the loss to zero
303
+ on the training dataset (Ciosek et al., 2019). However, the
304
+ actual RND hyperparameters used in the final publication
305
+ were not listed, so we cannot draw a definitive conclusion
306
+ about the reason for such different results.
307
+ 2https://github.com/shidilrzf/Anti-exploration-RL
308
+
309
+ Anti-Exploration by Random Network Distillation
310
+ 4. Concatenation Prior Hinders Bonus
311
+ Minimization
312
+ A well-behaved anti-exploration bonus for continuous action
313
+ spaces, be it RND or any other, should satisfy at least two
314
+ criteria. First, it should be discriminative enough to detect
315
+ novel actions and downweight their value estimates (see
316
+ Equation (1)). Ideally, the bonus should be close to zero for
317
+ ID data so that we do not bias the Q-function, as this can
318
+ be detrimental to training. Second, it should allow the actor
319
+ to easily minimize the bonus with gradient descent during
320
+ training.
321
+ In Section 3, we showed that RND can detect OOD ac-
322
+ tions. Nevertheless, naive use of RND as an anti-exploration
323
+ bonus on top of the Soft Actor Critic algorithm (Haarnoja
324
+ et al., 2018) still does not provide satisfactory performance
325
+ (see Figure 1) with scores lower than CQL (Kumar et al.,
326
+ 2020) and SAC-N (An et al., 2021). This gives us a hint
327
+ that the problem may not be the discriminative power of
328
+ RND, but that the actor cannot effectively minimize the
329
+ anti-exploration bonus during training.
330
+ To test our hypothesis that the actor cannot effectively min-
331
+ imize the anti-exploration bonus, we further simplify the
332
+ problem by removing the critic from the SAC algorithm
333
+ but keeping the entropy bonus (see Algorithm 2 in the Ap-
334
+ pendix). We expect that, in such a setting, the actor will be
335
+ able to successfully minimize the anti-exploration bonus to
336
+ the possible minimum, i.e. comparable to the bonus for the
337
+ ground truth data at the end of the RND pretraining. As a
338
+ consequence, since dataset actions provide the minimum
339
+ bonus by design, we also expect that the distance from the
340
+ agent to dataset actions should be small.
341
+ We set predictor architecture to state-action concatenation.
342
+ Additionally, we explore different conditioning schemes for
343
+ the prior. We use the halfcheetah, walker2d and hopper
344
+ medium datasets, with 3 seeds each. Figure 3 compares
345
+ the anti-exploration bonus for dataset actions during RND
346
+ pretraining (see Figure 3a) and for agent actions during
347
+ training (see Figure 3b).
348
+ As one can see for all prior architectures except one, the
349
+ anti-exploration bonus during actor training is much higher
350
+ than it should be according to the values on the dataset
351
+ actions. These results confirm our hypothesis. Furthermore,
352
+ we can note from Figure 3c that the actor cannot clone the
353
+ behavioral policy, since the distance to the dataset actions
354
+ can even increase during training.
355
+ However, RND with the FiLM prior architecture allows the
356
+ actor to effectively minimize the anti-exploration bonus and
357
+ successfully clone the behavioral policy. This suggests that,
358
+ with the right inductive bias for the prior, we can solve the
359
+ problems of naive RND and possibly achieve better results.
360
+ Table 1. Comparison of different RND predictors. Prior uses FiLM
361
+ conditioning. Predictor uses conditioning in the first layer. All
362
+ scores are averaged over 3 random seeds. Halfcheetah tasks are
363
+ ommited, as we found them non-representative of the final perfor-
364
+ mance on harder tasks.
365
+ Task Name
366
+ Concat
367
+ Gating
368
+ Bilinear
369
+ FiLM
370
+ hopper-medium-v2
371
+ 94.8
372
+ 39.7
373
+ 98.4
374
+ 86.3
375
+ hopper-medium-expert-v2
376
+ 71.5
377
+ 59.3
378
+ 110.3
379
+ 102.7
380
+ hopper-medium-replay-v2
381
+ 100.3
382
+ 51.3
383
+ 100.8
384
+ 100.3
385
+ walker2d-medium-v2
386
+ 94.8
387
+ 82.3
388
+ 92.8
389
+ 95.1
390
+ walker2d-medium-expert-v2
391
+ 86.1
392
+ 84.2
393
+ 108.9
394
+ 110.0
395
+ walker2d-medium-replay-v2
396
+ 90.3
397
+ 87.5
398
+ 88.3
399
+ 75.7
400
+ Average
401
+ 89.6
402
+ 67.3
403
+ 99.9
404
+ 95.0
405
+ 5. Anti-Exploration by Random Network
406
+ Distillation
407
+ We are now ready to present SAC-RND: a new offline RL
408
+ method for continuous action spaces, based on our findings
409
+ in Section 3 and Section 4. It is simple, ensemble-free and
410
+ achieves state-of-the-art results comparable to ensemble-
411
+ based methods.
412
+ We have chosen the Soft Actor-Critic
413
+ (Haarnoja et al., 2018) algorithm as the backbone of the
414
+ method. In this section, we will explain how the RND is
415
+ trained and how we define the anti-exploration bonus.
416
+ Random Network Distillation. We pretrain RND with
417
+ MSE loss between prior and predictor embeddings, stop-
418
+ ping gradient through prior and freezing both networks after-
419
+ wards during SAC training. We keep both networks similar
420
+ in size to the agent and critic, which are 4-layer MLPs. Con-
421
+ trary to Burda et al. (2018); Ciosek et al. (2019), we do not
422
+ add additional layers to the predictor to prevent undesirable
423
+ results. This is because, when the predictor size is bigger
424
+ than prior on state-based tasks (not image-based as in orig-
425
+ inal work by Burda et al. (2018)), we observe that it can
426
+ sometimes overgeneralize to OOD prior embeddings.
427
+ According to Section 4, for the prior, we use FiLM condi-
428
+ tioning on penultimate layer before nonlinearity. In prin-
429
+ ciple, the predictor can be arbitrary (Ciosek et al., 2019),
430
+ but in practice, its architecture and conditioning type can
431
+ also affect performance. We conduct a preliminary study
432
+ on a small subset of the D4RL Gym tasks to select the best-
433
+ performing conditioning. Based on the results in Table 1,
434
+ we chose a predictor with bilinear conditioning in the first
435
+ layer, as it showed the best performance.
436
+ Anti-Exploration Bonus. We define the anti-exploration
437
+ bonus similarly to RND loss as
438
+ $b(s, a) = \|f_\psi(s, a) - \bar{f}_{\bar{\psi}}(s, a)\|_2^2$
439
+ (3)
442
+ and additionally divide it by RND loss running standard
443
+ deviation (which is tracked during pretraining phase) to
444
+ increase its scale uniformly among environments. Such
445
+
446
+ Anti-Exploration by Random Network Distillation
447
+ 0
448
+ 50000
449
+ 100000
450
+ 150000
451
+ 200000
452
+ 250000
453
+ 300000
454
+ 350000
455
+ training steps
456
+ 0.0
457
+ 0.1
458
+ 0.2
459
+ 0.3
460
+ 0.4
461
+ 0.5
462
+ 0.6
463
+ 0.7
464
+ RND bonus
465
+ Concat
466
+ FiLM
467
+ Bilinear
468
+ Gated
469
+ (a) RND bonus for dataset actions
470
+ 0
471
+ 20000
472
+ 40000
473
+ 60000
474
+ 80000
475
+ 100000
476
+ training steps
477
+ 0
478
+ 1
479
+ 2
480
+ 3
481
+ 4
482
+ 5
483
+ 6
484
+ 7
485
+ RND bonus
486
+ Concat
487
+ FiLM
488
+ Bilinear
489
+ Gated
490
+ (b) RND bonus for actor actions
491
+ 0
492
+ 20000
493
+ 40000
494
+ 60000
495
+ 80000
496
+ 100000
497
+ training steps
498
+ 0.2
499
+ 0.4
500
+ 0.6
501
+ 0.8
502
+ 1.0
503
+ 1.2
504
+ mean squared error
505
+ Concat
506
+ FiLM
507
+ Bilinear
508
+ Gating
509
+ (c) Distance to dataset actions
510
+ Figure 3. Effect of different state-action conditioning in the prior of RND on actor training. We use the halfcheetah, walker2d and hopper
511
+ medium datasets, with 3 seeds each. For training procedure, see Algorithm 2 in the Appendix. (a) Anti-exploration bonus for in-dataset
512
+ actions during RND pretraining. We additionally divide the bonus by the RND loss running standard deviation to increase its scale
513
+ (see Section 5) so the anti-exploration bonus increases slightly over time as standard deviation decreases. However, this does not affect
514
+ minimization by the actor and is needed to highlight the differences. (b) Anti-exploration bonus for actor actions during training. Ideally,
515
+ it should converge to values close to the final values in (a). (c) Distance of actor actions to true in-dataset actions during training. Ideally,
516
+ it should decrease, as actions closer to the behavioral policy have the lowest bonus by design.
517
+ scaling simplifies hyperparameter search, shrinking the pos-
518
+ sible range of useful α coefficients that control the level of
519
+ conservatism during training.
520
+ For detailed training procedure and full SAC losses, we
521
+ refer to Algorithm 1 in the Appendix (differences with the
522
+ original SAC algorithm are highlighted in blue).
523
+ 6. Experiments
524
+ In this section, we present an empirical evaluation of our
525
+ method using the D4RL benchmark on the Gym domain
526
+ (Section 6.1) and the more challenging AntMaze domain
527
+ (Section 6.2). Next, we provide additional analysis and
528
+ visual insight into why FiLM conditioning in the prior might
529
+ be beneficial (Section 6.3). Finally, we present an ablation
530
+ that compares more variations of conditioning for predictor
531
+ and prior (Section 6.4). For each experiment, we also list the
532
+ exact hyperparameters in Appendix D and implementation
533
+ details in Appendix C. Additionally, we analyse sensitivity
534
+ to hyperparameters in Appendix E.
535
+ 6.1. Evaluation on the Gym Domain
536
+ Setup. We evaluate our method on all available datasets for
537
+ the HalfCheetah, Walker2d and Hopper tasks in the Gym do-
538
+ main of the D4RL benchmark. For ensemble-free baselines,
539
+ we chose CQL (Kumar et al., 2020), IQL (Kostrikov et al.,
540
+ 2021), TD3+BC (Fujimoto & Gu, 2021), which show good
541
+ results and are widely used in practice. For ensemble-based
542
+ baselines, we chose SAC-N & EDAC (An et al., 2021) and
543
+ the more recent RORL (Yang et al., 2022), which currently
544
+ achieve state-of-the-art scores in this domain. We follow the
545
+ An et al. (2021) and train for 3M gradient steps, evaluating
546
+ on 10 episodes.
547
+ Results. The resulting scores are presented in Table 2. We
548
+ see that SAC-RND stands out from the ensemble-free meth-
549
+ ods and outperforms them by a wide margin, achieving a
550
+ mean score comparable to EDAC and only slightly behind
551
+ RORL. Note that we do not use ensembles, whereas SAC-N
552
+ can require up to 500 critics, EDAC up to 50 and RORL up
553
+ to 20. In addition, we compare our proposed changes with
554
+ the naive predictor and prior, confirming that our modifi-
555
+ cations are essential for achieving good performance (see
556
+ Figure 1).
557
+ 6.2. Evaluation on the AntMaze Domain
558
+ Setup. We evaluate our method on all datasets available for
559
+ the AntMaze domain of the D4RL benchmark. Ensemble-
560
+ free baselines are the same as in Section 6.1. For ensemble-
561
+ based baselines, we chose RORL (Yang et al., 2022) and
562
+ MSG (Ghasemipour et al., 2022), the latter of which, to
563
+ our knowledge, currently has the best mean score for this
564
+ domain. We do not include SAC-N and EDAC, as there are
565
+ no public results for them on this domain, and we were also
566
+ unable to obtain a non-zero result. We follow the An et al.
567
+ (2021) and train for 3M gradient steps, evaluating on 100
568
+ episodes.
569
+ Results. The resulting scores are presented in Table 3.
570
+ Kostrikov et al. (2021) has shown that many offline RL
571
+ methods that perform well on the Gym domain fail on the
572
+ AntMaze domain. It can be seen that, on the AntMaze do-
573
+ main, SAC-RND shows good results that are on par with
574
+ ensembles, and outperforms ensemble-free methods. This
575
+ also shows that our choice of predictor and prior generalises
576
+ well to new domains. Note that, in addition to ensembles,
577
+ both MSG and RORL require pre-training or supervision
578
+ with behavioural cloning in order to achieve reported results,
579
+
580
+ Anti-Exploration by Random Network Distillation
581
+ Table 2. SAC-RND evaluation on the Gym domain. We report the final normalized score averaged over 4 random seeds on v2 datasets.
582
+ TD3 + BC and IQL scores are taken from Lyu et al. (2022). CQL, SAC-N and EDAC scores are taken from An et al. (2021). RORL
583
+ scores are taken from Yang et al. (2022).
584
+ Ensemble-free
585
+ Ensemble-based
586
+ Task Name
587
+ TD3+BC
588
+ IQL
589
+ CQL
590
+ SAC-N
591
+ EDAC
592
+ RORL
593
+ SAC-RND
594
+ halfcheetah-random
595
+ 11.0 ± 1.1
596
+ 13.1 ± 1.3
597
+ 31.1 ± 3.5
598
+ 28.0 ± 0.9
599
+ 28.4 ± 1.0
600
+ 28.5 ± 0.8
601
+ 29.0 ± 1.5
602
+ halfcheetah-medium
603
+ 48.3 ± 0.3
604
+ 47.4 ± 0.2
605
+ 46.9 ± 0.4
606
+ 67.5 ± 1.2
607
+ 65.9 ± 0.6
608
+ 66.8 ± 0.7
609
+ 66.6 ± 1.6
610
+ halfcheetah-expert
611
+ 96.7 ± 1.1
612
+ 95.0 ± 0.5
613
+ 97.3 ± 1.1
614
+ 105.2 ± 2.6
615
+ 106.8 ± 3.4
616
+ 105.2 ± 0.7
617
+ 105.8 ± 1.9
618
+ halfcheetah-medium-expert
619
+ 90.7 ± 4.3
620
+ 86.7 ± 5.3
621
+ 95.0 ± 1.4
622
+ 107.1 ± 2.0
623
+ 106.3 ± 1.9
624
+ 107.8 ± 1.1
625
+ 107.6 ± 2.8
626
+ halfcheetah-medium-replay
627
+ 44.6 ± 0.5
628
+ 44.2 ± 1.2
629
+ 45.3 ± 0.3
630
+ 63.9 ± 0.8
631
+ 61.3 ± 1.9
632
+ 61.9 ± 1.5
633
+ 54.9 ± 0.6
634
+ halfcheetah-full-replay
635
+ -
636
+ -
637
+ 76.9 ± 0.9
638
+ 84.5 ± 1.2
639
+ 84.6 ± 0.9
640
+ -
641
+ 82.7 ± 0.9
642
+ hopper-random
643
+ 8.5 ± 0.6
644
+ 7.9 ± 0.2
645
+ 5.3 ± 0.6
646
+ 31.3 ± 0.0
647
+ 25.3 ± 10.4
648
+ 31.4 ± 0.1
649
+ 31.3 ± 0.1
650
+ hopper-medium
651
+ 59.3 ± 4.2
652
+ 66.2 ± 5.7
653
+ 61.9 ± 6.4
654
+ 100.3 ± 0.3
655
+ 101.6 ± 0.6
656
+ 104.8 ± 0.1
657
+ 97.8 ± 2.3
658
+ hopper-expert
659
+ 107.8 ± 7.0
660
+ 109.4 ± 0.5
661
+ 106.5 ± 9.1
662
+ 110.3 ± 0.3
663
+ 110.1 ± 0.1
664
+ 112.8 ± 0.2
665
+ 109.7 ± 0.3
666
+ hopper-medium-expert
667
+ 98.0 ± 9.4
668
+ 91.5 ± 14.3
669
+ 96.9 ± 15.1
670
+ 110.1 ± 0.3
671
+ 110.7 ± 0.1
672
+ 112.7 ± 0.2
673
+ 109.8 ± 0.6
674
+ hopper-medium-replay
675
+ 60.9 ± 18.8
676
+ 94.7 ± 8.6
677
+ 86.3 ± 7.3
678
+ 101.8 ± 0.5
679
+ 101.0 ± 0.5
680
+ 102.8 ± 0.5
681
+ 100.5 ± 1.0
682
+ hopper-full-replay
683
+ -
684
+ -
685
+ 101.9 ± 0.6
686
+ 102.9 ± 0.3
687
+ 105.4 ± 0.7
688
+ -
689
+ 107.3 ± 0.1
690
+ walker2d-random
691
+ 1.6 ± 1.7
692
+ 5.4 ± 1.2
693
+ 5.1 ± 1.7
694
+ 21.7 ± 0.0
695
+ 16.6 ± 7.0
696
+ 21.4 ± 0.2
697
+ 21.5 ± 0.1
698
+ walker2d-medium
699
+ 83.7 ± 2.1
700
+ 78.3 ± 8.7
701
+ 79.5 ± 3.2
702
+ 87.9 ± 0.2
703
+ 92.5 ± 0.8
704
+ 102.4 ± 1.4
705
+ 91.6 ± 2.8
706
+ walker2d-expert
707
+ 110.2 ± 0.3
708
+ 109.9 ± 1.2
709
+ 109.3 ± 0.1
710
+ 107.4 ± 2.4
711
+ 115.1 ± 1.9
712
+ 115.4 ± 0.5
713
+ 114.3 ± 0.6
714
+ walker2d-medium-expert
715
+ 110.1 ± 0.5
716
+ 109.6 ± 1.0
717
+ 109.1 ± 0.2
718
+ 116.7 ± 0.4
719
+ 114.7 ± 0.9
720
+ 121.2 ± 1.5
721
+ 105.0 ± 7.9
722
+ walker2d-medium-replay
723
+ 81.8 ± 5.5
724
+ 73.8 ± 7.1
725
+ 76.8 ± 10.0
726
+ 78.7 ± 0.7
727
+ 87.1 ± 2.4
728
+ 90.4 ± 0.5
729
+ 88.7 ± 7.7
730
+ walker2d-full-replay
731
+ -
732
+ -
733
+ 94.2 ± 1.9
734
+ 94.6 ± 0.5
735
+ 99.8 ± 0.7
736
+ -
737
+ 109.2 ± 1.8
738
+ Average
739
+ 67.5
740
+ 68.9
741
+ 73.6
742
+ 84.4
743
+ 85.2
744
+ 85.7
745
+ 85.2
746
+ while our method does not require any additional modifica-
747
+ tions.
748
+ Table 3. SAC-RND evaluation on AntMaze domain. We report
749
+ the final normalized score averaged over 4 random seeds on v1
750
+ datasets. IQL, CQL, MSG scores are taken from Ghasemipour
751
+ et al. (2022). TD3+BC, RORL scores are taken from Yang et al.
752
+ (2022).
753
+ Ensemble-free
754
+ Ensemble-based
755
+ Task Name
756
+ TD3+BC
757
+ IQL
758
+ CQL
759
+ RORL
760
+ MSG
761
+ SAC-RND
762
+ antmaze-umaze
763
+ 78.6
764
+ 87.5
765
+ 74.0
766
+ 97.7 ± 1.9
767
+ 97.8 ± 1.2
768
+ 97.2 ± 1.2
769
+ antmaze-umaze-diverse
770
+ 71.4
771
+ 62.2
772
+ 84.0
773
+ 90.7 ± 2.9
774
+ 81.8 ± 3.0
775
+ 83.5 ± 7.7
776
+ antmaze-medium-play
777
+ 10.6
778
+ 71.2
779
+ 61.2
780
+ 76.3 ± 2.5
781
+ 89.6 ± 2.2
782
+ 65.5 ± 35.7
783
+ antmaze-medium-diverse
784
+ 3.0
785
+ 70.0
786
+ 53.7
787
+ 69.3 ± 3.3
788
+ 88.6 ± 2.6
789
+ 88.5 ± 9.2
790
+ antmaze-large-play
791
+ 0.2
792
+ 39.6
793
+ 15.8
794
+ 16.3 ± 11.1
795
+ 72.6 ± 7.0
796
+ 67.2 ± 6.1
797
+ antmaze-large-diverse
798
+ 0.0
799
+ 47.5
800
+ 14.9
801
+ 41.0 ± 10.7
802
+ 71.4 ± 12.2
803
+ 57.6 ± 22.7
804
+ Average
805
+ 27.3
806
+ 63.0
807
+ 50.6
808
+ 65.2
809
+ 83.6
810
+ 76.6
811
+ 6.3. Why is FiLM Conditioning Beneficial for Bonus
812
+ Minimization?
813
+ In Section 4, we showed that FiLM conditioning in the RND
814
+ prior significantly improved the actor’s ability to minimize
815
+ the anti-exploration bonus. Since the issue occurred during
816
+ actor training, we hypothesize that this may be related to
817
+ the anti-exploration bonus optimization landscape. In this
818
+ section, we analyze the anti-gradient fields for conditioning
819
+ with concatenation or FiLM for the prior network.
820
+ For the purpose of analysis, we design a toy dataset with
821
+ only four categorical states and two-dimensional actions
822
+ sampled uniformly in each corner of the grid (see Ap-
823
+ pendix B for dataset visualization and generation details).
824
+ We fix the hyperparameters and pretrain two RNDs that
825
+ differ only in the type of prior conditioning. The predictor
826
+ uses simple concatenation. Next, in Figure 4, we plot the
827
+ two-dimensional anti-gradient field for the anti-exploration
828
+ bonus conditioned on each state. The effect of FiLM be-
829
+ comes more apparent in these plots. While the resulting
830
+ anti-gradients for concatenation are noisy and only point
831
+ in the direction of the minimum in a small neighbourhood,
832
+ the directions for FiLM are smooth over the entire available
833
+ action space and point to the correct global minimum for
834
+ each state. While we cannot draw general conclusions from
835
+ such a demonstration, based on the results of Section 4,
836
+ we hypothesize that a similar phenomenon might exist in
837
+ high-dimensional problems as well.
838
+ 6.4. Exploring More Conditioning Pairs
839
+ One might wonder (1) how different types of conditioning
840
+ for predictor and prior interact with each other and (2) where
841
+ to introduce conditioning in terms of depth for it to be most
842
+ beneficial.
843
+ To answer these questions, we return to the experiment from
844
+ Section 4 and generate more variations for each type (where
845
+ it is possible): conditioning on the first layer, on the last
846
+ layer, and on all layers. We also look at two variations
847
+ of the bilinear layer: full, as presented in Jayakumar et al.
848
+ (2020), and simplified, which is used by default in PyTorch.
849
+ In Figure 5 we plot the final MSE between the resulting
850
+ policy and the behavioural one on the training data. Two
851
+ interesting observations can be made from these findings.
852
+
853
+ Anti-Exploration by Random Network Distillation
854
+ 0.0
855
+ 0.5
856
+ 1.0
857
+ 1.5
858
+ 2.0
859
+ 2.5
860
+ 3.0
861
+ 3.5
862
+ 4.0
863
+ a0
864
+ 0.0
865
+ 0.5
866
+ 1.0
867
+ 1.5
868
+ 2.0
869
+ 2.5
870
+ 3.0
871
+ 3.5
872
+ 4.0
873
+ a1
874
+ 0.005
875
+ 0.010
876
+ 0.015
877
+ 0.020
878
+ 0.025
879
+ 0.030
880
+ 0.035
881
+ 0.0
882
+ 0.5
883
+ 1.0
884
+ 1.5
885
+ 2.0
886
+ 2.5
887
+ 3.0
888
+ 3.5
889
+ 4.0
890
+ a0
891
+ 0.0
892
+ 0.5
893
+ 1.0
894
+ 1.5
895
+ 2.0
896
+ 2.5
897
+ 3.0
898
+ 3.5
899
+ 4.0
900
+ a1
901
+ 0.0025
902
+ 0.0050
903
+ 0.0075
904
+ 0.0100
905
+ 0.0125
906
+ 0.0150
907
+ 0.0175
908
+ 0.0200
909
+ 0.0
910
+ 0.5
911
+ 1.0
912
+ 1.5
913
+ 2.0
914
+ 2.5
915
+ 3.0
916
+ 3.5
917
+ 4.0
918
+ a0
919
+ 0.0
920
+ 0.5
921
+ 1.0
922
+ 1.5
923
+ 2.0
924
+ 2.5
925
+ 3.0
926
+ 3.5
927
+ 4.0
928
+ a1
929
+ 0.005
930
+ 0.010
931
+ 0.015
932
+ 0.020
933
+ 0.025
934
+ 0.0
935
+ 0.5
936
+ 1.0
937
+ 1.5
938
+ 2.0
939
+ 2.5
940
+ 3.0
941
+ 3.5
942
+ 4.0
943
+ a0
944
+ 0.0
945
+ 0.5
946
+ 1.0
947
+ 1.5
948
+ 2.0
949
+ 2.5
950
+ 3.0
951
+ 3.5
952
+ 4.0
953
+ a1
954
+ 0.005
955
+ 0.010
956
+ 0.015
957
+ 0.020
958
+ 0.025
959
+ 0.0
960
+ 0.5
961
+ 1.0
962
+ 1.5
963
+ 2.0
964
+ 2.5
965
+ 3.0
966
+ 3.5
967
+ 4.0
968
+ a0
969
+ 0.0
970
+ 0.5
971
+ 1.0
972
+ 1.5
973
+ 2.0
974
+ 2.5
975
+ 3.0
976
+ 3.5
977
+ 4.0
978
+ a1
979
+ 0.1
980
+ 0.2
981
+ 0.3
982
+ 0.4
983
+ (a) State 0
984
+ 0.0
985
+ 0.5
986
+ 1.0
987
+ 1.5
988
+ 2.0
989
+ 2.5
990
+ 3.0
991
+ 3.5
992
+ 4.0
993
+ a0
994
+ 0.0
995
+ 0.5
996
+ 1.0
997
+ 1.5
998
+ 2.0
999
+ 2.5
1000
+ 3.0
1001
+ 3.5
1002
+ 4.0
1003
+ a1
1004
+ 0.1
1005
+ 0.2
1006
+ 0.3
1007
+ 0.4
1008
+ 0.5
1009
+ 0.6
1010
+ (b) State 1
1011
+ 0.0
1012
+ 0.5
1013
+ 1.0
1014
+ 1.5
1015
+ 2.0
1016
+ 2.5
1017
+ 3.0
1018
+ 3.5
1019
+ 4.0
1020
+ a0
1021
+ 0.0
1022
+ 0.5
1023
+ 1.0
1024
+ 1.5
1025
+ 2.0
1026
+ 2.5
1027
+ 3.0
1028
+ 3.5
1029
+ 4.0
1030
+ a1
1031
+ 0.05
1032
+ 0.10
1033
+ 0.15
1034
+ 0.20
1035
+ 0.25
1036
+ 0.30
1037
+ 0.35
1038
+ 0.40
1039
+ (c) State 2
1040
+ 0.0
1041
+ 0.5
1042
+ 1.0
1043
+ 1.5
1044
+ 2.0
1045
+ 2.5
1046
+ 3.0
1047
+ 3.5
1048
+ 4.0
1049
+ a0
1050
+ 0.0
1051
+ 0.5
1052
+ 1.0
1053
+ 1.5
1054
+ 2.0
1055
+ 2.5
1056
+ 3.0
1057
+ 3.5
1058
+ 4.0
1059
+ a1
1060
+ 0.05
1061
+ 0.10
1062
+ 0.15
1063
+ 0.20
1064
+ 0.25
1065
+ 0.30
1066
+ 0.35
1067
+ 0.40
1068
+ (d) State 3
1069
+ Figure 4. Actions’ anti-gradient field for the anti-exploration bonus conditioned on four categorical states at each corner for the toy problem
1070
+ introduced in Section 6.3. We visualize the dataset in Figure 7 in the appendix. The top row corresponds to RND with concatenation
1071
+ conditioning in the prior, while the bottom row corresponds to FiLM conditioning. As can be seen, the resulting anti-gradients for
1072
+ concatenation are noisy, while the directions for FiLM are smooth over the entire available action space.
1073
+ First, FiLM may not be the only architecture with the right
1074
+ inductive biases for the prior, and both bilinear types with
1075
+ conditioning on all layers can also achieve similar results.
1076
+ However, compared to FiLM, inner bilinear layers are much
1077
+ more computationally expensive, as they involve at least
1078
+ one 3D weight tensor and two additional 2D weight tensors,
1079
+ and the hidden dimensions are usually much higher than the
1080
+ input dimensions.
1081
+ Second, it appears that conditioning on the last layer is most
1082
+ beneficial for the predictor, while conditioning on all layers
1083
+ is beneficial for the prior. In spite of that, it is difficult to
1084
+ draw broad conclusions, as different types may work well
1085
+ for new problems and domains.
1086
+ 7. Related Work
1087
+ Model-free offline RL. Most offline RL approaches focus
1088
+ on the distribution shift problem and overestimation bias
1089
+ of Q-values for OOD actions. Some researchers address
1090
+ this by imposing strict constraints for policy updates, pe-
1091
+ nalizing the divergence from the behavioral policy with KL
1092
+ divergence, maximum mean discrepancy (MMD) distance
1093
+ (Kumar et al., 2019; Wu et al., 2019), simple mean squared
1094
+ error (MSE) (Fujimoto & Gu, 2021), or by re-weighting
1095
+ behavioral policy actions with the estimated advantages
1096
+ (Nair et al., 2020). Others directly regularize Q-values
1097
+ by lowering return estimates for OOD actions, preventing
1098
+ gated
1099
+ concat_first
1100
+ concat_last
1101
+ concat_full
1102
+ bilinear_first
1103
+ bilinear_last
1104
+ bilinear_full
1105
+ torch_bilinear_first
1106
+ torch_bilinear_last
1107
+ torch_bilinear_full
1108
+ film_full
1109
+ film_first
1110
+ film_last
1111
+ prior
1112
+ gated
1113
+ concat_first
1114
+ concat_last
1115
+ concat_full
1116
+ bilinear_first
1117
+ bilinear_last
1118
+ bilinear_full
1119
+ torch_bilinear_first
1120
+ torch_bilinear_last
1121
+ torch_bilinear_full
1122
+ film_full
1123
+ film_first
1124
+ film_last
1125
+ predictor
1126
+ 0.2
1127
+ 0.4
1128
+ 0.6
1129
+ 0.8
1130
+ 1.0
1131
+ 1.2
1132
+ Figure 5. Mean squared error between actions of the actor trained
1133
+ with different conditioning for the predictor & prior and actions
1134
+ of the behavioral policy. We use the halfcheetah, walker2d and
1135
+ hopper medium datasets, with 3 seeds each. It can be seen that
1136
+ conditioning on each layer is beneficial for the priors, while for the
1137
+ predictors, it is better to condition on the last layer. Note that this
1138
+ experiment is in the setting of Section 4, that is, without a critic.
1139
+ overestimation for unseen actions. For instance, Kumar
1140
+ et al. (2020), Ghasemipour et al. (2022) and Rezaeifar et al.
1141
+ (2022) explicitly introduce an optimization term that lowers
1142
+ Q-values for OOD actions, while An et al. (2021) penalizes
1143
+ implicitly by utilizing the lower-confidence bound (LCB)
1144
+ of Q-values. Alternatively, the evaluation of OOD actions
1145
+ can be avoided altogether by using the upper expectile value
1146
+
1147
+ Anti-Exploration by Random Network Distillation
1148
+ function (Kostrikov et al., 2021) or by policy optimization
1149
+ within a latent action space (Chen et al., 2022; Zhou et al.,
1150
+ 2021; Akimov et al., 2022).
1151
+ In our work, we follow the anti-exploration approach
1152
+ (Rezaeifar et al., 2022). In contrast to An et al. (2021);
1153
+ Ghasemipour et al. (2022); Yang et al. (2022), we com-
1154
+ pletely eliminate ensembles for uncertainty estimation, thus
1155
+ reducing computational overhead without sacrificing perfor-
1156
+ mance. Moreover, unlike Rezaeifar et al. (2022), we have
1157
+ succeeded in using an RND for novelty detection in offline
1158
+ RL for continuous action spaces.
1159
+ Estimation bias in Q-learning. In both offline and on-
1160
+ line reinforcement learning, off-policy Q-learning methods
1161
+ suffer from an overestimation bias in the temporal differ-
1162
+ ence learning target (Van Hasselt et al., 2016; Fujimoto
1163
+ et al., 2018). This phenomenon is orthogonal to overes-
1164
+ timation due to unseen actions in offline RL, as it occurs
1165
+ even in the presence of strong conservatism constraints. It
1166
+ is mainly caused by target prediction errors for seen transi-
1167
+ tions and their propagation due to the maximum operation
1168
+ $\max_{a' \in \mathcal{A}} Q(s', a')$. To address this problem, Fujimoto et al.
1169
+ (2018) introduced clipped double Q learning (Van Hasselt
1170
+ et al., 2016) in TD3, which uses a minimum of two critics.
1171
+ This approach was later used by Haarnoja et al. (2018) in
1172
+ SAC to improve stability and accelerate convergence.
1173
+ In our work, we use clipped double Q-learning (Fujimoto
1174
+ et al., 2018), since SAC-RND is based on SAC (Haarnoja
1175
+ et al., 2018), and found it beneficial for stability. However,
1176
+ to ensure that it does not introduce additional conservatism,
1177
+ which can be a confounding factor for the impact of RND,
1178
+ we always set the number of critics to two.
1179
+ Uncertainty estimation in offline RL. Uncertainty estima-
1180
+ tion is a popular technique in reinforcement learning and is
1181
+ used for a variety of purposes such as exploration, planning,
1182
+ and robustness. In offline RL, its use is mostly limited to
1183
+ modeling epistemic uncertainty (Clements et al., 2019), in-
1184
+ cluding measuring the prediction confidence of dynamics
1185
+ models (Yu et al., 2020; Kidambi et al., 2020) or critics (An
1186
+ et al., 2021; Rezaeifar et al., 2022). This approach can be
1187
+ further used to induce uncertainty-aware penalization during
1188
+ training.
1189
+ Alternatively, uncertainty can help overcome suboptimal
1190
+ conservatism by designing more flexible offline approaches,
1191
+ e.g., conditioning on different levels of confidence to dy-
1192
+ namically adjust the level of conservatism during evaluation
1193
+ (Hong et al., 2022) or using Bayesian perspective to design
1194
+ an optimal adaptive offline RL policy (Ghosh et al., 2022).
1195
+ In our work, we estimate epistemic uncertainty and use it as
1196
+ an anti-exploration bonus to induce conservatism. Unlike
1197
+ previous approaches, we do not use ensembles to estimate
1198
+ epistemic uncertainty.
1199
+ Efficient ensembles. Ensembles are a powerful and sim-
1200
+ ple non-Bayesian baseline for uncertainty estimation that
1201
+ outperform Bayesian neural networks in practice (Lakshmi-
1202
+ narayanan et al., 2017). However, training deep ensembles
1203
+ can be both memory intensive and computationally demand-
1204
+ ing, making the design of efficient ensembles an attractive
1205
+ research direction for which numerous methods have been
1206
+ developed. For example, Gal & Ghahramani (2016) pro-
1207
+ posed to use dropout to approximate Bayesian inference in
1208
+ deep Gaussian processes, and Durasov et al. (2021) derived
1209
+ a method to interpolate between dropout and full ensembles
1210
+ with fixed masks and controllable overlap between them.
1211
+ Meanwhile, Wen et al. (2020) significantly reduced the cost
1212
+ by defining each weight matrix as the Hadamard product
1213
+ of a shared weight among all ensemble members and a
1214
+ rank-one matrix per member.
1215
+ Recently, Ghasemipour et al. (2022) showed that, in offline
1216
+ RL, none of the most popular approaches for efficient en-
1217
+ sembles are capable of delivering performance that is com-
1218
+ parable to naive ensembles, and that more work is needed in
1219
+ this research direction. In our work, we chose an alternative
1220
+ path for uncertainty estimation with RND, which was shown
1221
+ to be a fast and competitive counterpart to ensembles (Ciosek
1222
+ et al., 2019).
1223
+ 8. Conclusion
1224
+ In this work, we revisited the results from previous research
1225
+ (Rezaeifar et al., 2022), showing that with a naive choice
1226
+ of conditioning for the RND prior, it becomes infeasible
1227
+ for the actor to effectively minimize the anti-exploration
1228
+ bonus and discriminativity is not an issue. To solve this,
1229
+ we proposed conditioning based on FiLM, which led us
1230
+ to a new ensemble-free method called SAC-RND. We em-
1231
+ pirically validated that it achieves results comparable to
1232
+ ensemble-based methods and outperforms its ensemble-free
1233
+ counterparts. As such, we believe that our work is a valuable
1234
+ contribution to anti-exploration and uncertainty estimation
1235
+ in offline RL.
1236
+ References
1237
+ Akimov, D., Kurenkov, V., Nikulin, A., Tarasov, D., and
1238
+ Kolesnikov, S. Let offline rl flow: Training conserva-
1239
+ tive agents in the latent space of normalizing flows. In
1240
+ 3rd Offline RL Workshop: Offline RL as a "Launchpad",
1241
+ 2022.
1242
+ An, G., Moon, S., Kim, J.-H., and Song, H. O. Uncertainty-
1243
+ based offline reinforcement learning with diversified q-
1244
+ ensemble. Advances in neural information processing
1245
+ systems, 34:7436–7447, 2021.
1246
+ Ba, J. L., Kiros, J. R., and Hinton, G. E. Layer normalization.
1247
+
1248
+ Anti-Exploration by Random Network Distillation
1249
+ arXiv preprint arXiv:1607.06450, 2016.
1250
+ Badia, A. P., Piot, B., Kapturowski, S., Sprechmann, P.,
1251
+ Vitvitskyi, A., Guo, Z. D., and Blundell, C. Agent57:
1252
+ Outperforming the atari human benchmark. In Interna-
1253
+ tional Conference on Machine Learning, pp. 507–517.
1254
+ PMLR, 2020.
1255
+ Baker, B., Akkaya, I., Zhokhov, P., Huizinga, J., Tang, J.,
1256
+ Ecoffet, A., Houghton, B., Sampedro, R., and Clune, J.
1257
+ Video pretraining (vpt): Learning to act by watching un-
1258
+ labeled online videos. arXiv preprint arXiv:2206.11795,
1259
+ 2022.
1260
+ Berner, C., Brockman, G., Chan, B., Cheung, V., D˛ebiak, P.,
1261
+ Dennison, C., Farhi, D., Fischer, Q., Hashme, S., Hesse,
1262
+ C., et al. Dota 2 with large scale deep reinforcement
1263
+ learning. arXiv preprint arXiv:1912.06680, 2019.
1264
+ Bradbury, J., Frostig, R., Hawkins, P., Johnson, M. J., Leary,
1265
+ C., Maclaurin, D., Necula, G., Paszke, A., VanderPlas, J.,
1266
+ Wanderman-Milne, S., and Zhang, Q. JAX: composable
1267
+ transformations of Python+NumPy programs, 2018. URL
1268
+ http://github.com/google/jax.
1269
+ Burda, Y., Edwards, H., Storkey, A., and Klimov, O. Ex-
1270
+ ploration by random network distillation. arXiv preprint
1271
+ arXiv:1810.12894, 2018.
1272
+ Chen, X., Ghadirzadeh, A., Yu, T., Gao, Y., Wang, J., Li,
1273
+ W., Liang, B., Finn, C., and Zhang, C. Latent-variable
1274
+ advantage-weighted policy optimization for offline rl.
1275
+ arXiv preprint arXiv:2203.08949, 2022.
1276
+ Ciosek, K., Fortuin, V., Tomioka, R., Hofmann, K., and
1277
+ Turner, R. Conservative uncertainty estimation by fitting
1278
+ prior networks. In International Conference on Learning
1279
+ Representations, 2019.
1280
+ Clements, W. R., Van Delft, B., Robaglia, B.-M., Slaoui,
1281
+ R. B., and Toth, S. Estimating risk and uncertainty in deep
1282
+ reinforcement learning. arXiv preprint arXiv:1905.09638,
1283
+ 2019.
1284
+ Dumoulin, V., Perez, E., Schucher, N., Strub, F., Vries,
1285
+ H. d., Courville, A., and Bengio, Y. Feature-wise trans-
1286
+ formations. Distill, 2018. doi: 10.23915/distill.00011.
1287
+ https://distill.pub/2018/feature-wise-transformations.
1288
+ Durasov, N., Bagautdinov, T., Baque, P., and Fua, P.
1289
+ Masksembles for uncertainty estimation. In Proceed-
1290
+ ings of the IEEE/CVF Conference on Computer Vision
1291
+ and Pattern Recognition, pp. 13539–13548, 2021.
1292
+ Fu, J., Kumar, A., Nachum, O., Tucker, G., and Levine,
1293
+ S. D4rl: Datasets for deep data-driven reinforcement
1294
+ learning. arXiv preprint arXiv:2004.07219, 2020.
1295
+ Fujimoto, S. and Gu, S. S. A minimalist approach to offline
1296
+ reinforcement learning. Advances in neural information
1297
+ processing systems, 34:20132–20145, 2021.
1298
+ Fujimoto, S., Hoof, H., and Meger, D. Addressing function
1299
+ approximation error in actor-critic methods. In Interna-
1300
+ tional conference on machine learning, pp. 1587–1596.
1301
+ PMLR, 2018.
1302
+ Fujimoto, S., Meger, D., and Precup, D. Off-policy deep
1303
+ reinforcement learning without exploration. In Interna-
1304
+ tional conference on machine learning, pp. 2052–2062.
1305
+ PMLR, 2019.
1306
+ Gal, Y. and Ghahramani, Z. Dropout as a bayesian approx-
1307
+ imation: Representing model uncertainty in deep learn-
1308
+ ing. In international conference on machine learning, pp.
1309
+ 1050–1059. PMLR, 2016.
1310
+ Ghasemipour, S. K. S., Gu, S. S., and Nachum, O. Why so
1311
+ pessimistic? estimating uncertainties for offline rl through
1312
+ ensembles, and why their independence matters. arXiv
1313
+ preprint arXiv:2205.13703, 2022.
1314
+ Ghosh, D., Ajay, A., Agrawal, P., and Levine, S. Offline
1315
+ rl policies should be trained to be adaptive. In Interna-
1316
+ tional Conference on Machine Learning, pp. 7513–7530.
1317
+ PMLR, 2022.
1318
+ Haarnoja, T., Zhou, A., Abbeel, P., and Levine, S. Soft
1319
+ actor-critic: Off-policy maximum entropy deep reinforce-
1320
+ ment learning with a stochastic actor. In International
1321
+ conference on machine learning, pp. 1861–1870. PMLR,
1322
+ 2018.
1323
+ Hong, J., Kumar, A., and Levine, S. Confidence-conditioned
1324
+ value functions for offline reinforcement learning. arXiv
1325
+ preprint arXiv:2212.04607, 2022.
1326
+ Jarrett, D., Tallec, C., Altché, F., Mesnard, T., Munos, R.,
1327
+ and Valko, M. Curiosity in hindsight. arXiv preprint
1328
+ arXiv:2211.10515, 2022.
1329
+ Jayakumar, S. M., Czarnecki, W. M., Menick, J., Schwarz,
1330
+ J., Rae, J., Osindero, S., Teh, Y. W., Harley, T., and
1331
+ Pascanu, R. Multiplicative interactions and where to find
1332
+ them. 2020.
1333
+ Kidambi, R., Rajeswaran, A., Netrapalli, P., and Joachims,
1334
+ T. Morel: Model-based offline reinforcement learning.
1335
+ Advances in neural information processing systems, 33:
1336
+ 21810–21823, 2020.
1337
+ Kingma, D. P. and Ba, J. Adam: A method for stochastic
1338
+ optimization. arXiv preprint arXiv:1412.6980, 2014.
1339
+ Kostrikov, I., Nair, A., and Levine, S. Offline reinforce-
1340
+ ment learning with implicit q-learning. arXiv preprint
1341
+ arXiv:2110.06169, 2021.
1342
+
1343
+ Anti-Exploration by Random Network Distillation
1344
+ Kumar, A., Fu, J., Soh, M., Tucker, G., and Levine, S.
1345
+ Stabilizing off-policy q-learning via bootstrapping error
1346
+ reduction. Advances in Neural Information Processing
1347
+ Systems, 32, 2019.
1348
+ Kumar, A., Zhou, A., Tucker, G., and Levine, S. Con-
1349
+ servative q-learning for offline reinforcement learning.
1350
+ Advances in Neural Information Processing Systems, 33:
1351
+ 1179–1191, 2020.
1352
+ Kumar, A., Agarwal, R., Geng, X., Tucker, G., and Levine,
1353
+ S. Offline q-learning on diverse multi-task data both
1354
+ scales and generalizes. arXiv preprint arXiv:2211.15144,
1355
+ 2022.
1356
+ Kurenkov, V. and Kolesnikov, S. Showing your offline
1357
+ reinforcement learning work: Online evaluation budget
1358
+ matters. In International Conference on Machine Learn-
1359
+ ing, pp. 11729–11752. PMLR, 2022.
1360
+ Lakshminarayanan, B., Pritzel, A., and Blundell, C. Simple
1361
+ and scalable predictive uncertainty estimation using deep
1362
+ ensembles. Advances in neural information processing
1363
+ systems, 30, 2017.
1364
+ Lee, K.-H., Nachum, O., Yang, M., Lee, L., Freeman,
1365
+ D., Xu, W., Guadarrama, S., Fischer, I., Jang, E.,
1366
+ Michalewski, H., et al. Multi-game decision transformers.
1367
+ arXiv preprint arXiv:2205.15241, 2022.
1368
+ Levine, S., Kumar, A., Tucker, G., and Fu, J. Offline rein-
1369
+ forcement learning: Tutorial, review, and perspectives on
1370
+ open problems. arXiv preprint arXiv:2005.01643, 2020.
1371
+ Lillicrap, T. P., Hunt, J. J., Pritzel, A., Heess, N., Erez,
1372
+ T., Tassa, Y., Silver, D., and Wierstra, D. Continuous
1373
+ control with deep reinforcement learning. arXiv preprint
1374
+ arXiv:1509.02971, 2015.
1375
+ Lyu, J., Ma, X., Li, X., and Lu, Z. Mildly conservative q-
1376
+ learning for offline reinforcement learning. arXiv preprint
1377
+ arXiv:2206.04745, 2022.
1378
+ Nair, A., Gupta, A., Dalal, M., and Levine, S. Awac: Accel-
1379
+ erating online reinforcement learning with offline datasets.
1380
+ arXiv preprint arXiv:2006.09359, 2020.
1381
+ Nikulin, A., Kurenkov, V., Tarasov, D., Akimov, D., and
1382
+ Kolesnikov, S. Q-ensemble for offline rl: Don’t scale
1383
+ the ensemble, scale the batch size.
1384
+ arXiv preprint
1385
+ arXiv:2211.11092, 2022.
1386
+ Perez, E., Strub, F., De Vries, H., Dumoulin, V., and
1387
+ Courville, A. Film: Visual reasoning with a general con-
1388
+ ditioning layer. In Proceedings of the AAAI Conference
1389
+ on Artificial Intelligence, volume 32, 2018.
1390
+ Reed, S., Zolna, K., Parisotto, E., Colmenarejo, S. G.,
1391
+ Novikov, A., Barth-Maron, G., Gimenez, M., Sulsky,
1392
+ Y., Kay, J., Springenberg, J. T., et al. A generalist agent.
1393
+ arXiv preprint arXiv:2205.06175, 2022.
1394
+ Rezaeifar, S., Dadashi, R., Vieillard, N., Hussenot, L.,
1395
+ Bachem, O., Pietquin, O., and Geist, M. Offline rein-
1396
+ forcement learning as anti-exploration. In Proceedings
1397
+ of the AAAI Conference on Artificial Intelligence, vol-
1398
+ ume 36, pp. 8106–8114, 2022.
1399
+ Schrittwieser, J., Antonoglou, I., Hubert, T., Simonyan, K.,
1400
+ Sifre, L., Schmitt, S., Guez, A., Lockhart, E., Hassabis,
1401
+ D., Graepel, T., et al. Mastering atari, go, chess and shogi
1402
+ by planning with a learned model. Nature, 588(7839):
1403
+ 604–609, 2020.
1404
+ Smith, L., Kostrikov, I., and Levine, S.
1405
+ A walk in the
1406
+ park: Learning to walk in 20 minutes with model-free
1407
+ reinforcement learning. arXiv preprint arXiv:2208.07860,
1408
+ 2022.
1409
+ Srivastava, R. K., Shyam, P., Mutz, F., Ja´skowski, W., and
1410
+ Schmidhuber, J. Training agents using upside-down re-
1411
+ inforcement learning. arXiv preprint arXiv:1912.02877,
1412
+ 2019.
1413
+ Tarasov, D., Nikulin, A., Akimov, D., Kurenkov, V., and
1414
+ Kolesnikov, S. CORL: Research-oriented deep offline
1415
+ reinforcement learning library. In 3rd Offline RL Work-
1416
+ shop: Offline RL as a ”Launchpad”, 2022. URL https:
1417
+ //openreview.net/forum?id=SyAS49bBcv.
1418
+ Van Hasselt, H., Guez, A., and Silver, D. Deep reinforce-
1419
+ ment learning with double q-learning. In Proceedings of
1420
+ the AAAI conference on artificial intelligence, volume 30,
1421
+ 2016.
1422
+ Wen, Y., Tran, D., and Ba, J. Batchensemble: an alterna-
1423
+ tive approach to efficient ensemble and lifelong learning.
1424
+ arXiv preprint arXiv:2002.06715, 2020.
1425
+ Wu, Y., Tucker, G., and Nachum, O.
1426
+ Behavior regu-
1427
+ larized offline reinforcement learning. arXiv preprint
1428
+ arXiv:1911.11361, 2019.
1429
+ Yang, R., Bai, C., Ma, X., Wang, Z., Zhang, C., and Han,
1430
+ L. Rorl: Robust offline reinforcement learning via con-
1431
+ servative smoothing. arXiv preprint arXiv:2206.02829,
1432
+ 2022.
1433
+ Yu, T., Thomas, G., Yu, L., Ermon, S., Zou, J. Y., Levine, S.,
1434
+ Finn, C., and Ma, T. Mopo: Model-based offline policy
1435
+ optimization. Advances in Neural Information Processing
1436
+ Systems, 33:14129–14142, 2020.
1437
+ Zhou, W., Bajracharya, S., and Held, D. Plas: Latent action
1438
+ space for offline reinforcement learning. In Conference
1439
+ on Robot Learning, pp. 1719–1735. PMLR, 2021.
1440
+
1441
+ Anti-Exploration by Random Network Distillation
1442
+ A. Previous Research Results
1443
+ Figure 6. Anti-exploration bonus on walker2d-medium dataset for RND and CVAE. Note that figure taken from Rezaeifar et al. (2022) for
1444
+ a convenient comparison with our results in Figure 2.
1445
+ B. Toy Dataset
1446
+ 0.0
1447
+ 0.5
1448
+ 1.0
1449
+ 1.5
1450
+ 2.0
1451
+ 2.5
1452
+ 3.0
1453
+ 3.5
1454
+ 4.0
1455
+ a0
1456
+ 0.0
1457
+ 0.5
1458
+ 1.0
1459
+ 1.5
1460
+ 2.0
1461
+ 2.5
1462
+ 3.0
1463
+ 3.5
1464
+ 4.0
1465
+ a1
1466
+ State
1467
+ 0
1468
+ 1
1469
+ 2
1470
+ 3
1471
+ Figure 7. Toy dataset visualization introduced in Section 6.3. This toy dataset consists of four categorical states for each corner of the
1472
+ limited 2D actions grid. For each state, we uniformly sample 4096 two-dimensional actions within a limited square. We use one-hot
1473
+ encoding for the states during RND training.
1474
+ C. Implementation Details
1475
+ In our experiments, we use hyperparameters from Table 4 where possible and sweep over α to pick the best value for each
1476
+ dataset. We implement all of our models using the Jax (Bradbury et al., 2018) framework. For the exact implementation
1477
+ of conditioning variants for predictor and prior networks, refer to our code at https://github.com/tinkoff-ai/
1478
+ sac-rnd. Similarly to Nikulin et al. (2022); Kumar et al. (2022); Smith et al. (2022), we add Layer Normalization (Ba
1479
+ et al., 2016) to the critic after each layer as it greatly improves stability and convergence speed. For SAC-N in Section 4 we
1480
+ use the implementation from the CORL library (Tarasov et al., 2022). All experiments were performed on V100 and A100
1481
+ GPUs. With our implementation, each training for 3 million training steps usually takes ∼ 40 minutes to run (∼ 15 minutes
1482
+ for the typical 1 million steps).
1483
+ Gym Domain.
1484
+ We use the v2 version of each dataset.
1485
+ We follow the An et al. (2021) approach and run our
1486
+ algorithms for 3 million training steps and report the final normalized average score over 10 evaluation episodes.
1487
+ For the final experiments, we use 4 seeds, while using less for hyperparameter tuning.
1488
+ We tune the α co-
1489
+ efficient over the {1.0, 3.0, 4.0, 5.0, 8.0, 9.0, 10.0, 13.0, 15.0, 20.0, 25.0} range for the walker and hopper datasets.
1490
+ We found that the halfcheetah datasets require a lower level of conservatism, which is why we tune over the
1491
+ {0.001, 0.1, 0.3, 0.5, 0.8, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0} range for these datasets while keeping the same number of candidates.
1492
+ We follow the Ghasemipour et al. (2022) approach and choose the best α for each dataset (see Table 5).
1493
+ AntMaze Domain. We use the v1 version of each dataset due to the fact that the v0 version has major problems and
1494
+ bugs during generation (e.g., some trajectories have discontinuities where the agent teleports from one part of the maze to
1495
+
1496
+ CVAE
1497
+ RND
1498
+ 300000-
1499
+ 400000
1500
+ Dataset actions
1501
+ Shuffled actions
1502
+ Random actions
1503
+ 300000
1504
+ Dataset actions + Gaussian noise (std = 0.25)
1505
+ 200000
1506
+ Dataset actions + Gaussian noise (std = 0.5)
1507
+ 200000
1508
+ 100000
1509
+ 100000
1510
+ 0
1511
+ 0
1512
+ 0.5
1513
+ 1.0
1514
+ 1.5
1515
+ 2.0
1516
+ 0.0
1517
+ 0.2
1518
+ 0.4
1519
+ 0.6
1520
+ 0.8
1521
+ 1.0
1522
+ 0.0
1523
+ 1e-7Anti-Exploration by Random Network Distillation
1524
+ another 3). We follow the An et al. (2021) approach and run our algorithms for 3 million training steps and report the final
1525
+ normalized average score over 100 evaluation episodes. Same as Chen et al. (2022), we scale the reward by 100.0. We found
1526
+ that actor and critic require different levels of conservatism in these tasks, which is why we chose to decouple α and use
1527
+ separate values (the same approach was used in Rezaeifar et al. (2022)). We tune the α for the actor in the {0.5, 1.0, 1.5}
1528
+ range, and α for the critic in the {0.001, 0.01, 0.1} range. We follow the Ghasemipour et al. (2022) approach and choose
1529
+ the best α for each dataset (see Table 6).
1530
+ D. Hyperparameters
1531
+ Table 4. SAC-RND general hyperparameters.
1532
+ Parameter
1533
+ Value
1534
+ optimizer
1535
+ Adam (Kingma & Ba, 2014)
1536
+ batch size
1537
+ 1024 (256 on antmaze-*)
1538
+ learning rate (all networks)
1539
+ 1e-3 (3e-4 on antmaze-*)
1540
+ tau (τ)
1541
+ 5e-3
1542
+ hidden dim (all networks)
1543
+ 256
1544
+ num layers (all networks)
1545
+ 4
1546
+ RND embedding dim (all tasks)
1547
+ 32
1548
+ target entropy
1549
+ -action_dim
1550
+ gamma (γ)
1551
+ 0.99 (0.999 on antmaze-*)
1552
+ nonlinearity
1553
+ ReLU
1554
+ Table 5. SAC-RND best hyperparameters used in D4RL Gym domain.
1555
+ Task Name
1556
+ α
1557
+ halfcheetah-random
1558
+ 0.1
1559
+ halfcheetah-medium
1560
+ 0.3
1561
+ halfcheetah-expert
1562
+ 6.0
1563
+ halfcheetah-medium-expert
1564
+ 0.1
1565
+ halfcheetah-medium-replay
1566
+ 0.1
1567
+ halfcheetah-full-replay
1568
+ 3.0
1569
+ hopper-random
1570
+ 5.0
1571
+ hopper-medium
1572
+ 25.0
1573
+ hopper-expert
1574
+ 20.0
1575
+ hopper-medium-expert
1576
+ 15.0
1577
+ hopper-medium-replay
1578
+ 8.0
1579
+ hopper-full-replay
1580
+ 3.0
1581
+ walker2d-random
1582
+ 1.0
1583
+ walker2d-medium
1584
+ 8.0
1585
+ walker2d-expert
1586
+ 4.0
1587
+ walker2d-medium-expert
1588
+ 25.0
1589
+ walker2d-medium-replay
1590
+ 8.0
1591
+ walker2d-full-replay
1592
+ 3.0
1593
+ Table 6. SAC-RND best hyperparameters used in D4RL AntMaze domain.
1594
+ Task Name
1595
+ α (actor)
1596
+ α (critic)
1597
+ antmaze-umaze
1598
+ 1.0
1599
+ 0.1
1600
+ antmaze-umaze-diverse
1601
+ 1.0
1602
+ 0.1
1603
+ antmaze-medium-play
1604
+ 0.5
1605
+ 0.001
1606
+ antmaze-medium-diverse
1607
+ 1.0
1608
+ 0.01
1609
+ antmaze-large-play
1610
+ 1.0
1611
+ 0.01
1612
+ antmaze-large-diverse
1613
+ 0.5
1614
+ 0.01
1615
+ 3https://github.com/Farama-Foundation/D4RL/issues/77
1616
+
1617
+ Anti-Exploration by Random Network Distillation
1618
+ E. Sensitivity to Hyperparameters
1619
+ 1
1620
+ 2
1621
+ 3
1622
+ 4
1623
+ 5
1624
+ 6
1625
+ 7
1626
+ 8
1627
+ 9
1628
+ 10
1629
+ Policies Evaluated Online
1630
+ 30
1631
+ 40
1632
+ 50
1633
+ 60
1634
+ 70
1635
+ 80
1636
+ 90
1637
+ Expected Online Performance
1638
+ HalfCheetah
1639
+ Hopper
1640
+ Walker2D
1641
+ AntMaze
1642
+ Figure 8. Expected Online Performance (Kurenkov & Kolesnikov, 2022) under uniform offline policy selection. It can be seen that for
1643
+ satisfactory results in all domains a budget of at least five policies for online evaluations is needed.
1644
+ F. Pseudocode
1645
+ Algorithm 1 Soft Actor-Critic with Random Network Distillation (SAC-RND)
1646
+ Initialize policy parameters θ, Double Q-function parameters {φ1, φ2}, RND predictor and prior parameters {ψ, ψ′}, and
1647
+ offline replay buffer D
1648
+ for desired number of pretraining steps do
1649
+ Sample a mini-batch B = {(s, a)} from D
1650
+ Update RND predictor weights ψ with gradient descent using
1651
+ ∇ψ
1652
+ 1
1653
+ |B|
1654
+
1655
+ s∈B
1656
+
1657
+ ∥fψ(s, a) − ¯f¯ψ(s, a)∥2
1658
+ 2
1659
+
1660
+
1661
+ end for
1662
+ for desired number of training steps do
1663
+ Sample a mini-batch B = {(s, a, r, s′)} from D
1664
+ Compute target Q-values (shared by all Q-functions):
1665
+ y(r, s′) = r + γ
1666
+
1667
+ min
1668
+ j=1,2 Q ¯φj(s′, a′) − β log πθ(a′|s′) − αb(s′, a′)
1669
+
1670
+ where a′ ∼ πθ(·|s′) and b(s′, a′) is an anti-exploration bonus defined by Eq. (3).
1671
+ Update each Q-function Qφi with gradient descent using
1672
+ ∇φi
1673
+ 1
1674
+ |B|
1675
+
1676
+ (s,a,r,s′)∈B
1677
+
1678
+ Qφi(s, a) − y(r, s′)
1679
+ �2
1680
+ Update policy with gradient ascent using
1681
+ ∇θ
1682
+ 1
1683
+ |B|
1684
+
1685
+ s∈B
1686
+
1687
+ min
1688
+ j=1,2 Qφj(s, ˜aθ(s)) − β log π(˜aθ(s)|s) − αb(s, ˜aθ(s))
1689
+
1690
+ where ˜aθ(s) is a sample from π(·|s) which is differentiable w.r.t. θ via the reparametrization trick.
1691
+ Update target networks with ¯φi ← (1 − ρ) ¯φi + ρφi
1692
+ end for
1693
+
1694
+ Anti-Exploration by Random Network Distillation
1695
+ Algorithm 2 Simplified SAC-RND (without a critic) used in experiments for Section 4 and Section 6.4.
1696
+ Initialize policy parameters θ, RND predictor and prior parameters {ψ, ψ′}, and offline replay buffer D
1697
+ for desired number of pretraining steps do
1698
+ Sample a mini-batch B = {(s, a)} from D
1699
+ Update RND predictor weights ψ with gradient descent using
1700
+ ∇ψ
1701
+ 1
1702
+ |B|
1703
+
1704
+ s∈B
1705
+
1706
+ ∥fψ(s, a) − ¯f ¯
1707
+ ψ(s, a)∥2
1708
+ 2
1709
+
1710
+ end for
1711
+ for desired number of training steps do
1712
+ Sample a mini-batch B = {(s, a, r, s′)} from D
1713
+ Update policy with gradient descent using
1714
+ ∇θ
1715
+ 1
1716
+ |B|
1717
+
1718
+ s∈B
1719
+
1720
+ β log π(˜aθ(s)|s) + b(s, ˜aθ(s))
1721
+
1722
+ where ˜aθ(s) is a sample from π(·|s) which is differentiable w.r.t. θ via the reparametrization trick and b(s′, a′) is an
1723
+ anti-exploration bonus defined by Eq. (3).
1724
+ end for
1725
+
9NFRT4oBgHgl3EQfqTc6/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
ANE0T4oBgHgl3EQfPgBb/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8227c1dbb359d8ef14fe96ca4fd227e282d83603900a743e865fee98f3d1a5fc
3
+ size 3473453
ANE0T4oBgHgl3EQfPgBb/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3fe4d1b36566acc777b048f8926930921eb1203610522bfd2ae0298c7ab05bac
3
+ size 129851
AdAyT4oBgHgl3EQf3_pa/content/2301.00778v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01098c0acfe09288ea83c5764d5542a4c5016650f61083de6182a739e25e8ebb
3
+ size 458957
AdAyT4oBgHgl3EQf3_pa/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce5fc543ea5dbb5294c5cc4e6abe5cc49486e018d6e4b4ee39628da1ff2456a3
3
+ size 155261
B9E5T4oBgHgl3EQfTg8R/content/2301.05536v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68db8e1a9962aece0bebffce81fc27636209db70cd7024f43c67fb9b730a65cd
3
+ size 1415259
B9E5T4oBgHgl3EQfTg8R/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b26de7f97b32b70d3f5a56a888549147ca03ebc2c7756169891b8f1d51c50d9
3
+ size 179626
BdFIT4oBgHgl3EQf_ywO/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf,len=372
2
+ page_content='FEATURE SPACE EXPLORATION AS AN ALTERNATIVE FOR DESIGN SPACE EXPLORATION BEYOND THE PARAMETRIC SPACE TOMAS CABEZON PEDROSO1 and JINMO RHEE2 and DARAGH BYRNE3 1,2,3Carnegie Mellon University, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
3
+ page_content=' 1tcabezon@andrew.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
4
+ page_content='cmu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
5
+ page_content='edu, 0000-0002-5483-2676 2jinmor@andrew.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
6
+ page_content='cmu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
7
+ page_content='edu, 0000-0003-4710-7385 3daraghb@andrew.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
8
+ page_content='cmu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
9
+ page_content='edu, 0000-0001-7193-006X Abstract.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
10
+ page_content=' This paper compares the parametric design space with a feature space generated by the extraction of design features using deep learning (DL) as an alternative way for design space exploration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
11
+ page_content=' In this comparison, the parametric design space is constructed by creating a synthetic dataset of 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
12
+ page_content='000 elements using a parametric algorithm and reducing its dimensions for visualization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
13
+ page_content=' The feature space — reduced-dimensionality vector space of embedded data features — is constructed by training a DL model on the same dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
14
+ page_content=' We analyze and compare the extracted design features by reducing their dimension and visualizing the results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
15
+ page_content=' We demonstrate that parametric design space is narrow in how it describes the design solutions because it is based on the combination of individual parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
16
+ page_content=' In comparison, we observed that the feature design space can intuitively represent design solutions according to complex parameter relationships.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
17
+ page_content=' Based on our results, we discuss the potential of translating the features learned by DL models to provide a mechanism for intuitive design exploration space and visualization of possible design solutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
18
+ page_content=' Keywords.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
19
+ page_content=' Deep Learning, VAE, Design Space, Feature Design Space, Parametric Design Space, Design Exploration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
20
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
21
+ page_content=' Introduction Parametric modeling has acquired widespread acceptance among creative practitioners as it allows the synthesis of various design options and solutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
22
+ page_content=' Changing the parameters in this modeling process, either manually or randomly, can rapidly create a vast set of design variations (Toulkeridou, 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
23
+ page_content=' Navigating the resulting parametric design space — where the design variants are topologically placed by their parameters — is part of the design exploration process — a crucial step in the development of new alternatives and design solutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
24
+ page_content=' Exploration of the parametric design space allows creative practitioners T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
25
+ page_content=' CABEZON PEDROSO, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
26
+ page_content=' RHEE AND D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
27
+ page_content=' BYRNE many benefits: to reach satisfying solutions, better define design problems, and understand the opportunities and limitations of the possible solutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
28
+ page_content=' Despite these benefits, design exploration is laborious within the parametric space and challenged along two fronts: comparison and selection (Fuchkina et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
29
+ page_content=', 2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
30
+ page_content=' Parametric design exploration is an iterative process that focuses on the variation of these individual parameters, rather than on the relationship among them (Yamamoto and Nakakoji, 2005).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
31
+ page_content=' Hence, comparing one design solution with others by their parameters alone does not always result in a superior solution;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
32
+ page_content=' for example, the variants generated by the local combination of parameters might not match the design requirements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
33
+ page_content=' Moreover, infinite alternative design solutions can be generated by inputting new parameter values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
34
+ page_content=' Thus, the parametric design space consists of a huge amount of design variants that cannot be fully or sufficiently explored.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
35
+ page_content=' We propose an alternative way to construct and examine the design space, by extracting features from a DL model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
36
+ page_content=' By comparing and analyzing how the DL feature design space differs from the parametric design space, we illustrate the potential of feature design space for design practitioners during the design exploration process and provide a new way to compare, examine and select the design alternatives based on the exploration of a properly constrained design space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
37
+ page_content=' No previous approach to compare the parametric design space and feature design space as design exploration tools has been found.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
38
+ page_content=' To demonstrate how the feature space compares to the parametric space, we designed an experiment to construct both a parametric design space and a feature design space using the same dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
39
+ page_content=' The dataset consists of 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
40
+ page_content='000 synthetic 3D models produced by a parametric algorithm with five parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
41
+ page_content=' This parametric design space consists of five axes;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
42
+ page_content=' each axis corresponds to each of the parameters that are used as inputs of the parametric algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
43
+ page_content=' Subsequently, this same dataset is used to train a DL model to compress the data into a feature vector of 128 dimensions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
44
+ page_content=' Both the parametric space (five-axes) and the feature space (128 axes) are not directly visualizable due to their high dimensionality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
45
+ page_content=' Nevertheless, as visual feedback plays an important role in design exploration (Bradner, Iorio and Davis, 2014), we employ a dimensionality reduction algorithm (t-SNE) to the design space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
46
+ page_content=' We are able to illustrate the design exploration space, showing how the data is distributed across both the parametric and feature design spaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
47
+ page_content=' In the next section, we describe the generation of the dataset, as well as the construction of parametric design space and its visualization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
48
+ page_content=' In Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
49
+ page_content=', we illustrate how training a DL model resulted in a feature space for design exploration and comparison with the parametric approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
50
+ page_content=' Then, in Section 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
51
+ page_content=', we will compare, contrast, and discuss the characteristics of the DL feature space and the parametric space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
52
+ page_content=' (Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
53
+ page_content=') FEATURE SPACE EXPLORATION AS AN ALTERNATIVE FOR DESIGN SPACE EXPLORATION BEYOND THE PARAMETRIC SPACE Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
54
+ page_content=' The overall process of comparing parametric design space and feature space from deep learning 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
55
+ page_content=' Constructing Parametric Design Space 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
56
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
57
+ page_content=' DATASET GENERATION To conduct a design space comparison, a simple parametric modeling system was designed: a parametric algorithm for generating different styles of vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
58
+ page_content=' As with handcraft of pottery wheel throwing, a simple Bezier curve with three control points was turned around an axis to generate each 3D digital vessels;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
59
+ page_content=' the form of each vessel is specified by the five parameters that were used as inputs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
60
+ page_content=' These parameters, as can be seen in Figure 2, are: the height of the vessel, the width of the base, the width of the top opening, and the horizontal and vertical coordinates of the central control point of the Bezier curve that are used to create the curve of the form.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
61
+ page_content=' The five parameters are represented as a vector, and each vector corresponds to a specific 3D model of a vessel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
62
+ page_content=' Using this system, we created a 3D vessel dataset by randomly generating a total of 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
63
+ page_content='000 different vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
64
+ page_content=' The total shape of the parametric representation of the vessel dataset is [15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
65
+ page_content='000, 5], however, as it will be explained in the next section, Parametric 5 algorithm Parametric Data parameters Design Space Comparison Feature VAE DesignSpace ENCODER DECODERT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
66
+ page_content=' CABEZON PEDROSO, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
67
+ page_content=' RHEE AND D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
68
+ page_content=' BYRNE only 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
69
+ page_content='000 vessels were used for the space exploration and visualization, so this will be a design space of size [3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
70
+ page_content='000, 5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
71
+ page_content=' Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
72
+ page_content=' Upper: An illustration of the dataset parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
73
+ page_content=' Lower: Three illustrative examples from the dataset with the parameters and the resulting 3D form side-by-side.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
74
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
75
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
76
+ page_content=' DIMENSIONALITY REDUCTION As a five-dimensional space makes it hard to compare models and to visualize and compare the characteristics, we employed a dimensionality reduction process to reduce the space to two-dimensions and enable the objects to be plotted and compared to one another.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
77
+ page_content=' Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
78
+ page_content=' shows the overall process of visualizing the space using t-Distributed Stochastic Neighbour Embedding (t-SNE) algorithm (van der Maaten and Hinton, 2008).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
79
+ page_content=' t-SNE is a popular dimensionality-reduction algorithm for visualizing high-dimensional data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
80
+ page_content=' The hyper-parameters used for this reduction are: perplexity: 30;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
81
+ page_content=' learning rate: 200;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
82
+ page_content=' and iterations: 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
83
+ page_content='000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
84
+ page_content=' Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
85
+ page_content=' Illustration of the dimensional reduction process for the 3D vessel dataset, and the construction of a parametric design space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
86
+ page_content=' After dimensional reduction, each point in the plot represents the corresponding embedding of a vessel in the parametric design space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
87
+ page_content=' Each point is expressed as 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
88
+ page_content=' Pot top width 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
89
+ page_content=' Pot bottom width 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
90
+ page_content=' Control point horizontal coordinate 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
91
+ page_content=' Control point vertical coordinate 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
92
+ page_content=' Pot height 2TopLevelApp (400 x 400) Parameters: bottomWidth: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
93
+ page_content='452 topWidth: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
94
+ page_content='943 height: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
95
+ page_content='593 thickness: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
96
+ page_content='050 bMp_x: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
97
+ page_content='797 bMp_y: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
98
+ page_content='093 bLp_x: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
99
+ page_content="943 ≤69'0 :K_d7q Figure 1 Press: d, to display the mesh s, to savethe,sti file 100 75 50 25 0 25 50 75 100 100 50 100 50 0 50 50 100 100 x=-93." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
100
+ page_content='6943,y=126.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
101
+ page_content='5305,z=148.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
102
+ page_content='6895TopLevelApp(400x400) Parameters: bottomWidth: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
103
+ page_content='652 topWidth: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
104
+ page_content='676 height: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
105
+ page_content='595 thickness: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
106
+ page_content='050 bMp_x: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
107
+ page_content='675 bMp_y: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
108
+ page_content='019 bLp_xc 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
109
+ page_content='676 bLp_y: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
110
+ page_content='595 Figure1 Press: d, to display the mesh bezier s, to save the,sti flle 100 50 0 -50 -100 100 50 -100 0 -50 50 0 50 -100 100 x=-71.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
111
+ page_content='5373,y=146.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
112
+ page_content='2703,z=159.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
113
+ page_content='6157TopLevelApp(400x400) Parameters: bottomWidth: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
114
+ page_content='273 height: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
115
+ page_content='755 thickness: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
116
+ page_content='050 bMp_x: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
117
+ page_content='719 bMp_y: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
118
+ page_content='524 bLp_xc 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
119
+ page_content='904 bLp_y: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
120
+ page_content='755 Figure1 Press: d, to display the mesh bezier s, to save the,sti flle 75 50 25 0 -25 50 75 100 75 50 25 -100 0 -75 _50 -25 -25 0 50 25 75 50 75 100 100[3k,5] [3k,2] dimension 1 PLOT .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
121
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
122
+ page_content=' t-SNE x Parametric Dimensional dimension 0 Design Space ReductionFEATURE SPACE EXPLORATION AS AN ALTERNATIVE FOR DESIGN SPACE EXPLORATION BEYOND THE PARAMETRIC SPACE a 2D image of the profile cut section of the corresponding vessel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
123
+ page_content=' Figure 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
124
+ page_content=' represents the reduced parametric design space of the dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
125
+ page_content=' Figure 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
126
+ page_content=' A 2D visualization of the parametric design space of the vessel dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
127
+ page_content=' Inset image: a detailed section for a subset of the models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
128
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
129
+ page_content=' Constructing the Feature Space To construct the design space based on the features and not the parameters, we used a Variational Autoencoder (VAE) as a tool for extracting the morphological features of the vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
130
+ page_content=' VAEs (Kingma and Welling, 2013) are a type of generative deep neural network used for statistical inference problems as they generalize a probabilistic distribution of the given dataset and synthesize new data samples from that distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
131
+ page_content=' VAEs are composed of two modules: encoder and decoder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
132
+ page_content=' The encoder abstracts the input data into smaller dimensional vectors, latent vectors, and the decoder reconstructs the latent vector back into a 3D shape.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
133
+ page_content=' During the encoding process, the network captures and extracts the features of the input data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
134
+ page_content=' These features can be topologically placed in the data space, namely, latent space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
135
+ page_content=' In the latent space, the distance between two data points represents the degree of resemblance of data: the closer points, the more resembled.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
136
+ page_content=' We translate this latent space as the feature space for an alternative way to explore the design space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
137
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
138
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
139
+ page_content=' DATA PRE-PROCESSING Different representations of 3D data have been used in DL research, like point clouds (Achlioptas et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
140
+ page_content=', 2018), meshes (Ranjan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
141
+ page_content=', 2018), or voxels (Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
142
+ page_content=', 2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
143
+ page_content=' As resolutions of the data is not key for our purpose rather than the extracted features of it;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
144
+ page_content=' and because we will implement a VAE for this experiment that needs fixed space inputs for the Convolutional Neural Networks (CNNs), we will be representing our 3D data with voxels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
145
+ page_content=' Voxels are discretized three- dimensional grids containing a binary value of volumetric occupancy of an object;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
146
+ page_content=' they distinguish between the elements on the grid that are filled with material and those that are empty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
147
+ page_content=' The size of the voxel will determine the number of divisions 1 4 I -- I 1 1 1 1 1 I !' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
148
+ page_content=' 1 I 1 1 1 I 1 1 1 I 1 I =T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
149
+ page_content=' CABEZON PEDROSO, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
150
+ page_content=' RHEE AND D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
151
+ page_content=' BYRNE of the grid, consequently, the resolution at which we represent our 3D models;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
152
+ page_content=' the larger size, the more detailed 3D models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
153
+ page_content=' In this experiment, we used 32-sized voxels so that a 3D vessel model is represented by 32x32x32 grid, the shape of the entire dataset is [15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
154
+ page_content='000, 32, 32, 32].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
155
+ page_content=' Finally, the dataset was then divided into two groups: 80% of the dataset (12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
156
+ page_content='000 vessels) was used for training the DL model, and the remaining 20% (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
157
+ page_content='000 vessels) was used for testing the model and the parametric and feature space analysis and comparison.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
158
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
159
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
160
+ page_content=' TRAINING For training the model, we adopted the VAE architecture implemented in ‘Adversarial Generation of Continuous Implicit Shape Representations’ (Kleineberg, Fey and Weichert, 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
161
+ page_content=' The encoder consists of four residual blocks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
162
+ page_content=' Each residual block is composed of a 3D convolution layer, followed by a batch normalization and a Leaky ReLu activation layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
163
+ page_content=' The decoder, on the contrary, comprises four residual blocks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
164
+ page_content=' Each block starts with a batch normalization, followed by a Leaky ReLu activation layer, and finally a 3D transposed convolution layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
165
+ page_content=' The following hyper-parameters are used for training the VAE with the voxelized vessel dataset: batch size 32, Adam optimizer (Kingma and Ba, 2015), learning rate 5e-.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
166
+ page_content=' The model was trained in Google Colab Pro using the Nvidia Tesla T4 GPU.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
167
+ page_content=' Figure 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
168
+ page_content=' Training process losses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
169
+ page_content=' The model was trained for a total of 240 epochs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
170
+ page_content=' We early stopped the model before the model started to overfit, Figure 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
171
+ page_content=' The loss function used during training was a combination of two losses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
172
+ page_content=' The first one, is the Kullback–Leibler divergence (KLD) loss (Kullback and Leibler, 1951), with a weight in the total loss formula of 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
173
+ page_content=' This function is a measurement of the difference between two statistical distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
174
+ page_content=' The second loss is the Minimum Square Distance (MSE) loss (Sammut and Webb, 2010).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
175
+ page_content=' It is used as the reconstruction loss and measures the error between the input voxels and the reconstructed output.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
176
+ page_content=' Figure 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
177
+ page_content=' shows the reasonable quality of the reconstruction of the training result after 240 epochs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
178
+ page_content=' To ensure the performance of the model, it was evaluated using the test set and showed that the model maintained the accuracy with the new dataset, which shows 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
179
+ page_content='07 IMSE Loss 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
180
+ page_content='06 KLD Loss Total Loss Total Loss 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
181
+ page_content='05 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
182
+ page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
183
+ page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
184
+ page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
185
+ page_content='01 0 50 100 150 200 250 EpochsFEATURE SPACE EXPLORATION AS AN ALTERNATIVE FOR DESIGN SPACE EXPLORATION BEYOND THE PARAMETRIC SPACE that the model generalizes well to new data and is able to encode never seen before 3D vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
186
+ page_content=' Figure 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
187
+ page_content=' Two examples of reconstructions from the trained VAE: the section slides and 3D voxels of the ground truth (the top row of each example) and the reconstruction (bottom of each example).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
188
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
189
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
190
+ page_content=' DIMENSIONALITY REDUCTION Figure 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
191
+ page_content=' Feature space generation and visualization diagram.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
192
+ page_content=' Once the VAE is trained, the encoder is used to extract the features of each vessel in the test dataset from 32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
193
+ page_content='768 dimensions, the size of each voxelized vessel, into 128-dimensional vectors, the latent vectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
194
+ page_content=' Consequently, the entire test dataset of the vessels is represented into vectors whose total shape is [3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
195
+ page_content='000, 128].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
196
+ page_content=' Like in the parametric case, 128 dimensions are non-visualizable so the same process as in Section 2 is followed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
197
+ page_content=' t-SNE algorithm is used to reduce the dimensionality of each vector and plot the resulting two dimensions in an image with the section of each of the vessels (Figure 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
198
+ page_content=').' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
199
+ page_content=' The hyper-parameters used for this reduction are: perplexity: 50;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
200
+ page_content=' learning rate: 700;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
201
+ page_content=' and iterations: 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
202
+ page_content=' Figure 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
203
+ page_content=' shows the results of distributed feature vectors in the reduced dimensional space, the feature space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
204
+ page_content=' 1- Input (Ground truth) 10 10 10 20 30 05 20 20 20 20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
205
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
206
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
207
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
208
+ page_content='0 Output 10 to 10 O1 1o 20 20 20 20 20 20 30 20 0 20 20 20 20 20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
209
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
210
+ page_content='0 20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
211
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
212
+ page_content='0 2- Input (Ground truth) 10 10 10 10 10 20 20 20 20 30 20 20 20 20 20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
213
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
214
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
215
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
216
+ page_content='0 Output 10 OT 10 10 10 10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
217
+ page_content='6 20 20 20 20 20 20 20 30 20 20 20 20 0 20 o 20 0 20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
218
+ page_content='0- 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
219
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
220
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
221
+ page_content='0[3k,128] [3k,2] dimension ENCODER PLOT voxel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
222
+ page_content='npy t-SNE : dimension 0 Latent Dimensional object.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
223
+ page_content='stl Space ReductionT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
224
+ page_content=' CABEZON PEDROSO, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
225
+ page_content=' RHEE AND D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
226
+ page_content=' BYRNE Figure 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
227
+ page_content=' A 2D visualization of the feature design space of the vessel dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
228
+ page_content=' Inset image: a detailed section for a subset of the models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
229
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
230
+ page_content=' Comparison Between the spaces Figure 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
231
+ page_content=' shows that similar vessels have been clustered together.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
232
+ page_content=' Thinner vessels are located at the top right of the image, in contrast to the opposite lower bottom corner with the bigger vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
233
+ page_content=' The figure illustrates how the VAE model is able to understand the relationship between the parameters and their influence on the output morphological shape.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
234
+ page_content=' On the contrary, in the parametric space (Figure 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
235
+ page_content=' ), we can see how concave vessels were gathered at the bottom of the image, however, if the height of the vessels is considered, we can see that this parameter was not considered when clustering the vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
236
+ page_content=' Parametric space is based on each parameter independently, and not on the relationship among them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
237
+ page_content=' Therefore, we observe that parametric design space insufficiently expresses the final form characteristics of the vessels by the combinations of the parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
238
+ page_content=' On the contrary, in Figure 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
239
+ page_content=', the feature space, a gradual change in the shape or concavity as well as height or width is observed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
240
+ page_content=' To further examine and compare the characteristics of both design spaces, we used a clustering, algorithm: a Density-Based Spatial Clustering of Applications with Noise (DBSCAN) (Ester M et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
241
+ page_content=', 1996).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
242
+ page_content=' It is one of the most common clustering algorithms that finds core samples of high density and expands clusters with them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
243
+ page_content=' Figure 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
244
+ page_content=' shows the results of this clustering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
245
+ page_content=' The parametric design space has a total of seven clusters: three of them large, and four of them small.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
246
+ page_content=' It shows how the parametric design space doesn’t provide enough information to intuitively compare the design variants locally, this space shows extreme changes in vessel forms even in the same cluster.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
247
+ page_content=' "FEATURE SPACE EXPLORATION AS AN ALTERNATIVE FOR DESIGN SPACE EXPLORATION BEYOND THE PARAMETRIC SPACE The feature design space, on the contrary, has a total of nine clusters: six main big clusters, and three smaller ones.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
248
+ page_content=' In the feature design space, we can trace smooth changes in the forms as we move through the different clusters (local changes) and along the whole image (global changes).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
249
+ page_content=' Shorter vessels are located on the top, while taller ones are on the bottom.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
250
+ page_content=' If we move on the horizontal axis, the curve that generates the vessels goes from a concave shape on the right to a convex shape on the left.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
251
+ page_content=' This gives the designer the ability to locally compare similar design alternatives.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
252
+ page_content=' Parametric Design Space: Feature Design Space: Figure 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
253
+ page_content=' Final visualization and clusters of the parametric and feature design spaces with representative vessels of each group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
254
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
255
+ page_content=' Conclusion and Future work We constructed the parametric and the feature design spaces using a custom synthetic dataset and a VAE model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
256
+ page_content=' By comparing the parametric and feature design spaces, we observed improved distributions of design alternatives in the later.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
257
+ page_content=' When the multi-dimensional parametric design space is projected into a 2D space (Figures 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
258
+ page_content=' and 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
259
+ page_content=' left), the clusters are insufficiently relevant to the morphological characteristics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
260
+ page_content=' On the other hand, when the multi-dimensional feature space is projected into a 2D space (Figures 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
261
+ page_content=' and 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
262
+ page_content=' right), the clusters show sufficient relevance to the features of the data they represent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
263
+ page_content=' Based on this comparison, we conclude that combination of individual parameters in the parametric design space is limited in representing the morphological characteristics of the shapes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
264
+ page_content=' However, we showed that DL models can be used to extract design features from 3D models and that the extracted features are more complex than the combinations of individual parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
265
+ page_content=' Hence, we conclude that the extracted features, that include information of the relationships between the parameters, can construct a well-distributed design space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
266
+ page_content=' For that reason, we propose feature design space as a tool for design space exploration that creative practitioners can use as a new way for looking at objects beyond the parametric design space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
267
+ page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
268
+ page_content=' CABEZON PEDROSO, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
269
+ page_content=' RHEE AND D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
270
+ page_content=' BYRNE Our results and implications are limited to a single dataset and DL model, however the results seem promising.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
271
+ page_content=' Future work will expand on this study with more diverse datasets generated by more complex parametric algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
272
+ page_content=' Accordingly, to perform the feature extraction, we would like to train other types of DL models to investigate different potentials of DL in design.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
273
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
274
+ page_content=' References Achlioptas, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
275
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
276
+ page_content=' (2018) ‘Learning Representations and Generative Models for 3D Point Clouds’, In International conference on machine learning (pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
277
+ page_content=' 40-49).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
278
+ page_content=' PMLR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
279
+ page_content=' Bradner, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
280
+ page_content=', Iorio, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
281
+ page_content=' and Davis, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
282
+ page_content=' (2014) ‘Parameters Tell the Design Story: Ideation and Abstraction in Design Optimization’, In Proceedings of the symposium on simulation for architecture & urban design (Vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
283
+ page_content=' 26) Ester M et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
284
+ page_content=' (1996) ‘A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise.’, KDD’96 Proceedings of the Second International Conference on Knowledge Discovery and Data Mining, 96, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
285
+ page_content=' 226–231.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
286
+ page_content=' Fuchkina, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
287
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
288
+ page_content=' (2018) ‘Design Space Exploration Framework’, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
289
+ page_content=' 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
290
+ page_content=' Kingma, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
291
+ page_content='P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
292
+ page_content=' and Ba, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
293
+ page_content='L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
294
+ page_content=' (2015) ‘Adam: A method for stochastic optimization’, in 3rd International Conference on Learning Representations, ICLR 2015 - Conference Track Proceedings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
295
+ page_content=' International Conference on Learning Representations, ICLR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
296
+ page_content=' Available at: https://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
297
+ page_content='org/abs/1412.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
298
+ page_content='6980v9 (Accessed: 30 May 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
299
+ page_content=' Kingma, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
300
+ page_content='P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
301
+ page_content=' and Welling, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
302
+ page_content=' (2013) ‘Auto-Encoding Variational Bayes’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
303
+ page_content=' Available at: https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
304
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
305
+ page_content='48550/arXiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
306
+ page_content='1312.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
307
+ page_content='6114.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
308
+ page_content=' Kleineberg, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
309
+ page_content=', Fey, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
310
+ page_content=' and Weichert, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
311
+ page_content=' (2020) ‘Adversarial Generation of Continuous Implicit Shape Representations’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
312
+ page_content=' arXiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
313
+ page_content=' Available at: https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
314
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
315
+ page_content='48550/arXiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
316
+ page_content='2002.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
317
+ page_content='00349.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
318
+ page_content=' Kullback, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
319
+ page_content=' and Leibler, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
320
+ page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
321
+ page_content=' (1951) ‘On Information and Sufficiency’, The Annals of Mathematical Statistics, 22(1), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
322
+ page_content=' 79–86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
323
+ page_content=' Available at: https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
324
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
325
+ page_content='1214/aoms/1177729694.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
326
+ page_content=' van der Maaten, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
327
+ page_content=' and Hinton, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
328
+ page_content=' (2008) ‘Viualizing data using t-SNE’, Journal of Machine Learning Research, 9, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
329
+ page_content=' 2579–2605.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
330
+ page_content=' Ranjan, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
331
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
332
+ page_content=' (2018) ‘Generating 3D Faces using Convolutional Mesh Autoencoders’, in.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
333
+ page_content=' Proceedings of the European Conference on Computer Vision (ECCV), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
334
+ page_content=' 704–720.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
335
+ page_content=' Available at: https://openaccess.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
336
+ page_content='thecvf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
337
+ page_content='com/content_ECCV_2018/html/Anurag_Ranjan_Generating_3 D_Faces_ECCV_2018_paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
338
+ page_content='html (Accessed: 7 December 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
339
+ page_content=' Sammut, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
340
+ page_content=' and Webb, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
341
+ page_content='I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
342
+ page_content=' (eds) (2010) ‘Mean Squared Error’, in Encyclopedia of Machine Learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
343
+ page_content=' Boston, MA: Springer US, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
344
+ page_content=' 653–653.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
345
+ page_content=' Available at: https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
346
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
347
+ page_content='1007/978-0-387-30164-8_528.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
348
+ page_content=' Toulkeridou, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
349
+ page_content=' (2019) ‘Steps towards AI augmented parametric modeling systems for supporting design exploration’, in Blucher Design Proceedings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
350
+ page_content=' 37 Education and Research in Computer Aided Architectural Design in Europe and XXIII Iberoamerican Society of Digital Graphics, Joint Conference (N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
351
+ page_content=' 1), Porto, Portugal: Editora Blucher, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
352
+ page_content=' 81–92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
353
+ page_content=' Available at: https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
354
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
355
+ page_content='5151/proceedings-ecaadesigradi2019_602.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
356
+ page_content=' Wu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
357
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
358
+ page_content=' (2017) ‘Learning a Probabilistic Latent Space of Object Shapes via 3D Generative-Adversarial Modeling’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
359
+ page_content=' arXiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
360
+ page_content=' Available at: http://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
361
+ page_content='org/abs/1610.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
362
+ page_content='07584 (Accessed: 7 December 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
363
+ page_content=' Yamamoto, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
364
+ page_content=' and Nakakoji, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
365
+ page_content=' (2005) ‘Interaction design of tools for fostering creativity in the early stages of information design’, International Journal of Human-Computer Studies, 63(4–5), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
366
+ page_content=' 513–535.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
367
+ page_content=' Available at: https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
368
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
369
+ page_content='1016/j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
370
+ page_content='ijhcs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
371
+ page_content='2005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
372
+ page_content='04.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
373
+ page_content='023.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BdFIT4oBgHgl3EQf_ywO/content/2301.11416v1.pdf'}
BtAzT4oBgHgl3EQfTfyw/content/2301.01251v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aac16015bb7d946d56d1ea22305cc976dd3485319d6be5899e7462c79695e161
3
+ size 781054
BtAzT4oBgHgl3EQfTfyw/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef9dd80ed6f027e746665375bf71bb6b918b85b2e59e1b214caa953ecffcf7de
3
+ size 3211309
C9E2T4oBgHgl3EQfSAeL/content/tmp_files/2301.03788v1.pdf.txt ADDED
@@ -0,0 +1,1595 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 1
2
+ A Fundamental Tradeoff Among Storage,
3
+ Computation, and Communication for
4
+ Distributed Computing over Star Network
5
+ Qifa Yan Member, IEEE, Xiaohu Tang Senior Member, IEEE,
6
+ Meixia Tao Fellow, IEEE, and Qin Huang Senior Member, IEEE
7
+ Abstract
8
+ Coded distributed computing can alleviate the communication load by leveraging the redundant
9
+ storage and computation resources with coding techniques in distributed computing. In this paper, we
10
+ study a MapReduce-type distributed computing framework over star topological network, where all
11
+ the workers exchange information through a common access point. The optimal tradeoff among the
12
+ normalized number of the stored files (storage load), computed intermediate values (computation load)
13
+ and transmitted bits in the uplink and downlink (communication loads) are characterized. A coded
14
+ computing scheme is proposed to achieve the Pareto-optimal tradeoff surface, in which the access point
15
+ only needs to perform simple chain coding between the signals it receives, and information-theretical
16
+ bound matching the surface is also provided.
17
+ Index Terms
18
+ Storage, coded computing, communication, MapReduce, star network
19
+ I. INTRODUCTION
20
+ The rapid growth of computationally intensive applications on mobile devices has attracted
21
+ much research interest in designing efficient distributed computing frameworks. One of the most
22
+ important programing models for distributed computing is MapReduce [1], [2], which has been
23
+ utilized to deal with computation tasks with data sizes as large as tens of terabytes.
24
+ MapReduce framework allows to assign multiple computation tasks to distributed nodes, where
25
+ each node only stores a subset of files. This is done by decomposing each function to be computed
26
+ into a set of “map” functions and a “reduce” function, where each map function can be computed
27
+ from a batch of data, with the output called intermediate values (IVs), while the computation of
28
+ a “reduce” function needs to collect the IVs from all the data as inputs. The whole procedure is
29
+ composed of three phases, i.e., map, shuffle and reduce. In the map phase, each distributed node
30
+ computes the map functions on its local file batch assigned by the server and generates output
31
+ IVs; in the shuffle phase, the nodes exchange their computed IVs to facitate each node to obtain
32
+ the IVs needed by its assigned reduce functions; in the reduce phase, each node computes its
33
+ assigned reduce functions by decoding all the corresponding IVs.
34
+ Q. Yan and X. Tang are with the Information Coding & Transmission Key Lab of Sichuan Province, CSNMT Int. Coop. Res.
35
+ Centre (MoST), Southwest Jiaotong University, Chengdu 611756, China(email: [email protected], [email protected]).
36
+ M.
37
+ Tao
38
+ is
39
+ with
40
+ the
41
+ Department
42
+ of
43
+ Electronic
44
+ Engineering,
45
+ Shanghai
46
+ Jiao
47
+ Tong
48
+ University,
49
+ Shanghai
50
+ 200240,
51
+ China(email:[email protected]).
52
+ Q. Huang is with the School of Electronic and Information Engineering, Beihang University, Beijing 100191, China
53
+ (email:[email protected]).
54
+ arXiv:2301.03788v1 [cs.IT] 10 Jan 2023
55
+
56
+ 2
57
+ Recently, a coded distributed computing (CDC) scheme was proposed by Li et al. [3], where
58
+ the files are stored multiple times across the distributed nodes in the map phase. The IVs are also
59
+ computed multiple times accordingly, such that multicast opportunities are created for the shuffle
60
+ phase. As a result, the communication load was reduced significantly compared to traditional
61
+ uncoded scheme. It was proved in [3] that the scheme achieves the optimal communication load
62
+ for a given total storage requirements. Interestingly, the normalized number of files stored across
63
+ the nodes was termed computation load by Li et al, because each node calculates all the IVs that
64
+ can be obtained from the data stored at that node in the model therein, no matter if these IVs
65
+ are used or not in the subsequent phases. Subsequently, Ezzeldin [4] and Yan et al [5], [6] found
66
+ that some IVs are computed but not used in the model. For this reason, Yan et al reformulated
67
+ the problem as a tradeoff between storage, computation, and communication loads in [7], which
68
+ allows each node to choose any subset of IVs to compute from its stored files.
69
+ Some interesting works that extend CDC have been proposed, for example, the technique was
70
+ combined with maximum distance separable (MDS) code in matrix-vector multiplication tasks to
71
+ resist stragglers in [8]; stragglers with general functions are considered in [9], [10]; the optimal
72
+ resource allocations are considered in [11]; [12]–[14] investigated the iterative procedures of data
73
+ computing and shuffling; [15] studied the case when each node has been randomly allocated
74
+ files; [16] investigated the case with random connectivity between nodes.
75
+ The coded distributed computing technique is extended to wireless distributed computing
76
+ [17], [18], where the computation is typically carried out by the wireless devices. Due to the
77
+ decentralized natural of the wireless networks, the nodes in wireless networks normally need a
78
+ central Access Point (AP) to exchange data, which leads to uplink and downlink communications.
79
+ For example, smart-phone end users typically communicate with each other through a base station
80
+ in cellular networks, which operates in a star network. In [19] and [20], Li et al. investigated
81
+ distributed computing in a wireless network where the nodes performs data shuffling through an
82
+ AP. The optimal storage-communication tradeoff was characterized for both uplink and downlink
83
+ transmissions.
84
+ In this paper, following the conventions of Ezzeldin [4] and Yan et al [7], we investigate
85
+ a distributed computing system with star network, where all nodes exchange IVs through an
86
+ AP, but each node is allowed to choose any arbitrary subset of IVs to compute from its stored
87
+ files. In particular, in addition to the storage and computation loads as considered in [7], the
88
+ communication load includes both upload and download. The main contribution of this paper is
89
+ the characterization of the Pareto-optimal surface in the storage-computation-upload-download
90
+ space for distributed computing over star network. The idea is to form the same multicast
91
+ signals as in CDC scheme but compute less IVs by ignoring the un-used IVs in the map phase
92
+ in the uplink, and combine them through a simple chain coding to form the downlink signals at
93
+ the AP. It turns out that, for any given storage-computation pair, both the optimal upload and
94
+ download communication costs can be simultaneously achieved by a coded computing scheme
95
+ that oriented from CDC. The information-theoretical bound matching the Pareto-optimal surface
96
+ is also presented.
97
+ Paper Organization: Section II presents the system model. Section III summarizes the main
98
+ results. Section IV presents the coded computing scheme that achieves the optimal surface, and
99
+ Section V provides information-theoretical bound. Finally, Section VI concludes the paper.
100
+ Notations: Let N+ be the set of positive integers, and F2 be the binary field. For m, n ∈ N+,
101
+ denote the n-dimensional vector space over F2 by Fn
102
+ 2, and the integer set {1, . . . , n} by [n]. If
103
+ m < n, we use [m : n] to denote the set {m, m + 1, . . . , n}. We also use interval notations, e.g.,
104
+
105
+ 3
106
+ Fig. 1: A Distributed Computing System with Star Network
107
+ [a, b] ≜ {x : a ≤ x ≤ b} and [a, b) ≜ {x : a ≤ x < b} for real numbers a, b such that a < b. The
108
+ bitwise exclusive OR (XOR) operation is denoted by ⊕. For sets we use upper case calligraphic
109
+ font, e.g., A, and for collections (sets of sets) we use upper case Greek letters with bold font,
110
+ e.g., Ω. We denote a point in two or three dimensional Euclidean space by an upper case letter.
111
+ A line segment with end points A1, A2 or a line through the points A1, A2 is denoted by A1A2.
112
+ A triangle with vertices A1, A2, A3 is denoted by △A1A2A3. A trapezoid with the four edges
113
+ A1A2, A2A3, A3A4, and A4A1, where A1A2 is parallel to A3A4, is denoted by ⊟A1A2A3A4.
114
+ Let F be a set of facets, if the facets in F form a continuous surface, then we refer to this
115
+ surface simply as F.
116
+ II. SYSTEM MODEL
117
+ Let K, N, W, U, V be given positive integers. Consider a star network consisting of K dis-
118
+ tributed computing nodes {1, . . . , K} that can communicate with each other through a common
119
+ AP, as illustrated in Fig. 1. Each of the K nodes can transmit signals to the AP through an
120
+ uplink channel, while the AP can broadcast signals to all the K nodes via a downlink channel.
121
+ Each of the K nodes aims to compute an individual function from a set of N files,
122
+ W = {w1, . . . , wN},
123
+ wn ∈ FW
124
+ 2 , ∀ n ∈ [N],
125
+ each of size W bits. Node k aims to compute an output function
126
+ φk : FNW
127
+ 2
128
+ → FU
129
+ 2 ,
130
+ which maps all the files to a bit stream
131
+ uk = φk(w1, . . . , wN) ∈ FU
132
+ 2
133
+ of length U. Assume that each output function φk decomposes as:
134
+ φk(w1, . . . , wN) = hk(fk,1(w1), . . . , fk,N(wN)),
135
+ (1)
136
+ where
137
+
138
+ Files
139
+ Map
140
+ Node 3
141
+ IVs
142
+ Reduce
143
+ The set of files
144
+ X(X1, X2, X3)
145
+ X1
146
+ X2
147
+
148
+ Reduce
149
+ Reduce
150
+ Files
151
+ Files
152
+ Map
153
+ Map
154
+ IVs
155
+ IVs
156
+ Node 1
157
+ Node 24
158
+ • Each “map” function fk,n is of the form
159
+ fk,n : FW
160
+ 2 → FV
161
+ 2 ,
162
+ and maps the file wn into the IV
163
+ vk,n ≜ fk,n(wn) ∈ FV
164
+ 2 .
165
+ • The “reduce” function hk is of the form
166
+ hk : FNV
167
+ 2
168
+ → FU
169
+ 2 ,
170
+ and maps the IVs
171
+ Vk ≜ {vk,n : n ∈ [N]}
172
+ into the output stream
173
+ uk = hk(vk,1, . . . , vk,N).
174
+ Notice that one trivial decompositon is that, the map functions are identity functions and the
175
+ reduce functions are the output functions, i.e., gk,n(wn) = wn, and hk = φk, ∀ n ∈ [N], k ∈ [K].
176
+ But in practice, many output functions can be decomposed such that the main computation load is
177
+ dominated by the map functions. For example, in federated learning, it typically needs to collect
178
+ the sum of the gradients over all data blocks, where the map functions are used to compute the
179
+ gradients of the loss functions over a data block, while the reduce function is the sum operation.
180
+ The described structure of the output functions φ1, . . . , φK, allows the nodes to perform their
181
+ computation in the following three-phase procedure.
182
+ 1) Map Phase: Each node k ∈ [K] chooses to store a subset of files Mk ⊆ W. For each file
183
+ wn ∈ Mk, node k computes a subset of IVs
184
+ Ck,n = {vq,n : q ∈ Zk,n},
185
+ where Zk,n ⊆ [K]. Denote the set of IVs computed at node k by Ck, i.e.,
186
+ Ck ≜
187
+
188
+ n:wn∈Mk
189
+ Ck,n.
190
+ (2)
191
+ 2) Shuffle Phase: The K nodes exchange some of their computed IVs through the AP via
192
+ upload and download sub-phases:
193
+ In the upload sub-phase, each node k generates a coded signal
194
+ Xk = ϕk (Ck)
195
+ of some length lk ∈ N and sends it to the AP, using a function
196
+ ϕk : F|Ck|V
197
+ 2
198
+ → Flk
199
+ 2 .
200
+ In the download sub-phase, receiving all the signals {X1, . . . , XK}, the AP generates a signal
201
+ X = χ(X1, X2, . . . , XK)
202
+ (3)
203
+ of length l ∈ N, and broadcasts it to all nodes, where the encoding function is
204
+ χ : Fl1+l2+...+lK
205
+ 2
206
+ → Fl
207
+ 2.
208
+
209
+ 5
210
+ 3) Reduce Phase: Using the received signal X broadcast from the AP in the shuffle phase
211
+ and its own IVs Ck computed locally in the map phase, each node k now computes the IVs
212
+ (vk,1, . . . , vk,N) = ψk (X, Ck) ,
213
+ (4)
214
+ for some function
215
+ ψk : Fl
216
+ 2 × F|Ck|V
217
+ 2
218
+ → FNV
219
+ 2
220
+ .
221
+ Finally, it computes
222
+ uk = hk(vk,1, . . . , vk,N).
223
+ (5)
224
+ To measure the storage, computation, and communication costs of the described procedure,
225
+ following the convention in [7], we introduce the following definitions.
226
+ Definition 1 (Storage load). Storage load r is defined as the total number of files stored across
227
+ the K nodes normalized by the total number of files N:
228
+ r ≜
229
+ �K
230
+ k=1 |Mk|
231
+ N
232
+ .
233
+ (6)
234
+ Definition 2 (Computation load). Computation load c is defined as the total number of map
235
+ functions computed across the K nodes, normalized by the total number of map functions NK:
236
+ c ≜
237
+ �K
238
+ k=1 |Ck|
239
+ NK
240
+ .
241
+ (7)
242
+ Definition 3 (Communication Load). The communication load is characterized by the tuple
243
+ (L, D), where L (resp. D) is the upload (resp. download) defined as the total number of the bits
244
+ sent by the K nodes (resp. AP) during the upload (resp. download) sub-phase, normalized by
245
+ the total length of all intermediate values NKV :
246
+ L ≜
247
+ �K
248
+ k=1 lk
249
+ NKV ,
250
+ D ≜
251
+ l
252
+ NKV .
253
+ Remark 1 (Nontrivial Regime). In general, the non-trivial regime in our setup is
254
+ 1 ≤ c ≤ r ≤ K,
255
+ (8a)
256
+ 0 ≤ D ≤ L ≤ 1 − r
257
+ K .
258
+ (8b)
259
+ For completeness, we justify them by the following observations.
260
+ • Justification of (8a): Since each IV needs to be computed at least once somewhere, we
261
+ have c ≥ 1. Moreover, the definition of Ck in (2) implies that |Ck| ≤ |Mk|K, and thus by
262
+ (6) and (7), c ≤ r. Finally, the regime r > K is not interesting, because in this case each
263
+ node stores all the files, and can thus locally compute all the IVs required to compute its
264
+ output function. In this case, c ≥ 1, D ≥ 0 and L ≥ 0, can be arbitrary.
265
+ • Justification of (8b): D ≥ 0 is trivial. By (3), as the down-link signal X is created from the
266
+ upload signals X1, . . . , XK, D = L is sufficient to communicate all the received information.
267
+ Finally, each node k can trivially compute |Mk| of its desired IVs locally and thus only
268
+ needs to receive N − |Mk| IVs from other nodes. Thus, such an uncoded manner requires
269
+ an upload of L =
270
+ �K
271
+ k=1(N−|Mk|)V
272
+ NKV
273
+ = 1 − r
274
+ K.
275
+
276
+ 6
277
+ In the trivial case that the AP simply forwards all the receiving signals, i.e., X = (X1, . . . , XK),
278
+ then D = L, and the model degrades to the distributed model without the AP as in [7], where
279
+ the non-trivial region on the triple (r, c, L) was 1 ≤ c ≤ r ≤ K, 0 ≤ L ≤ 1 − r
280
+ K.
281
+ Definition 4 (Fundamental SCC Region). A Storage-Computation-Communication1 (SCC) quadru-
282
+ ple (r, c, L, D) satisfying (8) is achievable if for any ϵ > 0 and sufficiently large N, W, V , there
283
+ exist map, shuffle, and reduce procedures with storage load, computation load, upload and
284
+ download less than r + ϵ, c + ϵ, L + ϵ and D + ϵ, respectively. The fundamental SCC region is
285
+ defined as the set of all feasible SCC quadruple:
286
+ R = {(r, c, L, D) : (r, c, L, D) is feasible}.
287
+ Definition 5 (Optimal Tradeoff Surface). An SCC quadruple (r, c, L, D) is called Pareto-optimal
288
+ if it is feasible and if no feasible SCC quadruple (r′, c′, L′, D′) exists so that r′ ≤ r, c′ ≤ c, L′ ≤ L
289
+ and D ≤ D′ with one or more of the inequalities being strict. The set of all Pareto-optimal SCC
290
+ quadruples is defined as the optimal tradeoff surface:
291
+ O ≜ {(r, c, L, D) : (r, c, L, D) is Pareto-optimal}.
292
+ The goal of this paper is to characterize the fundamental SCC region R and the optimal
293
+ tradeoff surface O in our setup.
294
+ III. MAIN RESULTS
295
+ Before we present the main theorem, let us provide a toy example to illustrate the key idea
296
+ of the proposed achievable scheme.
297
+ A. An Toy Example for Achievable Scheme
298
+ Consider the case, where there aorre K = 3 nodes and N = 6 files. Each node wants to
299
+ compute an individual function from the N = 6 files as in (1). Fig. 2 illustrates the strategy
300
+ achieving the Pareto-optimal point (r, c, L, D) = (2, 4
301
+ 3, 1
302
+ 6, 1
303
+ 9), where the uplink and downlink
304
+ transmissions are illustrated in Fig.2(a) and 2(b), respectively.
305
+ In Fig. 2, the three nodes are denoted by three boxes with red, green and blue edges re-
306
+ spectively. The top-most lines in each of the three boxes indicate the files stored at the node.
307
+ The rectangle below this line indicates the map functions at the node. The computed IVs
308
+ are depicted below the rectangle, where red circles, green squares, and blue triangles indi-
309
+ cate IVs {v1,1, · · · , v1,6}, {v2,1, · · · , v2,6}, and {v3,1, · · · , v3,6}, respectively. The dashed cir-
310
+ cles/squares/triangles stand for the IVs that are not computed from the stored files. The last
311
+ line of each box indicates the IVs that the node needs to learn during the shuffle phase.
312
+ The N = 6 files
313
+ W = {w1, w2, w3, w4, w5, w6}
314
+ are partitioned into
315
+ �K
316
+ r
317
+
318
+ = 3 batches, i.e., {w1, w2}, {w3, w4}, {w5, w6}. In the map phase, the
319
+ files {w1, w2} are simultaneously stored at nodes 1 and 3; the files {w3, w4} at nodes 1 and 2;
320
+ and the files {w5, w6} at nodes 2 and 3. For each node, the computed IVs can be classified into
321
+ two types: the IVs that will be used by its own reduce function (the first line below the “map”
322
+ 1The communication load includes both upload and download.
323
+
324
+ 7
325
+ rectangle) and the IVs that will be used for transmission or decoding (the second and third lines
326
+ below the “map” rectangle).
327
+ In the shuffle phase, during the upload sub-phase, each node creates a coded signal by XORing
328
+ two IVs and sends it to the AP as illustrated in Fig. 2(a), i.e., Nodes 1, 2 and 3 sends coded
329
+ IVs v1,1 ⊕ v3,3, v3,4 ⊕ v2,5 and v6,2 ⊕ v2,1, respectively; during the download sub-phase, the AP
330
+ combines the three received signals by a simple chain coding, i.e., the two downlink signals are
331
+ formed by XORing the signals from nodes 1 and 2, and the signals from 2 and 3, respectively.
332
+ The combined signals are sent to all the three nodes.
333
+ In the reduce phase, for each node, since the two chain coded signals involve a coded signal
334
+ transmitted by itself, the node can decode the two coded signals from the other two nodes.
335
+ Moreover, from each of the coded singals, the node can further decode an IVs it needs, by
336
+ XORing the coding signal with one of its computed IV. For example, Node 1 first decodes the
337
+ two signals v3,4 ⊕ v2,5 and v2,6 ⊕ v1,2, then it can further decodes the IVs v2,5 and v2,6, since the
338
+ IVs v3,4 and v1,2 have been computed locally. Finally, each node collects all IVs for its assigned
339
+ reduce function, and computes the final output.
340
+ B. Fundamental SCC Region and Optimal Tradeoff Surface
341
+ For each i ∈ [K], define two SCC quadrules
342
+ Pi ≜
343
+
344
+ i, i
345
+
346
+ 1 − i − 1
347
+ K
348
+
349
+ , 1
350
+ i
351
+
352
+ 1 − i
353
+ K
354
+
355
+ ,
356
+ 1
357
+ i + 1
358
+
359
+ 1 − i
360
+ K
361
+ ��
362
+ ,
363
+ Qi ≜
364
+
365
+ i, i, 1
366
+ i
367
+
368
+ 1 − i
369
+ K
370
+
371
+ ,
372
+ 1
373
+ i + 1
374
+
375
+ 1 − i
376
+ K
377
+ ��
378
+ .
379
+ In the following, we will use P u
380
+ i , Qu
381
+ i , P d
382
+ i , Qd
383
+ i to denote the projections of Pi, Qi into the uplink
384
+ and downlink SCC subspaces2, i.e.,
385
+ P u
386
+ i ≜
387
+
388
+ i, i
389
+
390
+ 1 − i − 1
391
+ K
392
+
393
+ , 1
394
+ i
395
+
396
+ 1 − i
397
+ K
398
+ ��
399
+ ,
400
+ Qu
401
+ i ≜
402
+
403
+ i, i, 1
404
+ i
405
+
406
+ 1 − i
407
+ K
408
+ ��
409
+ ,
410
+ P d
411
+ i ≜
412
+
413
+ i, i
414
+
415
+ 1 − i − 1
416
+ K
417
+
418
+ ,
419
+ 1
420
+ i + 1
421
+
422
+ 1 − i
423
+ K
424
+ ��
425
+ ,
426
+ (9)
427
+ Qd
428
+ i ≜
429
+
430
+ i, i,
431
+ 1
432
+ i + 1
433
+
434
+ 1 − i
435
+ K
436
+ ��
437
+ .
438
+ The main result of this paper is summarized in the following theorem, where the proofs are
439
+ provided in the following sections.
440
+ Theorem 1. The fundamental SCC region R is given by
441
+ R =
442
+
443
+ (r, c, L, D) : 1 ≤ c ≤ r ≤ K, L∗(r, c) ≤ L ≤ 1 − r
444
+ K , D∗(r, c) ≤ D ≤ L
445
+
446
+ ,
447
+ where L∗(r, c) is a function such that {(r, c, L∗(r, c)) : 1 ≤ c ≤ r ≤ K} forms the surface
448
+ Fu ≜ △P u
449
+ 1 P u
450
+ 2 Qu
451
+ 2 ∪
452
+ K−1
453
+
454
+ i=2 △P u
455
+ i−1P u
456
+ i P u
457
+ K ∪
458
+ K−1
459
+
460
+ i=2 ⊟P u
461
+ i Qu
462
+ i Qu
463
+ i+1P u
464
+ i+1
465
+ 2In this paper, we will refer r-c-L subspace as the uplink SCC subspace, and the r-c-D subspace the downlink SCC subspace.
466
+ The superscripts “u” and “d” indicate “uplink” and “downlink”, respectively.
467
+
468
+ 8
469
+ (a)
470
+ (b)
471
+ Fig. 2: Illustration of the CDC for star network: (a) Uplink (b) Downlink
472
+ in the uplink SCC subspace, and D∗(r, c) is a function such that {(r, c, D∗(r, c)) : 1 ≤ c ≤ r ≤
473
+ K} forms the surface
474
+ Fd ≜ △P d
475
+ 1 P d
476
+ 2 Qd
477
+ 2 ∪
478
+ K−1
479
+
480
+ i=2 △P d
481
+ i−1P d
482
+ i P d
483
+ K ∪
484
+ K−1
485
+
486
+ i=2 ⊟P d
487
+ i Qd
488
+ i Qd
489
+ i+1P d
490
+ i+1
491
+ in the downlink SCC subspace. The optimal tradeoff surface is given by
492
+ O =
493
+ K−1
494
+
495
+ i=2 {θ1Pi−1 + θ2Pi + θ3PK : θ1, θ2, θ3 ∈ [0, 1], θ1 + θ2 + θ3 = 1}.
496
+ (10)
497
+ In Fig. 3, the functions L∗(r, c) and D∗(r, c) are ploted for K = 10 nodes. Notice that, by
498
+ setting r = c, we recover the optimal upload and download as investigated in [20]3, i.e.,
499
+ 3The measurement of communication load is up to a scalar “K” in [20] compared Definition 3, and a slightly difference in
500
+ assumption in [20] is that each node has a fixed storage load.
501
+
502
+ Files
503
+ 25
504
+ 6
505
+ Map
506
+ Node 3
507
+ Computes
508
+ 12
509
+ 56
510
+ 1:256
511
+ Needs
512
+ 3
513
+ Files
514
+ 3
515
+ 2
516
+ 4
517
+ 3
518
+ 4
519
+ Files
520
+ T9
521
+ 1
522
+ Map
523
+ 1④3
524
+ Map
525
+ 3456
526
+ Computes
527
+ Computes 3 4 .5..6
528
+ 14
529
+ 3:4:60
530
+ Needs
531
+ Needs
532
+ 12
533
+ Node 1
534
+ Node 2Files
535
+ 25
536
+ 6
537
+ Map
538
+ Node 3
539
+ Computes
540
+ 12
541
+ 1256
542
+ Needs
543
+ 3
544
+ 2
545
+ Files
546
+ 2
547
+ 3
548
+ 4
549
+ Files
550
+ 3
551
+ 4
552
+ 5
553
+ T9
554
+ Map
555
+ Map
556
+ 3451
557
+ 16
558
+ Computes
559
+ Computes 3 4 .5..6
560
+ 3:4:60
561
+ Needs
562
+ Needs
563
+ 12
564
+ Node 1
565
+ Node 29
566
+ 1) the optimal upload for given storage is given by
567
+ L∗(r) ≜ Conv
568
+ �1
569
+ r
570
+
571
+ 1 − r
572
+ K
573
+ ��
574
+ ,
575
+ which corresponds the curve formed by the line segments Qu
576
+ 1Qu
577
+ 2, Qu
578
+ 2Qu
579
+ 3, . . . , Qu
580
+ K−1Qu
581
+ K.
582
+ 2) the optimal download for given storage is given by
583
+ D∗(r) ≜ Conv
584
+
585
+ 1
586
+ r + 1
587
+
588
+ 1 − r
589
+ K
590
+ ��
591
+ ,
592
+ which corresponds to the curve formed by the line segments Qd
593
+ 1Qd
594
+ 2, Qd
595
+ 2Qd
596
+ 3, . . . , Qd
597
+ K−1Qd
598
+ K.
599
+ Observe that, the line segments Qu
600
+ i P u
601
+ i in the uplink SCC space and Qd
602
+ i P d
603
+ i in the downlink
604
+ space (i = 2, 3, . . . , K) are parellel to the c-axis, which indicate that the computation load can be
605
+ saved to achieve L∗(r). The length of the line segments indicates the amount of the computation
606
+ load that can be saved. Thus, with larger storage load r, the saving of computation load to
607
+ achieve L∗(r) and D∗(r) is larger. It will be clear later that the saving on the computation load
608
+ is due to the fact that, under the assumption that each node computes all IVs it can compute,
609
+ some of the IVs computed are used neither in generating the signal, nor in the decoding
610
+ process.
611
+ The projections of the Pareto-optimal surface O into the uplink and downlink SCC space
612
+ correspond to the surfaces
613
+ Ou ≜ {P u
614
+ i−1P u
615
+ i P u
616
+ K : i ∈ [2 : K − 1]}
617
+ and
618
+ Od ≜ {P d
619
+ i−1P d
620
+ i P d
621
+ K : i ∈ [2 : K − 1]},
622
+ respectively. Observe that, for a given feasible (r, c) pair, the optimal upload is strictly larger
623
+ than the optimal download. We will see that this is achieved by performing some simple chain
624
+ coding at the AP to combine the signals from different nodes. Interestingly, both the upload and
625
+ download can be simultaneously achieved for a fixed (r, c) pair.
626
+ Remark 2 (Relation to Results in [7]). One can observe that, the surfaces composing L∗(r, c) and
627
+ Ou coincide with the optimal communication load and the Pareto-optimal SCC tradeoff surface
628
+ in the setup where the nodes directly connect to each other through a shared link (c.f. [7, Fig.
629
+ 2]), respectively. It was shown in [7] that, by dropping the computations of the IVs that are not
630
+ used in the CDC scheme [3] as in [4], the resultant coded computing scheme can achieve the
631
+ corner points of the Pareto-optimal SCC tradeoff surface (which is same as Ou). In fact, in
632
+ our proposed scheme, each node performs the same map procedures as in [4], but the signals
633
+ are sent to the AP. The AP performs a simple chain coding on the received signals to further
634
+ compress the length of the signals, which leads to a further decrease of the download compared
635
+ to the upload. We will present the whole process in Section IV.
636
+ IV. ACHIEVABILITY
637
+ Since the set O is exactly all the Pareto-optimal points of the set R (Appendix A), we
638
+ only need to prove the achievability of the hypersurface O. We will derive a coded computing
639
+ scheme that achieves the SCC quadruple Pi. Moreover, for any fixed θ1, θ2, θ3 ∈ [0, 1] such that
640
+
641
+ 10
642
+ Fig. 3: The functions L∗(r, c) and D∗(r, c).
643
+ θ1+θ2+θ3 = 1, divide the N files into three groups of sizes4 θ1N, θ2N and θ3N. By applying the
644
+ scheme achieving the points Pi−1, Pi and PK on the three groups of files, the resultant scheme
645
+ achieves the point P = θ1Pi−1 + θ2Pi + θ3PK. Thus, we only need to prove the achievability of
646
+ Pi, i ∈ [K].
647
+ A. Coded Distributed Computing for Star Network
648
+ We now describe the scheme achieving Pi for a fixed i ∈ [K].
649
+ Define
650
+ Ωi ≜ {T ⊆ [K] : |T | = i} ,
651
+ ∀ i ∈ [K].
652
+ For i = K, PK = (K, 1, 0, 0) is trivial, since each node can simply store all the files and
653
+ computes their IVs as well as their reduce functions locally, with no communication loads.
654
+ Consider a fixed i ∈ [K − 1], the N files are partitioned into
655
+ �K
656
+ i
657
+
658
+ batches, each containing
659
+ ηi = N
660
+ �K
661
+ i
662
+
663
+ (11)
664
+ files. Each batch is then associated with a subset T of [K] of cardinality i, i.e., an element in
665
+ Ωi. Let WT denote the batch of the ηi files associated with set T . Then,
666
+ W = {w1, . . . , wN} =
667
+
668
+ T ∈Ωi
669
+ WT .
670
+ 4This requires that θ1, θ2, θ3 have to be rational. If any one is irrational, one can replace it by a rational number arbitrarily
671
+ close to it.
672
+
673
+ Optimal Upload and Download, K = 10
674
+ pu
675
+ 0.9 -
676
+ ..The Function L*(r, c)
677
+ (/T)
678
+ 0.8 ~
679
+ -- The Function D*(r,c)
680
+ 0.7
681
+ -Plane r = c
682
+ Communication load (
683
+ 0.6 ~
684
+ Pf,
685
+ 0.5
686
+ Q
687
+ 0.4 ~
688
+ 0.3 ~
689
+ Qu
690
+ 0.2 -
691
+ 0.1
692
+ 0.
693
+ Storage load (r)s
694
+ Pl0/ P10
695
+ 2
696
+ 3
697
+ 5
698
+ 4
699
+ 10 Q1 /Qil 0
700
+ 6
701
+ 6
702
+ 8
703
+ >
704
+ Computation load (c)11
705
+ Further let UT ,k be the set of IVs for output function φk that can be computed from the files in
706
+ WT :
707
+ UT ,k ≜ {vk,n : wn ∈ WT }.
708
+ We now describe the map, shuffle, and reduce procedures.
709
+ 1) Map Phase: Each node k stores
710
+ Mk =
711
+
712
+ T ∈Ωi:k∈T
713
+ WT ,
714
+ and computes the IVs
715
+ Ck = C1
716
+ k ∪ C2
717
+ k,
718
+ (12)
719
+ where
720
+ C1
721
+ k =
722
+
723
+ T ∈Ωi:k∈T
724
+ UT ,k,
725
+ (13a)
726
+ C2
727
+ k =
728
+
729
+ T ∈Ωi:k∈T
730
+
731
+ q∈K\T
732
+ UT ,q.
733
+ (13b)
734
+ In other words, for each batch T , each node k computes all the IVs for its own function
735
+ k, and all the IVs for the function q if node q does not have the batch T .
736
+ 2) Shuffle Phase: For each element T ∈ Ωi and each index j ∈ K\T , we partition the set
737
+ UT ,j into i smaller subsets
738
+ UT ,j =
739
+
740
+ U k
741
+ T ,j : k ∈ T
742
+
743
+ (14)
744
+ of equal size.
745
+ In the upload sub-phase, for each S ∈ Ωi+1 and k ∈ S, by (13b), node k can compute
746
+ the signal
747
+ Xk
748
+ S ≜
749
+
750
+ l∈S\{k}
751
+ U k
752
+ S\{l},l
753
+ from the IVs calculated during the map phase. Node k thus sends the multicast signal
754
+ Xk =
755
+
756
+ Xk
757
+ S : S ∈ Ωi+1 such that k ∈ S
758
+
759
+ to the AP R. Thus, the AP R receives the signals X1, . . . , XK.
760
+ In the download sub-phase, for each S = {k1, . . . , ki+1} ∈ Ωi+1, the AP R creates a
761
+ signal,
762
+ XS ≜ (Xk1
763
+ S ⊕ Xk2
764
+ S , Xk2
765
+ S ⊕ Xk3
766
+ S , . . . , Xki
767
+ S ⊕ Xki+1
768
+ S
769
+ ).
770
+ (15)
771
+ Then the AP broadcasts the signal
772
+ X ≜ {XS : S ∈ Ωi+1}.
773
+ (16)
774
+ 3) Reduce Phase: Notice that C2
775
+ k only contains the IVs vq,n where q ̸= k. Thus, by (12) and
776
+
777
+ 12
778
+ (13a), during the shuffle phase each node k needs to learn all the IVs in
779
+
780
+ T ∈Ωi : k/∈T
781
+ UT ,k.
782
+ Fix an arbitrary T ∈ Ωi such that k /∈ T . From the received multicast message XT ∪{k},
783
+ since the signal Xk
784
+ T ∪{k} is generated by node k, by (15), node k can decode Xj
785
+ T ∪{k} for
786
+ all j ∈ T , where the signal
787
+ Xj
788
+ T ∪{k} =
789
+
790
+ l∈T ∪{k}\{j}
791
+ U j
792
+ T ∪{k}\{l},l
793
+ is sent by node j during the shuffle phase. For any fixed j ∈ T , node k can recover the
794
+ missing IV U j
795
+ T ,k through a simple XOR operation:
796
+ U j
797
+ T ,k = Xj
798
+ T ∪{k} ⊕
799
+
800
+ l∈T \{j}
801
+ U j
802
+ T ∪{k}\{l},l,
803
+ (17)
804
+ where U j
805
+ T ∪{k}\{l},l is calculated at node k by (13b) and (14) for all l ∈ T \{j}. Moreover,
806
+ node k can decode UT ,k from
807
+
808
+ Xj
809
+ T ∪{k} : j ∈ T
810
+
811
+ .
812
+ by (14) and (17). After collecting all the missing IVs, node k can proceed to compute the
813
+ reduce function (5).
814
+ Remark 3 (Comparison with [20]). Compared to the coded computing scheme in [20], two
815
+ differences of the above scheme are:
816
+ 1) In the map phase, each node only needs to compute the IVs described in (12) and (13),
817
+ because only those IVs are useful for creating or decoding the coded signals, while in
818
+ [20], all the IVs pertaining to the files in Mk are computed, i.e., node k computes
819
+ �C ≜
820
+
821
+ T ∈Ωi,k∈T
822
+
823
+ q∈[K]
824
+ UT ,q.
825
+ (18)
826
+ This scheme in fact achieves the point Qi, which is inferior to Pi for i > 0 in computation
827
+ load. The idea of removing the redundancy has been proposed in the setup where the nodes
828
+ connect to each other directly through a bus link by Ezzeldin et al. [4] and Yan et al. [7].
829
+ 2) In (15), for any node set S of size i + 1, we used a simple chain coding on the signals to
830
+ form i signals, while in [20], it uses random coding on the signals {Xk
831
+ S : k ∈ S} to form
832
+ i coded signals. The advantage of chain coding in (15) is obvious:
833
+ a) It has smaller encoding and decoding complexities;
834
+ b) It can be operated on the binary field F2;
835
+ c) The order of nodes in the chain can be arbitrary. It makes sense in some scenarios:
836
+ the signals {Xk
837
+ S : k ∈ S} may arrive at different time points. Consider the case that
838
+ the signals arrive in the ordder Xk1
839
+ S , Xk2
840
+ S , . . . , Xki+1
841
+ S
842
+ , to perform the encoding (15), at
843
+ any time the AP only needs to keep one signal in its buffer, because each coordinate
844
+ in (15) only depends on two consecutive signals. While with random linear coding,
845
+ the AP typically has to wait for all signals {Xk
846
+ S : k ∈ S}. Thus, the chain coding
847
+ can reduce the buffer size at the AP and the node to node delay.
848
+
849
+ 13
850
+ Remark 4 (PDA framework). In [7], [21], [22], a coded computing scheme was derived based on
851
+ placement delivery array (PDA), which was proposed in [23] to explore coded caching schemes
852
+ with uncoded placement [24]. In particular, it turns out that the Maddah-Ali and Niesen’s coded
853
+ caching scheme corresponds to a special structure of PDA (referred to as MAN-PDA). It was
854
+ showed in [7] that, with any given PDA belonging to a special class (defined as PDA for
855
+ distributed computing (Comp-PDA)), one can always obtain a coded computing scheme. The class
856
+ of PDAs achieving the Pareto-optimal tradeoff surface was characterized in [7]. The advantage of
857
+ establishing the PDA framework is, various known PDA structure, e.g., the constructions in [23],
858
+ [25], [26] can be directly utilized to obtain coded computing schemes with low file complexity5.
859
+ In our setup, similar connections between coded computing schemes and Comp-PDA can be
860
+ established, by following the same steps as in [7] for upload singals, and incoporating the chain
861
+ coding (15) on all multicast signals from the Comp-PDA for the downlink signals. For example,
862
+ the scheme described in Fig. 2 can be derived from the PDA
863
+
864
+
865
+
866
+ 1
867
+
868
+ 1
869
+
870
+
871
+
872
+
873
+ 1
874
+
875
+ � ,
876
+ for details of forming the upload signals in Fig. 2(a), one can refer to [7, Example 4].
877
+ B. Performance Analysis
878
+ We analyze the performance of the scheme.
879
+ 1) Storage Load: The number of batches in Mk is
880
+ �K−1
881
+ i−1
882
+
883
+ , each consisting of ηi files. Thus,
884
+ the storage load is
885
+ r = 1
886
+ N · K ·
887
+ �K − 1
888
+ i − 1
889
+
890
+ · ηi = i.
891
+ (19)
892
+ 2) Computation Load: Since C1
893
+ k ∩ C2
894
+ k = ∅, we have |Ck| = |C1
895
+ k| + |C2
896
+ k|. From (11), (13a), and
897
+ (13b), we have
898
+ |C1
899
+ k| =
900
+ �K − 1
901
+ i − 1
902
+
903
+ · ηi = iN
904
+ K ,
905
+ |C2
906
+ k| =
907
+ �K − 1
908
+ i − 1
909
+
910
+ · (K − i) · ηi
911
+ =
912
+
913
+ 1 − i
914
+ K
915
+
916
+ · i · N.
917
+ Thus, the computation load is
918
+ c =
919
+ �K
920
+ k=1 |Ck|
921
+ NK
922
+ = i
923
+
924
+ 1 − i − 1
925
+ K
926
+
927
+ .
928
+ (20)
929
+ 3) Communication Load: The number of signals that each node k transmits is
930
+ �K−1
931
+ i
932
+
933
+ , each
934
+ of size ηi·V
935
+ i
936
+ bits. Thus, the length of the signal Xk is lk =
937
+ �K−1
938
+ i
939
+ � ηi·V
940
+ i
941
+ bits. Therefore, the
942
+ 5The file complexity of a coded computing scheme is defined as the smallest number of files required to implement the
943
+ scheme, e.g., the file complexity of the proposed scheme achieving Pi is
944
+ �K
945
+ i
946
+
947
+ .
948
+
949
+ 14
950
+ upload is
951
+ L =
952
+ �K
953
+ k=1 lk
954
+ NKV
955
+ = 1
956
+ i ·
957
+
958
+ 1 − i
959
+ K
960
+
961
+ .
962
+ (21)
963
+ By (15) and (16), the AP R transmits
964
+ � K
965
+ i+1
966
+
967
+ · i signals, each of size
968
+ ηi·V
969
+ i
970
+ bits, thus the
971
+ download is
972
+ D =
973
+ 1
974
+ NKV ·
975
+ � K
976
+ i + 1
977
+
978
+ · i · ηi · V
979
+ i
980
+ =
981
+ 1
982
+ i + 1
983
+
984
+ 1 − i
985
+ K
986
+
987
+ .
988
+ (22)
989
+ From (19), (20), (21) and (22), we show the achievability of the SCC quadruple Pi.
990
+ V. CONVERSE
991
+ We need to prove that for any achievable (r, c, L, D) satisfying (8),
992
+ L ≥ L∗(r, c),
993
+ (23a)
994
+ D ≥ D∗(r, c).
995
+ (23b)
996
+ Consider a coded distributed computing scheme achieving (r, c, L, D), with file allocations M[K],
997
+ IV allocations C[K], uplink signals X[K] and downlink signal X. By the decoding condition (4),
998
+ H(Vk|X, Ck) = 0,
999
+ ∀ k ∈ [K].
1000
+ Thus for any k ∈ [K],
1001
+ H(Vk|X1, . . . , XK, Ck)
1002
+ (a)
1003
+ = H(Vk|X1, . . . , XK, X, Ck)
1004
+ ≤ H(Vk|X, Ck)
1005
+ = 0,
1006
+ where (a) follows since the downlink signal X is determined by the uplink signals X[K] by (3).
1007
+ That is, with the signals X1, . . . , XK and the locally computed IVs Ck, node k can decode
1008
+ all the IVs it needs. As a result, the file allocations M[K], IV allocations C[K] and the uplink
1009
+ signals X[K] constitute a valid scheme for the distributed computing system where the nodes
1010
+ are connected through a bus shared link directly, as investigated in [7]. Therefore, by the results
1011
+ in [7, Theorem 2], we have proved (23a).
1012
+ We proceed to prove the (23b). For any k ∈ [K] and nonempty S ⊆ [K]\{k}, define
1013
+ Bk,S ≜ {vk,n : vk,n is exclusively computed by the nodes in S},
1014
+ �Bk ≜ {vk,n : vk,n is computed by node k}.
1015
+ Let bk,S be the cardinality of the set Bk,S and ˜bk be the cardinality of �Bk. Obviously, the subsets
1016
+ {Bk,S : S ⊆ [K]\{k}, S ̸= ∅} and �Bk form a partition of the IVs Vk, thus
1017
+ ˜bk +
1018
+
1019
+ S⊆[K],S̸=∅
1020
+ bk,S = N.
1021
+
1022
+ 15
1023
+ For each j ∈ [K − 1], the set of IVs not computed locally but exclusively computed by j other
1024
+ nodes are
1025
+ Bj =
1026
+
1027
+ k∈[K]
1028
+
1029
+ S⊆[K]\{k},|S|=j
1030
+ Bk,S.
1031
+ Then the cardinality of set Bj is given by
1032
+ bj ≜
1033
+
1034
+ k∈[K]
1035
+
1036
+ S⊆[K]\{k},|S|=j
1037
+ bk,S,
1038
+ ∀j ∈ [K − 1].
1039
+ (24)
1040
+ To prove the lower bound in (23b), we need the following two lemmas.
1041
+ Lemma 1. The entropy of the download signal X satisfies
1042
+ H(X) ≥ V
1043
+ K−1
1044
+
1045
+ j=1
1046
+ bj
1047
+ j + 1.
1048
+ Proof: Assume that the AP holds all IVs Vk, then the access point can create the signal X.
1049
+ Consider the data exchange problem6 formed by the AP and the K nodes, where only the AP
1050
+ sends the signal X to all the K nodes. Notice that, in this system, each bit in Bk,S is cached
1051
+ at the AP and the nodes in S, but only demanded by node k. Thus, by the lower bound in [27,
1052
+ Theorem 1],
1053
+ H(X) ≥ V
1054
+
1055
+ k∈[K]
1056
+
1057
+ S⊆[K]\{k}
1058
+ 1
1059
+ (|S| + 1) + 1 − 1 · bk,S
1060
+ = V
1061
+ K
1062
+
1063
+ k=1
1064
+ K−1
1065
+
1066
+ j=1
1067
+
1068
+ S⊆[K]\{k},|S|=j
1069
+ 1
1070
+ j + 1 · bk,S
1071
+ (a)
1072
+ = V
1073
+ K−1
1074
+
1075
+ j=1
1076
+ 1
1077
+ j + 1
1078
+ K
1079
+
1080
+ k=1
1081
+
1082
+ S⊆[K]\{k},|S|=j
1083
+ bk,S
1084
+ = V
1085
+ K−1
1086
+
1087
+ j=1
1088
+ bj
1089
+ j + 1,
1090
+ where in (a), we utilized (24).
1091
+ The following lemma was proved in [7, Lemma 2].
1092
+ Lemma 2. The parameters b1, . . . , bK−1 defined in (24) satisfy
1093
+ K−1
1094
+
1095
+ j=1
1096
+ bj ≥ N(K − r),
1097
+ K−1
1098
+
1099
+ j=1
1100
+ (j − 1)bj ≤ (c − 1)NK.
1101
+ 6Data exchange problem was defined in [27], where each of the nodes holds a subset of the information bits, and requests
1102
+ another subset of information bits.
1103
+
1104
+ 16
1105
+ For a fixed r ∈ [1 : K], and each i ∈ [K], define
1106
+ ci ≜ 1 +
1107
+
1108
+ 1 − r
1109
+ K
1110
+
1111
+ (i − 1).
1112
+ (25)
1113
+ Let λi, µi ∈ R+ such that
1114
+ λix + µi|x=ci−1 =
1115
+ 1
1116
+ ci−1 + 1 − 2r/K ·
1117
+
1118
+ 1 − r
1119
+ K
1120
+ �2
1121
+ = 1
1122
+ i
1123
+
1124
+ 1 − r
1125
+ K
1126
+
1127
+ ,
1128
+ (26a)
1129
+ λix + µi|x=ci =
1130
+ 1
1131
+ ci + 1 − 2r/K ·
1132
+
1133
+ 1 − r
1134
+ K
1135
+ �2
1136
+ =
1137
+ 1
1138
+ i + 1
1139
+
1140
+ 1 − r
1141
+ K
1142
+
1143
+ .
1144
+ (26b)
1145
+ From (26a) and (26b), the following relationships hold:
1146
+ λi = −
1147
+ 1
1148
+ i(i + 1) < 0,
1149
+ (27a)
1150
+ µi = 2i − 1
1151
+ i(i + 1)
1152
+
1153
+ 1 − r
1154
+ K
1155
+
1156
+ +
1157
+ 1
1158
+ i(i + 1) > 0,
1159
+ (27b)
1160
+ λi + µi = 2i − 1
1161
+ i(i + 1)
1162
+
1163
+ 1 − r
1164
+ K
1165
+
1166
+ > 0.
1167
+ (27c)
1168
+ Moreover, by its convexity over x ∈ [1, ∞), the function
1169
+ 1
1170
+ x + 1 − 2r/K
1171
+
1172
+ 1 − r
1173
+ K
1174
+ �2
1175
+ − (λix + µi)
1176
+ must be nonnegative outside the interval formed by the two zero points, i.e.,
1177
+ 1
1178
+ x + 1 − 2r/K
1179
+
1180
+ 1 − r
1181
+ K
1182
+ �2
1183
+ ≥ λix + µi,
1184
+ ∀ x ∈ [1, ci−1] ∪ [ci, ∞).
1185
+ Therefore,
1186
+ 1
1187
+ cj + 1 − 2r/K
1188
+
1189
+ 1 − r
1190
+ K
1191
+ �2
1192
+ ≥ λicj + µi,
1193
+ ∀ j ∈ [K − 1].
1194
+ (28)
1195
+ Now, we are ready to derive the lower bound for the download D:
1196
+ D ≥ H(X)
1197
+ NKV
1198
+ (a)
1199
+
1200
+ K−1
1201
+
1202
+ j=1
1203
+ bj
1204
+ NK ·
1205
+ 1
1206
+ j + 1
1207
+ (b)
1208
+ =
1209
+ 1
1210
+ N(K − r)
1211
+ K−1
1212
+
1213
+ j=1
1214
+ bj ·
1215
+ 1
1216
+ cj + 1 − 2r/K
1217
+
1218
+ 1 − r
1219
+ K
1220
+ �2
1221
+ (c)
1222
+
1223
+ 1
1224
+ N(K − r)
1225
+ K−1
1226
+
1227
+ j=1
1228
+ bj(λicj + µi)
1229
+
1230
+ 17
1231
+ (d)
1232
+ =
1233
+ 1
1234
+ N(K − r)
1235
+ K−1
1236
+
1237
+ j=1
1238
+ bj
1239
+
1240
+ λi
1241
+
1242
+ 1 +
1243
+
1244
+ 1 − r
1245
+ K
1246
+
1247
+ (j − 1)
1248
+
1249
+ + µi
1250
+
1251
+ =
1252
+ λi
1253
+ NK ·
1254
+ K−1
1255
+
1256
+ j=1
1257
+ (j − 1)bj +
1258
+ λi + µi
1259
+ N(K − r) ·
1260
+ K−1
1261
+
1262
+ j=1
1263
+ bj
1264
+ (e)
1265
+
1266
+ λi
1267
+ NK · (c − 1)NK +
1268
+ λi + µi
1269
+ N(K − r) · N(K − r)
1270
+ = λic + µi
1271
+ = −
1272
+ 2i − 1
1273
+ Ki(i + 1)r −
1274
+ 1
1275
+ i(i + 1)c +
1276
+ 2
1277
+ i + 1.
1278
+ (29)
1279
+ where (a) follows from Lemma 1; (b) and (d) follow from the definition of ci in (25); (c) follows
1280
+ from (28); and (e) follows from Lemma 2 and the signs of λi and λi + µi in (27).
1281
+ Notice that the three points P d
1282
+ i−1, P d
1283
+ i and P d
1284
+ K defined in (9) satisfy (29) with equality. Thus, the
1285
+ inequalities above indicate that all the feasible points (r, c, L, D) must satisfy that the projection
1286
+ into the download SCC space (r, c, D) must lie above the plane containing △P d
1287
+ i−1P d
1288
+ i P d
1289
+ K.
1290
+ Furthermore, D should be lower bounded by the optimal download even if each node computes
1291
+ all the IVs that can be computed locally from their stored file, i.e., a similar setup as in [20].
1292
+ The converse in [20] indicates that L is lower bounded as follows in the r-D plane7:
1293
+ D ≥ Conv
1294
+ �1
1295
+ r
1296
+
1297
+ 1 − r
1298
+ K
1299
+ ��
1300
+ ,
1301
+ r ∈ {1, 2, . . . , K}.
1302
+ (30)
1303
+ Finally, by the lower bounds in (29) for i = 2, 3, . . . , K − 1 and (30), D is lower bounded by
1304
+ D∗(r, c), i.e., the lower bound (23b) is proved.
1305
+ VI. CONCLUSION
1306
+ In this paper, the Pareto-optimal storage-computation-upload-download tradeoff surface is
1307
+ characterized for the MapReduce distributed computing system, where the nodes have to exchange
1308
+ intermediate values through an access point that can broadcast signals to all nodes. It turns
1309
+ out that, for a given storage-computation pair (r, c), the optimal upload and download can be
1310
+ simultaneously achieved. Information-theoretical bounds matching the achievable communication
1311
+ load are provided for both uplink and downlink.
1312
+ APPENDIX A
1313
+ THE RELATION OF HYPERSURFACE O AND REGION R
1314
+ We now prove that O is the Pareto-optimal surface of the region R. Obviously, all Pareto-
1315
+ optimal points must lie on the surface
1316
+ F = {(r, c, L∗(r, c), D∗(r, c)) : 1 ≤ c ≤ r ≤ K}.
1317
+ 7Although the setup in [20] assumes a fixed storage capacity at each node, the proof of the following inequality does not rely on
1318
+ this assumption.
1319
+
1320
+ 18
1321
+ 1
1322
+ 2
1323
+ 3
1324
+ 4
1325
+ 5
1326
+ 6
1327
+ 7
1328
+ 8
1329
+ 9
1330
+ 10
1331
+ 1
1332
+ 2
1333
+ 3
1334
+ 4
1335
+ 5
1336
+ 6
1337
+ 7
1338
+ 8
1339
+ 9
1340
+ 10
1341
+ Fig. 4: The projections of Pi and Qi (i ∈ [K]) to the storage-computation subspace (r-c plane).
1342
+ Let the projections of points Pi and Qi into the r-c plane be P ′
1343
+ i and Q′
1344
+ i (i ∈ [K]), respectively8,
1345
+ i.e.,
1346
+ P ′
1347
+ i =
1348
+
1349
+ i, i
1350
+
1351
+ 1 − i − 1
1352
+ K
1353
+ ��
1354
+ ,
1355
+ Q′
1356
+ i = (i, i).
1357
+ Let the projection of the surface F to the r-c plane be
1358
+ F′ ≜ {(r, c) : 1 ≤ c ≤ r ≤ K} = △P ′
1359
+ 1P ′
1360
+ KQ′
1361
+ K.
1362
+ Notice that, here the “projection” map is one-to-one. Moreover, F′ can be decomposed into (see
1363
+ Fig. 4)
1364
+ F′ = △P ′
1365
+ 1P ′
1366
+ 2Q′
1367
+ 2 ∪
1368
+ K−1
1369
+
1370
+ i=2 △P ′
1371
+ i−1P ′
1372
+ iP ′
1373
+ K ∪
1374
+ K−1
1375
+
1376
+ i=2 ⊟P ′
1377
+ iQ′
1378
+ iQ′
1379
+ i+1P ′
1380
+ i+1.
1381
+ Since the triangle △P u
1382
+ 1 P u
1383
+ 2 Qu
1384
+ 2 and the trapezoids ⊟P u
1385
+ i Qu
1386
+ i Qu
1387
+ i+1P u
1388
+ i+1 in the uplink SCC space
1389
+ (i ∈ [2 : K − 1]) are parallel to the c-axis, and so is the triangle △P d
1390
+ 1 P d
1391
+ 2 Qd
1392
+ 2 and the trapezoids
1393
+ ⊟P d
1394
+ i Qd
1395
+ i Qd
1396
+ i+1P d
1397
+ i+1 in the downlink SCC space, all the points (r, c, L∗(r, c), D∗(r, c)) ∈ F such
1398
+ that
1399
+ (r, c) ∈ △P ′
1400
+ 1P ′
1401
+ 2Q′
1402
+ 2 ∪
1403
+ K−1
1404
+
1405
+ i=2 ⊟P ′
1406
+ iQ′
1407
+ iQ′
1408
+ i+1P ′
1409
+ i+1\
1410
+ K−1
1411
+
1412
+ i=2 △P ′
1413
+ i−1PiP ′
1414
+ K
1415
+ (31)
1416
+ cannot be Pareto-optimal. In the following, we prove that, all the points (r, c, L∗(r, c), D∗(r, c))
1417
+ such that
1418
+ (r, c) ∈
1419
+ K−1
1420
+
1421
+ i=2 △P ′
1422
+ i−1P ′
1423
+ iP ′
1424
+ K
1425
+ (32)
1426
+ are Pareto-optimal.
1427
+ Now fix a quadruple (r1, c1, L∗(r1, c1), D∗(r1, c1)) ∈ F that satisfies (32). We show that it is
1428
+ Pareto-optimal. To this end, consider any other quadruple (r2, c2, L2, D2) ∈ R that satisfies
1429
+ r2 ≤ r1,
1430
+ c2 ≤ c1,
1431
+ (33a)
1432
+ L2 ≤ L∗(r1, c1),
1433
+ D2 ≤ D∗(r1, c1).
1434
+ (33b)
1435
+ We show by contradiction that all four inequalities must hold with equality. Notice that, (r2, c2)
1436
+ either satisfies (31) or (32).
1437
+ 8Notice that the projections of P u
1438
+ i , Qu
1439
+ i and P d
1440
+ i , Qd
1441
+ i into the r-c plane are the same as the ones of the points Pi and Qi. As a
1442
+ result, the projections of △P u
1443
+ 1 P u
1444
+ 2 Qu
1445
+ 2/△P d
1446
+ 1 P d
1447
+ 2 Qd
1448
+ 2, △P u
1449
+ i−1P u
1450
+ i P u
1451
+ K/△P u
1452
+ i−1P u
1453
+ i P u
1454
+ K, and ⊟P u
1455
+ i Qu
1456
+ i Qu
1457
+ i+1P u
1458
+ i+1/⊟P d
1459
+ i Qd
1460
+ i Qd
1461
+ i+1P d
1462
+ i+1 into
1463
+ the r-c plane are △P ′
1464
+ 1P ′
1465
+ 2Q′
1466
+ 2, △P ′
1467
+ i−1P ′
1468
+ iP ′
1469
+ K and ⊟P ′
1470
+ iQ′
1471
+ iQ′
1472
+ i+1P ′
1473
+ i+1, respectively.
1474
+
1475
+ 19
1476
+ 1) Assume that (r2, c2) satisfies (32). If r2 < r1 or c2 < c1, then consider the uplink SCC
1477
+ subspace, one can verify that the points P u
1478
+ i−1, P u
1479
+ i and P u
1480
+ K are on the surface
1481
+ L = −
1482
+ 1
1483
+ i(i − 1)c − 2
1484
+ Kir + 2i − 1
1485
+ i(i − 1).
1486
+ (34)
1487
+ Therefore, it must hold that
1488
+ L∗(r2, c2) > L∗(r1, c1),
1489
+ (35)
1490
+ because all the surfaces containing △P u
1491
+ i−1P u
1492
+ i P u
1493
+ K (i ∈ [2 : K − 1]) have positive directional
1494
+ derivatives along (r2 − r1, c2 − c1) by (34). Since (r2, c2, L2, D2) ∈ R, we have L2 ≥
1495
+ L∗(r2, c2) and thus by (35), L2 > L∗(r1, c1), which contradicts (33). Therefore, it must
1496
+ hold that r2 = r1 and c2 = c1. Then obviously, L2 ≥ L∗(r2, c2) = L∗(r1, c1) and D2 ≥
1497
+ D∗(r2, c2) = D∗(r1, c1), thus all equalities in (33) hold.
1498
+ 2) Assume now that (r2, c2) satisfies (31). Then, (r2, c2) must lie on at least one of the K −1
1499
+ facets
1500
+ △P ′
1501
+ 1P ′
1502
+ 2Q′
1503
+ 2 or ⊟ P ′
1504
+ iQ′
1505
+ iQ′
1506
+ i+1P ′
1507
+ i+1,
1508
+ i ∈ [2 : K − 1],
1509
+ and it must not lie on the line segments P ′
1510
+ i−1P ′
1511
+ i, i ∈ [2 : K]. As the facets △P u
1512
+ 1 P u
1513
+ 2 Qu
1514
+ 2,
1515
+ ⊟P u
1516
+ i Qu
1517
+ i Qu
1518
+ i+1P u
1519
+ i+1 (i ∈ [2 : K −1]) in the uplink SCC subspace are all parellel to the c-axis,
1520
+ and so as the facets △P d
1521
+ 1 P d
1522
+ 2 Qd
1523
+ 2, ⊟P d
1524
+ i Qd
1525
+ i Qd
1526
+ i+1P d
1527
+ i+1 (i ∈ [2 : K −1]) in the uplink SCC subspace are all parallel to the c-axis,
1528
+ facets, there exists c3 < c2 ≤ c1 such that (r2, c3) satisfies (32), and
1529
+ L∗(r2, c3) = L∗(r2, c2), D∗(r2, c3) = D∗(r2, c2).
1530
+ Therefore,
1531
+ L2 ≥ L∗(r2, c2) = L∗(r2, c3)
1532
+ (a)
1533
+ > L∗(r1, c1),
1534
+ (36)
1535
+ where (a) follows by proof step 1). But (36) contradicts (33).
1536
+ From the above analysis, we conclude that, the set of all Pareto-optimal points of R is exactly
1537
+ all the quadruples (r, c, L∗(r, c), D∗(r, c)) ∈ F satisfying (32). Notice that those points are exactly
1538
+ the surface O defined in (10).
1539
+ REFERENCES
1540
+ [1] J. Dean and S. Ghemawat, “MapReduce: Simplified data processing on large clusters,” Sixth USENIX OSDI, Dec. 2004.
1541
+ [2] M. Isard, M. Budiu, Y. Yu, A. Birrell, and D. Fetterly, “Dryad: distributed data-parallel programs from sequential building
1542
+ blocks,” in Proc. the 2nd ACM SIGOPS/EuroSys’07, Mar. 2007.
1543
+ [3] S. Li, M. A. Maddah-Ali, Q. Yu, and A. S. Avestimehr, “A fundamental tradeoff between computation and communication
1544
+ in distributed computing,” IEEE Trans. Inf. Theory, vol. 64, no. 1, pp. 109–128, Jan. 2018.
1545
+ [4] Y. H. Ezzeldin, M. Karmoose, and C. Fragouli, “Communication vs distributed computation: An alternative trade-off curve,”
1546
+ in Proc. IEEE Inf. Theory Workshop (ITW), Kaohsiung, Taiwan, pp. 279–283, Nov. 2017.
1547
+ [5] Q. Yan, S. Yang, and M. Wigger, “A storage-computation-communication tradeoff for distributed computing,” in Proc. 2018
1548
+ 15th Int. Symp. Wireless Commun. Sys. (ISWCS), Lisbon, Portugal, Aug. 28-31, 2018.
1549
+ [6] Q. Yan, S. Yang, and M. Wigger, “Storage, computation and communication: A fundamental tradeoff in distributed
1550
+ computing,” in Proc. IEEE Inf. Thoery Workshop (ITW), Guangzhou, China, Nov. 2018.
1551
+ [7] Q. Yan, S. Yang, and M. Wigger, “Storage-computation-communication tradeoff in distributed computing: Fundamental
1552
+ limits and complexity,” IEEE Trans. Inf. Theory, vol. 68, no. 8, pp. 5496-5512, Aug. 2022.
1553
+ [8] S. Li, M. A. Maddah-Ali, and A. S. Avestimehr, “A unified coding framework for distributed computing with straggling
1554
+ servers,” in Proc. IEEE Globecom Works (GC Wkshps), Washington, DC, USA, Dec. 2016.
1555
+ [9] Q. Yan, M. Wigger, S. Yang, and X. Tang, “A fundamental storage-communication tradeoff for distriubted computing with
1556
+ straggling nodes,” IEEE Trans. Commun. , vol. 68., no. 12, Dec. 2020.
1557
+
1558
+ 20
1559
+ [10] Q. Yan, M. Wigger, S. Yang, and X. Tang, “A fundamental storage-communication tradeoff for distriubted computing with
1560
+ straggling nodes,” In Proc. IEEE Int. Symp. Inf. Theory (ISIT), Paris, France, Jul. 7-12, 2019.
1561
+ [11] Q. Yu, S. Li, M. A. Maddah-Ali, and A. S. Avestimehr, “How to optimally allocate resources for coded distributed
1562
+ computing,” in Proc. IEEE Int. Conf. Commun. (ICC), 2017, Paris, France, 21–25, May 2017.
1563
+ [12] M. A. Attia and R. Tandon, “On the worst-case communication overhead for distributed data shuffling,” in Proc. 54th
1564
+ Allerton Conf. Commun., Control, Comput., Monticello, IL, USA, pp. 961–968, Sep. 2016.
1565
+ [13] M. A. Attia and R. Tandon, “Information theoretic limits of data shuffling for distributed learning,” in Proc. IEEE Glob.
1566
+ Commun. Conf. (Globlcom), Washington, DC, USA, Dec. 2016.
1567
+ [14] A. Elmahdy and S. Mohajer, “On the fundamental limits of coded data shuffling,” in Proc. IEEE Int. Symp. Inf. Theory,
1568
+ Vail, CO, USA, pp. 716–720, Jun. 2018.
1569
+ [15] L. Song, S. R. Srinivasavaradhan, and C. Fragouli, “The benefit of being flexible in distributed computation,” in Proc.
1570
+ IEEE Inf. Theory Workshop (ITW), Kaohsiung, Taiwan, pp. 289–293, Nov. 2017.
1571
+ [16] S. R. Srinivasavaradhan, L. Song, and C. Fragouli, “Distributed computing trade-offs with random connectivity,” in Proc.
1572
+ IEEE Int. Symp. Inf. Theory, Vail, CO, USA, pp. 1281–1285, Jun. 2018.
1573
+ [17] F. Li, J. Chen, and Z. Wang, “Wireless Map-Reduce distributed computing,” in Proc. IEEE Int. Symp. Inf. Theory, Vail,
1574
+ CO, USA, pp. 1286–1290, Jun. 2018.
1575
+ [18] E. Parrinello, E. Lampiris, and P. Elia, “Coded distributed computing with node cooperation substantially increases speedup
1576
+ factors,” in Proc. IEEE Int. Symp. Inf. Theory, Vail, CO, USA, pp. 1291–1295, Jun. 2018.
1577
+ [19] S. Li, Q. Yu, M. A. Maddah-Ali, and A. S. Avestimehr, “Edge-facilitated wireless distributed computing,” in Proc. IEEE
1578
+ Glob. Commun. Conf. (Globlcom), Washington, DC, USA, Dec. 2016.
1579
+ [20] S. Li, Q. Yu, M. A. Maddah-Ali, and A. S. Avestimehr, “A scalable framework for wireless distributed computing,”
1580
+ IEEE/ACM Trans. Netw., vol. 25, no. 5, pp. 2643–2653, Oct. 2017.
1581
+ [21] Q. Yan, X. Tang, and Q. Chen, “Placement delivery array and its applications,” in Proc. IEEE Inf. Theory Workshop (ITW),
1582
+ Guangzhou, China, Nov. 2018.
1583
+ [22] V. Ramkumar and P. V. Kumar, “Coded mapreduce schemes based on placement delivery array,” in Proc. IEEE Int. Symp.
1584
+ Inf. Theory, Paris, France, pp. 3087–3091, Jul. 2019.
1585
+ [23] Q. Yan, M. Cheng, X. Tang, and Q. Chen, “On the placement delivery array design for centralized coded caching scheme,”
1586
+ IEEE Trans. Inf. Theory, vol. 63, no. 9, pp. 5821–5833, Sep. 2017.
1587
+ [24] M. A. Maddah-Ali and U. Niesen, “Fundamental limits of caching,” IEEE Trans. Inf. Theory, vol. 60, no. 5, pp. 2856–2867,
1588
+ May 2014.
1589
+ [25] C. Shangguan, Y. Zhang, and G. Ge, “Centralized coded caching schemes: A hypergraph theoretical approach.” IEEE
1590
+ Trans. Inf. Theory, vol. 64, no. 8, pp. 5755-5766, Aug. 2018.
1591
+ [26] Q. Yan, X. Tang, Q. Chen, and M. Cheng, “Placement delivery array design through strong edge coloring of bipartite
1592
+ graphs,” IEEE Commun. Lett., vol. 22, no. 2, pp. 236–239, Feb. 2018.
1593
+ [27] P. Krishnan, L. Natarajan, and V. Lalitha, “An umbrella for data exchange: Applied to caching, computing, shuffling &
1594
+ rebalancing,” 2020 IEEE Inf. Theory Workshop (ITW), Riva del Garda, Italy, Apr., 2021.
1595
+
C9E2T4oBgHgl3EQfSAeL/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
D9A0T4oBgHgl3EQfAv_u/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:054c3064d9963aaa6e07811a4e4d8c3d0b61f478ea8b70719d8fc3756992c07c
3
+ size 4128813
DdAyT4oBgHgl3EQf4vob/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df18f3fb7958d48c96f920e50f7bf96a9551272aa1189a3f47d5ca649d54bfb6
3
+ size 7143469
ENAyT4oBgHgl3EQf4vqx/content/tmp_files/2301.00793v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
ENAyT4oBgHgl3EQf4vqx/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
EdFRT4oBgHgl3EQfyTjG/content/tmp_files/2301.13645v1.pdf.txt ADDED
@@ -0,0 +1,1073 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.13645v1 [math.AP] 31 Jan 2023
2
+ Existence, uniqueness and
3
+ L2
4
+ t(H2
5
+ x) ∩ L∞
6
+ t (H1
7
+ x) ∩ H1
8
+ t (L2
9
+ x) regularity of the
10
+ gradient flow of the Ambrosio-Tortorelli functional
11
+ Tommaso Cortopassi ∗
12
+ Abstract
13
+ We consider the gradient flow of the Ambrosio-Tortorelli functional at
14
+ fixed ǫ > 0, proving existence and uniqueness of a solution in dimension 2.
15
+ The strategy of the proof essentially follows the one in the first part of [9],
16
+ but as suggested in a footnote by the authors of [9] it employs a different
17
+ and simpler technique, which is used for a different equation in [4] and in the
18
+ end it allows to prove better estimates than the ones obtained in the original
19
+ article. In particular we prove that if U ⊂ R2 is a bounded Lipshitz domain,
20
+ the initial data (u0, z0) ∈ [H1(U)]2 and 0 ≤ z0 ≤ 1, then for every T > 0
21
+ there exists a unique gradient flow (u(t), z(t)) of the Ambrosio-Tortorelli
22
+ functional such that
23
+ (u, z) ∈ [L2(0, T; H2(U)) ∩ L∞(0, T; H1(U)) ∩ H1(0, T; L2(U))]2.
24
+ The basic difference from [9], as already said, is a better regularity result
25
+ and a simpler proof: while in [9] they used a localization argument based
26
+ on an idea by Struwe (see [19]), here crucial estimates on the fourth powers
27
+ of the L4
28
+ t (L4
29
+ x) norms of the gradients will be obtained employing a suitable
30
+ version of the Meyers theorem due to Gallouet and Monier (see [15], [11]).
31
+ 1
32
+ Introduction
33
+ The Mumford-Shah functional, introduced in [16], is defined as
34
+ E(u, Γ) = 1
35
+ 2
36
+
37
+ U\Γ |∇u|2 + (u − g)2dx + H1(Γ),
38
+ (1.1)
39
+ where u ∈ H1(U \ Γ), U ⊂ R2 open and bounded, Γ ⊂ U is closed and H1
40
+ is the one dimensional Hausdorff measure. This functional has been extensively
41
+ studied for its applications in image segmentation and fracture mechanics (a de-
42
+ tailed survey can be found in [5]). The definition of the functional depends on the
43
+ model it is used for, in (1.1) we gave the original definition of [16] which is suited
44
+ ∗Scuola Normale Superiore, 56126 Pisa, Italy. E-mail: [email protected]
45
+ 1
46
+
47
+ for image segmentation models, the interested reader can see the seminal paper
48
+ [10] about the fracture mechanic case. The idea is rather simple: given a gray
49
+ image g (i.e. a scalar function), we want to find (˜u, ˜Γ) such that
50
+ (˜u, ˜Γ) = arg min
51
+ u∈H1(U\Γ)
52
+ Γ⊂U closed
53
+ E(u, Γ).
54
+ (1.2)
55
+ In this case, the function u will approximate in a “smooth way” the image g
56
+ while Γ will be the contour set. From a theoretical point of view, in [6] the authors
57
+ proved the existence of a solution for (1.2) by restricting to functions u ∈ SBV (U)
58
+ (see [1]) and Γ = Su, i.e. the set of discontinuity jump points of u.
59
+ From a numerical point perspective, since the functional involves the measure
60
+ of the singular jump set of a (unknown) function u, its direct numerical implemen-
61
+ tation is often not possible or not feasible. A standard approach is to minimize a
62
+ more regular functional proposed by Ambrosio and Tortorelli in [2] defined as
63
+ 1
64
+ 2
65
+
66
+ U[(ηǫ + z2)|∇u|2 + (u − g)2] +
67
+
68
+ U
69
+ �(1 − z)2
70
+
71
+ + ǫ|∇z|2
72
+
73
+ (1.3)
74
+ which approximates the Mumford-Shah functional in Γ-convergence as ǫ → 0
75
+ in an even more general setting, i.e. not being restricted to the 2 dimensional case,
76
+ but considering U ⊂ Rn open and the (n − 1)-dimensional Hausdorff measure for
77
+ Γ. The Ambrosio-Tortorelli functional is the way with which one usually finds
78
+ (approximate) minima points for (1.1), in particular one can use a gradient flow
79
+ approach for (1.3). As in [9], we’ll consider the gradient flow of (1.3), which is
80
+ given by
81
+
82
+
83
+
84
+
85
+
86
+
87
+
88
+
89
+
90
+
91
+
92
+
93
+
94
+ ∂tu = div((ηǫ + z2)∇u) − (u − g) in (0, T) × U
95
+ ∂tz = 2ǫ∆z − z|∇u|2 + 1−z
96
+ 2ǫ in (0, T) × U
97
+
98
+ ∂nu =
99
+
100
+ ∂nz = 0 in (0, T) × ∂U
101
+ u(0, ·) = u0 and z(0, ·) = z0 in {0} × U
102
+ (1.4)
103
+ with ηǫ, ǫ > 0 fixed. As already mentioned, our goal is to study the existence,
104
+ uniqueness and regularity of solutions of (1.4), and the novelty lies in an approach
105
+ (already suggested in a footnote of [9] and used in [4] for a different equation)
106
+ which simplifies the proof of [9] while also gaining better regularity results. In
107
+ particular in [9] they only manage to prove the L2
108
+ t(H2
109
+ x) regularity for a short time
110
+ T1, since the crucial estimate in their argument is a local energy estimate, inspired
111
+ by a technique due to Struwe [19], which only holds for sufficiently small times.
112
+ 2
113
+
114
+ 2
115
+ Main result
116
+ Let U ⊂ R2 be a Lipshitz bounded domain and let (u0, z0) ∈ [H1(U)]2 with
117
+ 0 ≤ z0 ≤ 1, g ∈ L2(U). As stated in the introduction, we want to prove existence,
118
+ uniqueness and regularity of solutions of the gradient flow of (1.3), given by
119
+
120
+
121
+
122
+
123
+
124
+
125
+
126
+
127
+
128
+
129
+
130
+
131
+
132
+ ∂tu = div((ηǫ + z2)∇u) − (u − g) in (0, T) × U
133
+ ∂tz = 2ǫ∆z − z|∇u|2 + 1−z
134
+ 2ǫ ) in (0, T) × U
135
+
136
+ ∂nu =
137
+
138
+ ∂nz = 0 in (0, T) × ∂U
139
+ u(0, ·) = u0 and z(0, ·) = z0 in {0} × U.
140
+ (2.1)
141
+ However we will mostly work with the slightly modified system
142
+
143
+
144
+
145
+
146
+
147
+
148
+
149
+
150
+
151
+
152
+
153
+
154
+
155
+ ∂tu = div((ηǫ + φ(z)2)∇u) − (u − g) in (0, T) × U
156
+ ∂tz = 2ǫ∆z − φ′(z)φ(z)|∇u|2 + 1−z
157
+ 2ǫ in (0, T) × U
158
+
159
+ ∂nu =
160
+
161
+ ∂nz = 0 in (0, T) × ∂U
162
+ u(0, ·) = u0 and z(0, ·) = z0 in {0} × U
163
+ (2.2)
164
+ where φ is a cutoff. The reason to introduce such a cutoff, as we will show later,
165
+ is to bound the L∞ norm of ηǫ+φ(zN)2 when considering Galerkin approximations
166
+ zN. To be precise, a good working definition of φ might be
167
+ φ(s) =
168
+
169
+
170
+
171
+
172
+
173
+
174
+
175
+ −1 if s ≤ −1
176
+ s if − 1 < s < 2
177
+ 2 if s ≥ 2
178
+ but you could actually do the cuts at levels −δ1 and 1 + δ2 for every δ1, δ2 > 0
179
+ and nothing would change. A modified Ambrosio-Tortorelli functional (of which
180
+ (2.2) is the gradient flow of) will be denoted as ATǫ and defined as
181
+ ATǫ(u(t), z(t)) = 1
182
+ 2
183
+
184
+ U[(ηǫ+φ(z(t))2)|∇u(t)|2+(u(t)−g)2]+
185
+
186
+ U
187
+ �(1 − z(t))2
188
+
189
+ + ǫ|∇z(t)|2
190
+
191
+ .
192
+ (2.3)
193
+ First of all, let’s define the notion of strong solution, following [9].
194
+ Definition 2.1. Strong solution
195
+ A couple (u, z) is a strong solution of (2.1) if it satisfies the system in a dis-
196
+ tributional sense, i.e. for every ψ ∈ H1(U) it holds
197
+
198
+ U
199
+ u(t)ψdx =
200
+
201
+ U
202
+ u0ψdx −
203
+ � t
204
+ 0
205
+
206
+ U
207
+ (ηǫ + z(s)2)⟨∇u(s), ∇ψ⟩dxds −
208
+ � t
209
+ 0
210
+
211
+ U
212
+ (u(s) − g)ψdxds
213
+
214
+ U
215
+ z(t)ψdx =
216
+
217
+ U
218
+ z0ψdx − 2ǫ
219
+ � t
220
+ 0
221
+
222
+ U
223
+ ⟨∇z(s), ∇ψ⟩dxds −
224
+ � t
225
+ 0
226
+
227
+ U
228
+ z(s)ψ|∇u(s)|2 + 1 − z(s)
229
+
230
+ ψdxds
231
+ for almost every t ∈ (0, T), and moreover we require
232
+ (u, z) ∈ [L2(0, T; H2(U)) ∩ L∞(0, T; H1(U)) ∩ H1(0, T; L2(U))]2.
233
+ We start with a proposition which shows how a solution of (2.2) is also solution
234
+ of (2.1) if 0 ≤ z0 ≤ 1.
235
+ 3
236
+
237
+ Proposition 2.1. Let 0 ≤ z0 ≤ 1. If a strong solution (u, z) for (2.2) exists, it
238
+ must be 0 ≤ z(t, x) ≤ 1 for every t ∈ (0, T) and for a.e. x ∈ U. So (u, z) will also
239
+ be a solution of (2.1).
240
+ Proof. If we test the equation for z in (2.2) with z itself it gives
241
+ 1
242
+ 2
243
+ d
244
+ dt||z(t)||2
245
+ L2(U) = −2ǫ||∇z(t)||2
246
+ L2(U)−
247
+
248
+ U φ′(z(t))φ(z(t))z(t)|∇u(t)|2ds+
249
+
250
+ U
251
+ 1 − z(t)
252
+
253
+ z(t)ds
254
+ (2.4)
255
+ where the existence of the time derivative is ensured by Lions-Magenes lemma
256
+ (see [14]). Consider f0(z) = max{−z, 0} and f1(z) = max{z − 1, 0}, so f0 and f1
257
+ are defined as
258
+ f0(s) =
259
+
260
+
261
+
262
+ −s if s ≤ 0
263
+ 0 if s > 0
264
+ and f1(s) =
265
+
266
+
267
+
268
+ 0 if s ≤ 1
269
+ s − 1 if s > 1
270
+ .
271
+ It’s easy to check that:
272
+ 1
273
+ 2
274
+ d
275
+ dt||f0(z(t))||2
276
+ L2(U) =
277
+
278
+ U f0(z(t))f ′
279
+ 0(z(t))∂tz(t)dx =
280
+
281
+ U χ{z(t)<0} z(t) ∂tz(t)dx,
282
+ and noting that χ{z(t)<0}z(t) = −f0(z(t)) is a Sobolev function (see Lemma 7.6
283
+ in [12]) we are allowed to use it as a test function in (2.2), getting:
284
+ 1
285
+ 2
286
+ d
287
+ dt||f0(z(t))||2
288
+ L2(U) = ⟨∂tz(t), −f0(z(t))⟩L2(U) =
289
+ = χ{z(t)<0}
290
+
291
+ −2ǫ||∇z(t)||2
292
+ L2(U) −
293
+
294
+ U φ′(z(t))φ(z(t))z(t)|∇u(t)|2 +
295
+
296
+ U
297
+ 1 − z(t)
298
+
299
+ z(t)
300
+
301
+ ≤ 0.
302
+ Since f0(z(0)) = 0 because z0 ≥ 0, we have f0(z(t)) ≡ 0 for every t. In the
303
+ same way we can prove z ≤ 1 by considering f1(z(t)).
304
+ The strategy will be to make use of Galerkin approximates. Consider an or-
305
+ thogonal basis of H1(U) composed of eigenfunctions of −∆ on U with homoge-
306
+ neous Neumann boundary conditions normalized with respect to the L2(U) norm,
307
+ and denote it as {ei}i∈N. So
308
+
309
+
310
+
311
+ −∆ei = λiei in U
312
+ ∂nei = 0 in ∂U
313
+ and ||ei||L2(U) = 1 for every i.
314
+ We want to find Galerkin approximates (uN, zN) such that they solve (in dis-
315
+ tributional sense) in
316
+ VN = Span({e1, . . . , eN})
317
+ (2.5)
318
+ the system
319
+ 4
320
+
321
+
322
+
323
+
324
+
325
+
326
+
327
+
328
+
329
+
330
+
331
+
332
+
333
+
334
+ ∂tuN = πN[div((ηǫ + φ(zN)2)∇uN)] − (uN − gN) in (0, T) × U
335
+ ∂tzN = 2ǫ∆zN − πN[φ′(zN)φ(zN)|∇uN|2] + 1−zN
336
+
337
+ in (0, T) × U
338
+
339
+ ∂nuN =
340
+
341
+ ∂nzN = 0 in (0, T) × ∂U
342
+ uN(0, ·) = πN[u0] and zN(0, ·) = πN[z0] in {0} × U
343
+ (2.6)
344
+ with πN the orthogonal projection on VN. Notice that by the orthogonality
345
+ properties of {ei}N
346
+ i=1 this is actually a 2N system in u(1)(t), . . . , u(N)(t), z(1)(t), . . . , z(N)(t),
347
+ with
348
+ uN(t, x) =
349
+ N
350
+
351
+ i=1
352
+ u(i)(t)ei(x) and zN(t, x) =
353
+ N
354
+
355
+ i=1
356
+ z(i)(t)ei(x)
357
+ which by Cauchy-Lipshitz admits a unique local solution. Indeed if we test
358
+ with ei it holds that:
359
+ u(i)(t) = (u0)(i) −
360
+ � t
361
+ 0
362
+ ��
363
+ U(ηǫ + φ(zN(s))2)⟨∇uN(s), ∇ei⟩ + (uN(s) − gN)eidx
364
+
365
+ ds
366
+ z(i)(t) = (z0)(i) −
367
+ � t
368
+ 0
369
+ ��
370
+ U 2ǫ⟨∇zN(s), ∇ei⟩ − φ′(zN(s))φ(zN(s))|∇uN(s)|2ei + 1 − zN(s)
371
+
372
+ eidx
373
+
374
+ ds
375
+ for every 1 ≤ i ≤ N. However there are strong non linearities at play, so a
376
+ priori for every N we only have a local solution in [0, tN) without being able to
377
+ extend it immediately to [0, T]. In order to gain existence in [0, T] of Galerkin
378
+ approximates we’ll use the following a priori estimates, which hold in a slightly
379
+ more general situation with less regular initial data:
380
+ Proposition 2.2. Existence of weak approximate solutions in [0,T]
381
+ Given (u0, z0) ∈ [L2(U)]2 and a solution (uN, zN) of (2.6) in VN, it holds
382
+ sup
383
+ 0≤t≤T[||uN(t)||2
384
+ L2(U)] +
385
+ � T
386
+ 0 ||∇uN(s)||2
387
+ L2(U)ds ≤ C
388
+ and
389
+ sup
390
+ 0≤t≤T[||zN(t)||2
391
+ L2(U)] +
392
+ � T
393
+ 0 ||∇zN(s)||2
394
+ L2(U)ds ≤ C
395
+ with C a positive constant independent of N.
396
+ Proof. Test the equation for uN in (2.6) with uN itself, to get
397
+ d
398
+ dt||uN||2
399
+ L2(U) = −
400
+
401
+ U(ηǫ + φ(zN)2)|∇uN|2dx −
402
+
403
+ U uN(uN − gN)dx.
404
+ (2.7)
405
+ So
406
+ d
407
+ dt||uN||2
408
+ L2(U) ≤ ||gN||L2(U)||uN||L2(U) ≤ ||g||L2(U)(1 + ||uN||2
409
+ L2(U))
410
+ 5
411
+
412
+ and you get uniform boundedness of ||uN||L2(U) by Gronwall’s lemma. Going
413
+ back to (2.7) you easily conclude by integrating in time. The same holds if we test
414
+ the equation in zN with zN itself, obtaining
415
+ sup
416
+ 0≤t≤T[||zN(t)||2
417
+ L2(U)] + 2ǫ
418
+ � T
419
+ 0
420
+ ≤ T
421
+ 8ǫ.
422
+ By orthogonality of the ei s this can be rewritten as
423
+ ||uN(t)||2
424
+ L2(U) =
425
+ N
426
+
427
+ i=1
428
+ [u(i)(t)]2 and ||zN(t)||2
429
+ L2(U) =
430
+ N
431
+
432
+ i=1
433
+ [z(i)(t)]2
434
+ so we cannot have a blow-up in finite time and we have thus proved existence
435
+ up to time T for every T > 0.
436
+ Now that we have existence of solutions of (2.6) in [0, T] for every N, let’s prove
437
+ stronger inequalities exploiting the variational characterization of the problem.
438
+ Proposition 2.3. A priori energy estimates
439
+ Assume (u0, z0) ∈ [H1(U)]2, then
440
+ sup
441
+ t∈[0,T]
442
+ [ATǫ(uN(t), zN(t))] +
443
+ � T
444
+ 0 ||∂tuN(s)||2
445
+ L2(U) + ||∂tzN(s)||2
446
+ L2(U)ds =
447
+ = sup
448
+ t∈[0,T]
449
+ [ATǫ(uN(t), zN(t))] +
450
+ � T
451
+ 0 ||πN[∇ATǫ(uN, zN)]||2
452
+ L2(U)ds ≤
453
+ ≲ ATǫ(u0, z0) + ||g||2
454
+ L2(U).
455
+ In particular we have (∂tuN, ∂tzN) ∈ [L2(0, T; L2(U))]2 and (uN, zN) ∈ [L∞(0, T; H1(U))]2,
456
+ both uniformly bounded independently from N.
457
+ Proof. Derive ATǫ(uN, zN) in time and get:
458
+ d
459
+ dtATǫ(uN, zN) = −||πN∇ATǫ||2
460
+ L2(U) = −||∂tuN||2
461
+ L2(U) − ||∂tzN||2
462
+ L2(U).
463
+ Integrating this equality:
464
+ ATǫ(uN, zN) +
465
+ � T
466
+ 0 ||∂tuN(s)||2
467
+ L2(U) + ||∂tzN(s)||2
468
+ L2(U)ds = ATǫ(πNu0, πNz0) ≤ C.
469
+ (2.8)
470
+ Notice that a priori we have no control in N for the quantity
471
+
472
+ U(ηǫ + πN[z0]2)|∇πN[u0]|2dx.
473
+ But since we truncated with |φ| ≤ 2 we have
474
+
475
+ U(ηǫ + 4)|∇πN[u0]|2dx ≤
476
+
477
+ U(ηǫ + 4)|∇u0|2dx.
478
+ 6
479
+
480
+ At this point we still can’t prove the weak convergence of the non linear parts of
481
+ the equation, in particular div((ηǫ+φ(zN)2)∇uN) and φ′(zN)φ(zN)|∇u|2. Stronger
482
+ estimates are needed. In the next Proposition we’ll prove uniform L2(0, T; H2(U))
483
+ boundedness of (uN, zN).
484
+ Proposition 2.4. Uniform L2(0, T; H2(U)) estimates
485
+ Let U ⊂ R2 be a bounded Lipshitz domain and let (u0, z0) ∈ [H1(U)]2. Consider
486
+ solutions (uN, zN) of (2.6). It holds that
487
+ sup
488
+ t∈[0,T]
489
+ [||uN(t)||2
490
+ H1(U) + ||zN(t)||2
491
+ H1(U)] +
492
+ � T
493
+ 0 ||∆uN||2
494
+ L2(U) + ||∆zN||2
495
+ L2(U) ≤ C
496
+ for some C > 0 independent of N.
497
+ Proof. First of all, we want to prove an estimate like
498
+ sup
499
+ 0≤t≤T[ATǫ(uN(t), zN(t))] +
500
+ � T
501
+ 0 ||∆uN(s)||2
502
+ L2(U) + ||∆zN(s)||2
503
+ L2(U)ds ≲
504
+ ≲ C +
505
+ � T
506
+ 0 ||∇uN(s)||4
507
+ L4(U) + ||∇zN(s)||4
508
+ L4(U)ds.
509
+ (2.9)
510
+ The idea is to expand the energy equality (2.8) obtained in Proposition 2.3
511
+ with ||πN∇ATǫ(uN, zN)||2
512
+ L2(U). For the sake of readability we omit writing time
513
+ dependence, abbreviate φ(z) as φ and omit the subscripts too:
514
+ |∇ATǫ(u, z)|2 = ∂uATǫ(u, z)2 + ∂zATǫ(u, z)2 =
515
+ = [(η + φ2)∆u + 2φφ′⟨∇z, ∇u⟩ − (u − g)]2 +
516
+
517
+ 2ǫ∆z − φφ′|∇u|2 + 1 − z
518
+
519
+ �2
520
+ =
521
+ = (η + φ2)2(∆u)2
522
+
523
+ ��
524
+
525
+ 1
526
+ + 4φ2(φ′)2⟨∇z, ∇u⟩2
527
+
528
+ ��
529
+
530
+ 2
531
+ +(u − g)2 + 4φφ′(η + φ2)⟨∇z, ∇u⟩∆u
532
+
533
+ ��
534
+
535
+ 3
536
+
537
+ −4φφ′(u − g)⟨∇z, ∇u⟩
538
+
539
+ ��
540
+
541
+ 4
542
+ −2(u − g)(η + φ2)∆u
543
+
544
+ ��
545
+
546
+ 5
547
+ + 4ǫ2(∆z)2
548
+
549
+ ��
550
+
551
+ 6
552
+ +(φ′)2φ2|∇u|4 + (1 − z)2
553
+ 4ǫ2
554
+
555
+ −4ǫφφ′|∇u|2∆z
556
+
557
+ ��
558
+
559
+ 7
560
+ −φφ′(1 − z)|∇u|2
561
+ ǫ
562
+
563
+ ��
564
+
565
+ 8
566
+ + 2(1 − z)∆z
567
+
568
+ ��
569
+
570
+ 9
571
+ ,
572
+ where we highlighted all the terms we will manipulate. The strategy is very
573
+ simple: use estimates like
574
+ ab ≥ − 1
575
+ 2δa2 − δ
576
+ 2b2
577
+ (2.10)
578
+ to get
579
+ 7
580
+
581
+ C ≥ sup
582
+ 0≤t≤T[ATǫ(u(t), z(t))] +
583
+ � T
584
+ 0 ||πN∇ATǫ(uN(s), zN(s))||2
585
+ L2(U)ds ≳
586
+ sup
587
+ 0≤t≤T[ATǫ(u(t), z(t))] +
588
+ � T
589
+ 0 ||∆uN(s)||2
590
+ L2(U) + ||∆zN(s)||2
591
+ L2(U)ds−
592
+
593
+ � T
594
+ 0 ||∇uN(s)||4
595
+ L4(U) + ||∇zN(s)||4
596
+ L4(U)ds − 1,
597
+ and from this recover the desired inequality (2.9). The idea is to use (2.10)
598
+ with different suitable δ on all highlighted terms except
599
+ 1
600
+ and
601
+ 6
602
+ which will
603
+ absorb squared laplacians. You can easily see that each term can be estimated
604
+ with a sum like in (2.10) of two of the following quantities:
605
+ • A term −(∆u)2 and/or −(∆z)2;
606
+ • A term −|∇u|4 and/or −|∇z|4;
607
+ • A term which by Proposition 2.3 we know to be uniformly bounded.
608
+ The only tedious part (which we skip) is to choose δ wisely each time so that
609
+ in the end you remain with
610
+ c1(∆u)2 + c2(∆z)2 − C1|∇u|4 − C2|∇z|4 + h
611
+ with c1, c2 > 0 and h a sum of functions in L∞(0, T; L2(U)). In fact we can
612
+ reduce to estimating the L2(0, T; H2(U)) norm of uN. Testing the equation in zN
613
+ with −∆zN in (2.6) we get
614
+ 1
615
+ 2
616
+ d
617
+ dt||∇zN||2
618
+ L2(U) = −2ǫ||∆zN||2
619
+ L2(U) +
620
+
621
+ U φ′(zN)φ(zN)|∇uN|2∆zNdx −
622
+
623
+ U
624
+ 1 − zN
625
+
626
+ ∆zNdx
627
+ and from this, using again ab ≤ (δ/2)a2 + (1/2δ)b2, it’s clear we can estimate
628
+ the L2(0, T; H2(U)) norm of zN with the L4(0, T; L4(U)) norm of ∇uN. Moreover
629
+ by Gagliardo-Niremberg inequality:
630
+ ||∇zN||4
631
+ L4(U) ≤ C(1 + ||∇zN||2
632
+ L2(U)||∇2zN||2
633
+ L2(U)) ≤ C(1 + ||∇2zN||2
634
+ L2(U))
635
+ thanks to the L∞(0, T; H1(U)) estimates on zN. Then:
636
+ � T
637
+ 0 ||uN(t)||2
638
+ H2(U)dt ≲ 1 + sup
639
+ t∈[0,T]
640
+ [ATǫ(uN(t), zN(t))]+
641
+ +
642
+ � T
643
+ 0 ||∆uN(t)||2
644
+ L2(U) + ||∆zN(t)||2
645
+ L2(U)dt ≲ 1 +
646
+ � T
647
+ 0 ||∇uN(t)||4
648
+ L4(U) + ||∇zN(t)||4
649
+ L4(U)dt ≲
650
+ ≲ 1 +
651
+ � T
652
+ 0 ||∇uN(t)||4
653
+ L4(U) + ||∇2zN(t)||2
654
+ L2(U)dt ≲ 1 +
655
+ � T
656
+ 0 ||∇uN(t)||4
657
+ L4(U)dt.
658
+ (2.11)
659
+ 8
660
+
661
+ The goal will be to obtain an estimate like
662
+ � T
663
+ 0 ||∇uN(t)||4
664
+ L4(U) ≲
665
+ �� T
666
+ 0 ||uN(t)||2
667
+ H2(U)
668
+ �q/2
669
+ (2.12)
670
+ for some q < 2 so that we can get uniform bounds in (2.11) and conclude.
671
+ Notice that in (2.12) the estimate is non homogeneous, i.e. we are estimating
672
+ a fourth power with something of homogeneity q < 2. The reason why this is
673
+ possible is that the constants we are omitting in (2.12) actually depend on uN in
674
+ a way such that the homogeneity is preserved, as we will see later.
675
+ Considering the time fixed (we will thus omit writing the dependence on t
676
+ for the moment) we focus on the first equation of (2.6) and we consider uN the
677
+ solution of:
678
+
679
+
680
+
681
+ −div((ηǫ + φ(zN)2)∇uN) = f
682
+ ∂nuN = 0
683
+ where f = −∂tuN − (uN − gN).
684
+ Now we use a procedure used in [4] and suggested as a possible alternative
685
+ proof in a footnote in [9], that is using Meyers theorem (see [15]) to get H2 esti-
686
+ mates. Meyers theorem was originally proved for homogeneous Dirichlet boundary
687
+ conditions on ∂U, but in [11] it has been generalised (among others) to the case
688
+ of homogeneous Neumann boundary conditions. The proof in [11] for the Neu-
689
+ mann case consists in a series of strategies (i.e. partition of unity, extension of the
690
+ functions, etc.) in order to go back to the case of the original Meyers theorem. In
691
+ particular we want to use Theorem 2 in [11], so we consider
692
+ G : L2
693
+ m(U) �→ H1
694
+ m(U)
695
+ such that G(f) = ϕ with
696
+
697
+
698
+
699
+ −∆ϕ = f in U
700
+ ∂nϕ = 0 in ∂U
701
+ (2.13)
702
+ and
703
+ f ∈ L2
704
+ m(U) =
705
+
706
+ g ∈ L2(U)
707
+ ����
708
+
709
+ U g = 0
710
+
711
+ ;
712
+ ϕ ∈ H1
713
+ m(U) = H1(U) ∩ L2
714
+ m(U).
715
+ Notice that problem (2.13) admits a unique solution in H1
716
+ m(U) if and only if
717
+
718
+ U f = 0 (see [8]), which is our case. In particular it holds
719
+ ⟨∇G(f), ∇φ⟩L2 = ⟨f, φ⟩L2 for all φ ∈ H1(U).
720
+ (2.14)
721
+ By Theorem 2 in [11] (up to multiplicative constants we are neglecting):
722
+ ||∇uN||Lp(U) ≤ ||∇G(f)||Lp(U) for some p ∈ (2, +∞).
723
+ (2.15)
724
+ 9
725
+
726
+ Remark 1. Actually, the precise statement of Theorem 2 in [11] would give the
727
+ estimate:
728
+ ||uN||W 1,p(U) ≲ ||f||W 1,q(U)′,
729
+ with 1
730
+ p + 1
731
+ q = 1, 2 < p < +∞ and W 1,q(U)′ denoting the dual. But then we
732
+ readily have:
733
+ ||f||W 1,q(U)′ =
734
+ sup
735
+ ||φ||W 1,q(U)=1
736
+ ⟨f, φ⟩L2 =
737
+ sup
738
+ ||φ||W 1,q(U)=1
739
+ ⟨∇G(f), ∇φ⟩L2 ≤
740
+
741
+ sup
742
+ ||φ||W 1,q(U)=1
743
+ ||∇G(f)||Lp(U)||∇φ||Lq(U) ≤ ||∇G(f)||Lp(U)
744
+ and so we have the estimate (2.15). The reason why we consider ∇G(f) instead
745
+ of dealing with f is because we’ll make use of Gagliardo-Niremberg inequality and
746
+ estimates from elliptic regularity theory on ∇G(f).
747
+ Using Gagliardo-Niremberg inequality we get
748
+ ||∇uN||Lp(U) ≤ C(1 + ||∇G(f)||2/p
749
+ L2(U)||∇2G(f)||
750
+ p−2
751
+ p
752
+ L2(U)).
753
+ (2.16)
754
+ Now notice that up to modification by an additive constant we can consider
755
+ without loss of generality −∇G(f) = (ηǫ + φ(zN)2)∇uN. Indeed:
756
+ −∆G(f) = div(−∇G(f)) = f = div((ηǫ + φ(zN)2)∇uN),
757
+ so −∇G(f) = (ηǫ + φ(zN)2)∇uN ∈ L∞(0, T; L2(U)) thanks to Proposition 2.3.
758
+ Integrate in time the inequality (2.16) raised to the power 2p/(p − 2) to get:
759
+ � T
760
+ 0 ||∇uN(t)||
761
+ 2p
762
+ p−2
763
+ Lp(U)dt ≲ 1 +
764
+ � T
765
+ 0 ||∇2G(f)(t)||2
766
+ L2(U)dt ≲ 1 +
767
+ � T
768
+ 0 ||f(t)||2
769
+ L2(U)dt ≤ C,
770
+ (2.17)
771
+ where we used standard elliptic regularity theory to pass from the L2 norm of
772
+ ∇2G(f) to the L2 norm of f. We can assume without loss of generality that 2 < p <
773
+ 4, otherwise if we had p > 4 we could conclude directly by the above estimates,
774
+ indeed 2p/(p − 2) < 4 and by Hölder, (2.17) and the uniform L∞(0, T; L2(U))
775
+ bounds on ∇uN:
776
+ � T
777
+ 0 ||∇uN(t)||4
778
+ L4(U)dt =
779
+ � T
780
+ 0
781
+ ��
782
+ U |∇uN(t)|
783
+ 2p
784
+ p−2|∇uN(t)|
785
+ 2p−8
786
+ p−2 dx
787
+
788
+ dt ≤
789
+
790
+ � T
791
+ 0 ||∇uN(t)||2p/(p−2)
792
+ Lp(U)
793
+ ��
794
+ U |∇uN(t)|2dx
795
+ � p−4
796
+ p−2 dt ≤
797
+ ≤ C
798
+ � T
799
+ 0 ||∇uN(t)||2p/(p−2)
800
+ Lp(U)
801
+ dt ≤ C.
802
+ (2.18)
803
+ Of course we also assume p ̸= 4, or the thesis would follow trivially. So assume
804
+ 2 < p < 4. Applying again Gagliardo-Nirenberg, Hölder and (2.17):
805
+ 10
806
+
807
+ � T
808
+ 0 ||∇uN(t)||4
809
+ L4(U)dt ≤
810
+ � T
811
+ 0 ||∇uN(t)||p
812
+ Lp(U)||uN(t)||4−p
813
+ H2(U)dt ≤
814
+
815
+ �� T
816
+ 0 ||∇uN||
817
+ 2p
818
+ p−2
819
+ Lp(U)
820
+ � p−2
821
+ p
822
+ �� T
823
+ 0 ||uN||2
824
+ H2(U)
825
+ � 4−p
826
+ 2
827
+
828
+ �� T
829
+ 0 ||uN||2
830
+ H2(U)
831
+ � 4−p
832
+ 2
833
+ ,
834
+ (2.19)
835
+ and we can conclude since (4 − p)/2 < 2.
836
+ Remark 2. Notice the assumption n = 2 is needed in order to have the necessary
837
+ Gagliardo-Niremberg estimates in the previous Proposition. Also, notice how the
838
+ homogeneity of degree 4 is preserved both in (2.18) and in (2.19), where to conclude
839
+ we uniformly bound some quantities depending on uN, namely (
840
+
841
+ U |∇uN(t)|2dx)
842
+ p−4
843
+ p−2
844
+ and
845
+ �� T
846
+ 0 ||∇uN||
847
+ 2p
848
+ p−2
849
+ Lp(U)
850
+ � p−2
851
+ p
852
+ .
853
+ The estimates obtained in the previous Proposition actually yield uniform esti-
854
+ mates of uN and zN in L2(0, T; H2(U)) thanks to the classical fact that ||u||L2(U) +
855
+ ||∆u||L2(U) is an equivalent norm for H2(U). To recapitulate, we have (up to a
856
+ subsequence we will not rename):
857
+
858
+
859
+
860
+
861
+
862
+
863
+
864
+
865
+
866
+
867
+
868
+
869
+
870
+
871
+
872
+
873
+
874
+
875
+
876
+ (uN, zN) weakly- ∗ converging in L∞(0, T; H1(U))
877
+ (uN, zN) weakly converging in L2(0, T; H2(U))
878
+ (∂tuN, ∂tzN) weakly converging in L2(0, T; L2(U))
879
+ (uN, zN) converging in C(0, T; L2(U))
880
+ (uN, zN) converging in the strong topology in L2(0, T; H1(U))
881
+ (2.20)
882
+ where the compact embeddings in C(0, T; L2(U)) and L2(0, T; H1(U)) are ob-
883
+ tained by applying the Aubin-Lions lemma (see [3], [13], [18]). We are now ready
884
+ to prove the main result.
885
+ Theorem 2.1. Existence and uniqueness of strong solutions
886
+ Let U ⊂ R2 be a bounded Lipshitz domain and let (u0, z0) ∈ [H1(U)]2 with
887
+ 0 ≤ z0 ≤ 1. Then there exists a unique strong solution (u, z) of (2.1).
888
+ Proof. Let (u, z) be the weak limit of (uN, zN) in L2(0, T; H2(U)), let’s see how
889
+ the pair is a solution of (2.2). This will be sufficient to prove the thesis thanks to
890
+ Proposition 2.1. Let ψ ∈ VM = Span{e1, . . . , eM} be a test function for (2.6) with
891
+ N > M, so it holds:
892
+
893
+ U uN(t)ψ
894
+
895
+ ��
896
+
897
+ 1
898
+ =
899
+
900
+ U πN[u0]ψ −
901
+ � t
902
+ 0
903
+
904
+ U(ηǫ + φ(zN)2)∇uN∇ψ
905
+
906
+ ��
907
+
908
+ 2
909
+
910
+ � t
911
+ 0
912
+
913
+ U(uN − gN)ψ
914
+
915
+ U zN(t)ψ
916
+
917
+ ��
918
+
919
+ 3
920
+ =
921
+
922
+ U πN[z0]ψ − 2ǫ
923
+ � t
924
+ 0
925
+
926
+ U ∇zN∇ψ −
927
+ � t
928
+ 0
929
+
930
+ U φ′(zN)φ(zN)|∇uN|2ψ
931
+
932
+ ��
933
+
934
+ 4
935
+ +
936
+ � t
937
+ 0
938
+
939
+ U
940
+ 1 − zN
941
+
942
+ ψ,
943
+ (2.21)
944
+ 11
945
+
946
+ and we want to show we can pass to the limit in every highlighted term, since
947
+ for the others it’s trivial by weak convergence.
948
+ • As for 1 and 3 , we can pass to the limit thanks to the compactness in
949
+ C(0, T; L2(U)).
950
+ • For 2 , we have (by dominated convergence) strong convergence in L2(0, T; L2(U))
951
+ of (ηǫ + φ(zN)2), and weak convergence of ∇uN. So their product weakly
952
+ converges and we can pass to the limit.
953
+ • We already saw in Proposition 2.4 how ∇uN is uniformly bounded in L4(0, T; L4(U)),
954
+ which is the same as saying |∇uN|2 is uniformly bounded in L2(0, T; L2(U)).
955
+ Then, up to taking another subsequence, |∇uN|2 ⇀ |∇u|2 in L2(0, T; L2(U)).
956
+ Since φ′(zN)φ(zN) → φ′(z)φ(z) in the strong L2(0, T; L2(U)) topology by
957
+ dominated convergence, their product weakly converges and we can pass to
958
+ the limit in 4 .
959
+ It only remains to prove that (u, z) satisfy the homogeneous Neumann bound-
960
+ ary conditions of (2.1).
961
+ To do that we first have to make sense of ∂nu for any u ∈ H2(U). We define
962
+ ∂n : H2(U) → H1/2(∂U)
963
+ as
964
+ ∂nu(ψ) =
965
+
966
+ U ∆uΨdx +
967
+
968
+ U ∇u∇Ψdx,
969
+ where ψ ∈ H1/2(∂U) and Ψ ∈ H1(U) is an extension of ψ to the whole U. In
970
+ particular Ψ will be chosen according to the trace extension operator, i.e. Ψ = Eψ,
971
+ where E is defined as:
972
+ Theorem 2.2. Trace extension operator, see [17]
973
+ Given a bounded, Lipshitz domain Ω ⊂ Rn and 1 < p < +∞, there exists a
974
+ linear and bounded trace extension operator
975
+ E : W 1− 1
976
+ p ,p(∂Ω) → W 1,p(Ω)
977
+ such that Tr(Eu) = u for every u ∈ W 1− 1
978
+ p(∂Ω).
979
+ The operator ∂n just defined is continuous, indeed using ||Eψ||H1(U) ≤ C||ψ||H1/2(U):
980
+ ||∂nu||H1/2(∂U) =
981
+ sup
982
+ ψ∈H1/2(∂U)
983
+
984
+ ∂nu(ψ)
985
+ ||ψ||H1/2(∂U)
986
+
987
+
988
+ ≤ C
989
+ sup
990
+ ψ∈H1/2(∂U)
991
+
992
+ 1
993
+ ||Eψ||H1(U)
994
+
995
+ U ∆uEψdx +
996
+
997
+ U ∇u∇Eψdx
998
+
999
+
1000
+ ≤ C(||∆u||L2(U) + ||∇u||L2(U)).
1001
+ Moreover it is known that W 1−1/p,p(∂U) compactly embeds into Lp(∂U) (see
1002
+ [7]), so
1003
+ 12
1004
+
1005
+ ∂n : H2(U) �→ L2(∂U) is weak-strong continuous,
1006
+ meaning it sends weakly converging sequences in strong converging ones. We
1007
+ have to prove that ∂nu(t) = ∂nz(t) = 0 for almost every t. Since the argument is
1008
+ the same we’ll only show that the boundary conditions hold for u, moreover for
1009
+ simplicity we assume that u(t) ∈ H2(U) for every t. By weak-strong continuity of
1010
+ ∂n we have that for every t ∈ [0, T], modulo a subsequence (which depends on t):
1011
+ uN(t)
1012
+ H2(U)
1013
+ −−−⇀ u(t) =⇒ ∂nuN(t)
1014
+ L2(∂U)
1015
+ −−−−→ ∂nu(t) as N → +∞,
1016
+ but since ∂nuN(t) ≡ 0 for every N we have the thesis.
1017
+ References
1018
+ [1] Luigi Ambrosio, Nicola Fusco, and Diego Pallara. Functions of bounded vari-
1019
+ ation and free discontinuity problems. Courier Corporation, 2000.
1020
+ [2] Luigi Ambrosio and Vincenzo Maria Tortorelli. Approximation of functional
1021
+ depending on jumps by elliptic functional via gamma-convergence. Commu-
1022
+ nications on Pure and Applied Mathematics, 43(8):999–1036, 1990.
1023
+ [3] Jean-Pierre Aubin.
1024
+ Analyse mathematique-un theoreme de compacite.
1025
+ Comptes Rendus Hebdomadaires Des Seances De L Academie Des Sciences,
1026
+ 256(24):5042, 1963.
1027
+ [4] John W Barrett, Xiaobing Feng, and Andreas Prohl. Convergence of a fully
1028
+ discrete finite element method for a degenerate parabolic system modelling
1029
+ nematic liquid crystals with variable degree of orientation. ESAIM: Mathe-
1030
+ matical Modelling and Numerical Analysis, 40(1):175–199, 2006.
1031
+ [5] Guy David. Singular sets of minimizers for the Mumford-Shah functional,
1032
+ volume 233. Springer Science & Business Media, 2006.
1033
+ [6] E De Giorgi, M Carriero, and A Leaci. Existence theorem for a minimum
1034
+ problem with free discontinuity set. Ennio De Giorgi, page 654, 1989.
1035
+ [7] Eleonora Di Nezza, Giampiero Palatucci, and Enrico Valdinoci. Hitchhiker’s
1036
+ guide to the fractional sobolev spaces. Bulletin des sciences mathématiques,
1037
+ 136(5):521–573, 2012.
1038
+ [8] Lawrence C Evans. Partial differential equations, volume 19. American Math-
1039
+ ematical Soc., 2010.
1040
+ [9] Xiaobing Feng and Andreas Prohl. Analysis of gradient flow of a regular-
1041
+ ized mumford-shah functional for image segmentation and image inpaint-
1042
+ ing. ESAIM: Mathematical Modelling and Numerical Analysis, 38(2):291–320,
1043
+ 2004.
1044
+ 13
1045
+
1046
+ [10] Gilles A Francfort and J-J Marigo.
1047
+ Revisiting brittle fracture as an en-
1048
+ ergy minimization problem. Journal of the Mechanics and Physics of Solids,
1049
+ 46(8):1319–1342, 1998.
1050
+ [11] Thierry Gallouet and Alexis Monier. On the regularity of solutions to elliptic
1051
+ equations. Rend. Mat. Appl.(7), 19(4):471–488, 1999.
1052
+ [12] David Gilbarg, Neil S Trudinger, David Gilbarg, and NS Trudinger. Elliptic
1053
+ partial differential equations of second order, volume 224. Springer, 1977.
1054
+ [13] Jacques-Louis Lions.
1055
+ Quelques méthodes de résolution de problemes aux
1056
+ limites non linéaires. 1969.
1057
+ [14] Jacques Louis Lions and Enrico Magenes. Non-homogeneous boundary value
1058
+ problems and applications: Vol. 1, volume 181. Springer Science & Business
1059
+ Media, 2012.
1060
+ [15] Norman G Meyers. An Lp-estimate for the gradient of solutions of second
1061
+ order elliptic divergence equations. Annali della Scuola Normale Superiore di
1062
+ Pisa-Classe di Scienze, 17(3):189–206, 1963.
1063
+ [16] David Bryant Mumford and Jayant Shah. Optimal approximations by piece-
1064
+ wise smooth functions and associated variational problems. Communications
1065
+ on pure and applied mathematics, 1989.
1066
+ [17] Jindrich Necas. Les méthodes directes en théorie des équations elliptiques.
1067
+ 1967.
1068
+ [18] Jacques Simon. Compact sets in the space Lp(0, T; B). Annali di Matematica
1069
+ pura ed applicata, 146(1):65–96, 1986.
1070
+ [19] Michael Struwe. Geometric evolution problems. Nonlinear partial differential
1071
+ equations in differential geometry, 2:257–339, 1996.
1072
+ 14
1073
+
EdFRT4oBgHgl3EQfyTjG/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf,len=351
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
3
+ page_content='13645v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
4
+ page_content='AP] 31 Jan 2023 Existence, uniqueness and L2 t(H2 x) ∩ L∞ t (H1 x) ∩ H1 t (L2 x) regularity of the gradient flow of the Ambrosio-Tortorelli functional Tommaso Cortopassi ∗ Abstract We consider the gradient flow of the Ambrosio-Tortorelli functional at fixed ǫ > 0, proving existence and uniqueness of a solution in dimension 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
5
+ page_content=' The strategy of the proof essentially follows the one in the first part of [9], but as suggested in a footnote by the authors of [9] it employs a different and simpler technique, which is used for a different equation in [4] and in the end it allows to prove better estimates than the ones obtained in the original article.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
6
+ page_content=' In particular we prove that if U ⊂ R2 is a bounded Lipshitz domain, the initial data (u0, z0) ∈ [H1(U)]2 and 0 ≤ z0 ≤ 1, then for every T > 0 there exists a unique gradient flow (u(t), z(t)) of the Ambrosio-Tortorelli functional such that (u, z) ∈ [L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
7
+ page_content=' H2(U)) ∩ L∞(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
8
+ page_content=' H1(U)) ∩ H1(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
9
+ page_content=' L2(U))]2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
10
+ page_content=' The basic difference from [9], as already said, is a better regularity result and a simpler proof: while in [9] they used a localization argument based on an idea by Struwe (see [19]), here crucial estimates on the fourth powers of the L4 t (L4 x) norms of the gradients will be obtained employing a suitable version of the Meyers theorem due to Gallouet and Monier (see [15], [11]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
11
+ page_content=' 1 Introduction The Mumford-Shah functional, introduced in [16], is defined as E(u, Γ) = 1 2 � U\\Γ |∇u|2 + (u − g)2dx + H1(Γ), (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
12
+ page_content='1) where u ∈ H1(U \\ Γ), U ⊂ R2 open and bounded, Γ ⊂ U is closed and H1 is the one dimensional Hausdorff measure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
13
+ page_content=' This functional has been extensively studied for its applications in image segmentation and fracture mechanics (a de- tailed survey can be found in [5]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
14
+ page_content=' The definition of the functional depends on the model it is used for, in (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
15
+ page_content='1) we gave the original definition of [16] which is suited ∗Scuola Normale Superiore, 56126 Pisa, Italy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
16
+ page_content=' E-mail: tommaso.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
17
+ page_content='cortopassi@sns.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
18
+ page_content='it 1 for image segmentation models, the interested reader can see the seminal paper [10] about the fracture mechanic case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
19
+ page_content=' The idea is rather simple: given a gray image g (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
20
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
21
+ page_content=' a scalar function), we want to find (˜u, ˜Γ) such that (˜u, ˜Γ) = arg min u∈H1(U\\Γ) Γ⊂U closed E(u, Γ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
22
+ page_content=' (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
23
+ page_content='2) In this case, the function u will approximate in a “smooth way” the image g while Γ will be the contour set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
24
+ page_content=' From a theoretical point of view, in [6] the authors proved the existence of a solution for (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
25
+ page_content='2) by restricting to functions u ∈ SBV (U) (see [1]) and Γ = Su, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
26
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
27
+ page_content=' the set of discontinuity jump points of u.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
28
+ page_content=' From a numerical point perspective, since the functional involves the measure of the singular jump set of a (unknown) function u, its direct numerical implemen- tation is often not possible or not feasible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
29
+ page_content=' A standard approach is to minimize a more regular functional proposed by Ambrosio and Tortorelli in [2] defined as 1 2 � U[(ηǫ + z2)|∇u|2 + (u − g)2] + � U �(1 − z)2 4ǫ + ǫ|∇z|2 � (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
30
+ page_content='3) which approximates the Mumford-Shah functional in Γ-convergence as ǫ → 0 in an even more general setting, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
31
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
32
+ page_content=' not being restricted to the 2 dimensional case, but considering U ⊂ Rn open and the (n − 1)-dimensional Hausdorff measure for Γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
33
+ page_content=' The Ambrosio-Tortorelli functional is the way with which one usually finds (approximate) minima points for (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
34
+ page_content='1), in particular one can use a gradient flow approach for (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
35
+ page_content='3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
36
+ page_content=' As in [9], we’ll consider the gradient flow of (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
37
+ page_content='3), which is given by \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 ∂tu = div((ηǫ + z2)∇u) − (u − g) in (0, T) × U ∂tz = 2ǫ∆z − z|∇u|2 + 1−z 2ǫ in (0, T) × U ∂ ∂nu = ∂ ∂nz = 0 in (0, T) × ∂U u(0, ·) = u0 and z(0, ·) = z0 in {0} × U (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
38
+ page_content='4) with ηǫ, ǫ > 0 fixed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
39
+ page_content=' As already mentioned, our goal is to study the existence, uniqueness and regularity of solutions of (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
40
+ page_content='4), and the novelty lies in an approach (already suggested in a footnote of [9] and used in [4] for a different equation) which simplifies the proof of [9] while also gaining better regularity results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
41
+ page_content=' In particular in [9] they only manage to prove the L2 t(H2 x) regularity for a short time T1, since the crucial estimate in their argument is a local energy estimate, inspired by a technique due to Struwe [19], which only holds for sufficiently small times.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
42
+ page_content=' 2 2 Main result Let U ⊂ R2 be a Lipshitz bounded domain and let (u0, z0) ∈ [H1(U)]2 with 0 ≤ z0 ≤ 1, g ∈ L2(U).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
43
+ page_content=' As stated in the introduction, we want to prove existence, uniqueness and regularity of solutions of the gradient flow of (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
44
+ page_content='3), given by \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 ∂tu = div((ηǫ + z2)∇u) − (u − g) in (0, T) × U ∂tz = 2ǫ∆z − z|∇u|2 + 1−z 2ǫ ) in (0, T) × U ∂ ∂nu = ∂ ∂nz = 0 in (0, T) × ∂U u(0, ·) = u0 and z(0, ·) = z0 in {0} × U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
45
+ page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
46
+ page_content='1) However we will mostly work with the slightly modified system \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 ∂tu = div((ηǫ + φ(z)2)∇u) − (u − g) in (0, T) × U ∂tz = 2ǫ∆z − φ′(z)φ(z)|∇u|2 + 1−z 2ǫ in (0, T) × U ∂ ∂nu = ∂ ∂nz = 0 in (0, T) × ∂U u(0, ·) = u0 and z(0, ·) = z0 in {0} × U (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
47
+ page_content='2) where φ is a cutoff.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
48
+ page_content=' The reason to introduce such a cutoff, as we will show later, is to bound the L∞ norm of ηǫ+φ(zN)2 when considering Galerkin approximations zN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
49
+ page_content=' To be precise, a good working definition of φ might be φ(s) = \uf8f1 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f3 −1 if s ≤ −1 s if − 1 < s < 2 2 if s ≥ 2 but you could actually do the cuts at levels −δ1 and 1 + δ2 for every δ1, δ2 > 0 and nothing would change.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
50
+ page_content=' A modified Ambrosio-Tortorelli functional (of which (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
51
+ page_content='2) is the gradient flow of) will be denoted as ATǫ and defined as ATǫ(u(t), z(t)) = 1 2 � U[(ηǫ+φ(z(t))2)|∇u(t)|2+(u(t)−g)2]+ � U �(1 − z(t))2 4ǫ + ǫ|∇z(t)|2 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
52
+ page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
53
+ page_content='3) First of all, let’s define the notion of strong solution, following [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
54
+ page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
55
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
56
+ page_content=' Strong solution A couple (u, z) is a strong solution of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
57
+ page_content='1) if it satisfies the system in a dis- tributional sense, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
58
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
59
+ page_content=' for every ψ ∈ H1(U) it holds � U u(t)ψdx = � U u0ψdx − � t 0 � U (ηǫ + z(s)2)⟨∇u(s), ∇ψ⟩dxds − � t 0 � U (u(s) − g)ψdxds � U z(t)ψdx = � U z0ψdx − 2ǫ � t 0 � U ⟨∇z(s), ∇ψ⟩dxds − � t 0 � U z(s)ψ|∇u(s)|2 + 1 − z(s) 2ǫ ψdxds for almost every t ∈ (0, T), and moreover we require (u, z) ∈ [L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
60
+ page_content=' H2(U)) ∩ L∞(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
61
+ page_content=' H1(U)) ∩ H1(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
62
+ page_content=' L2(U))]2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
63
+ page_content=' We start with a proposition which shows how a solution of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
64
+ page_content='2) is also solution of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
65
+ page_content='1) if 0 ≤ z0 ≤ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
66
+ page_content=' 3 Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
67
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
68
+ page_content=' Let 0 ≤ z0 ≤ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
69
+ page_content=' If a strong solution (u, z) for (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
70
+ page_content='2) exists, it must be 0 ≤ z(t, x) ≤ 1 for every t ∈ (0, T) and for a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
71
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
72
+ page_content=' x ∈ U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
73
+ page_content=' So (u, z) will also be a solution of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
74
+ page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
75
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
76
+ page_content=' If we test the equation for z in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
77
+ page_content='2) with z itself it gives 1 2 d dt||z(t)||2 L2(U) = −2ǫ||∇z(t)||2 L2(U)− � U φ′(z(t))φ(z(t))z(t)|∇u(t)|2ds+ � U 1 − z(t) 2ǫ z(t)ds (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
78
+ page_content='4) where the existence of the time derivative is ensured by Lions-Magenes lemma (see [14]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
79
+ page_content=' Consider f0(z) = max{−z, 0} and f1(z) = max{z − 1, 0}, so f0 and f1 are defined as f0(s) = \uf8f1 \uf8f2 \uf8f3 −s if s ≤ 0 0 if s > 0 and f1(s) = \uf8f1 \uf8f2 \uf8f3 0 if s ≤ 1 s − 1 if s > 1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
80
+ page_content=' It’s easy to check that: 1 2 d dt||f0(z(t))||2 L2(U) = � U f0(z(t))f ′ 0(z(t))∂tz(t)dx = � U χ{z(t)<0} z(t) ∂tz(t)dx, and noting that χ{z(t)<0}z(t) = −f0(z(t)) is a Sobolev function (see Lemma 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
81
+ page_content='6 in [12]) we are allowed to use it as a test function in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
82
+ page_content='2), getting: 1 2 d dt||f0(z(t))||2 L2(U) = ⟨∂tz(t), −f0(z(t))⟩L2(U) = = χ{z(t)<0} � −2ǫ||∇z(t)||2 L2(U) − � U φ′(z(t))φ(z(t))z(t)|∇u(t)|2 + � U 1 − z(t) 2ǫ z(t) � ≤ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
83
+ page_content=' Since f0(z(0)) = 0 because z0 ≥ 0, we have f0(z(t)) ≡ 0 for every t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
84
+ page_content=' In the same way we can prove z ≤ 1 by considering f1(z(t)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
85
+ page_content=' The strategy will be to make use of Galerkin approximates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
86
+ page_content=' Consider an or- thogonal basis of H1(U) composed of eigenfunctions of −∆ on U with homoge- neous Neumann boundary conditions normalized with respect to the L2(U) norm, and denote it as {ei}i∈N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
87
+ page_content=' So \uf8f1 \uf8f2 \uf8f3 −∆ei = λiei in U ∂nei = 0 in ∂U and ||ei||L2(U) = 1 for every i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
88
+ page_content=' We want to find Galerkin approximates (uN, zN) such that they solve (in dis- tributional sense) in VN = Span({e1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
89
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
90
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
91
+ page_content=' , eN}) (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
92
+ page_content='5) the system 4 \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 ∂tuN = πN[div((ηǫ + φ(zN)2)∇uN)] − (uN − gN) in (0, T) × U ∂tzN = 2ǫ∆zN − πN[φ′(zN)φ(zN)|∇uN|2] + 1−zN 2ǫ in (0, T) × U ∂ ∂nuN = ∂ ∂nzN = 0 in (0, T) × ∂U uN(0, ·) = πN[u0] and zN(0, ·) = πN[z0] in {0} × U (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
93
+ page_content='6) with πN the orthogonal projection on VN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
94
+ page_content=' Notice that by the orthogonality properties of {ei}N i=1 this is actually a 2N system in u(1)(t), .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
95
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
96
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
97
+ page_content=' , u(N)(t), z(1)(t), .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
98
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
99
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
100
+ page_content=' , z(N)(t), with uN(t, x) = N � i=1 u(i)(t)ei(x) and zN(t, x) = N � i=1 z(i)(t)ei(x) which by Cauchy-Lipshitz admits a unique local solution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
101
+ page_content=' Indeed if we test with ei it holds that: u(i)(t) = (u0)(i) − � t 0 �� U(ηǫ + φ(zN(s))2)⟨∇uN(s), ∇ei⟩ + (uN(s) − gN)eidx � ds z(i)(t) = (z0)(i) − � t 0 �� U 2ǫ⟨∇zN(s), ∇ei⟩ − φ′(zN(s))φ(zN(s))|∇uN(s)|2ei + 1 − zN(s) 2ǫ eidx � ds for every 1 ≤ i ≤ N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
102
+ page_content=' However there are strong non linearities at play, so a priori for every N we only have a local solution in [0, tN) without being able to extend it immediately to [0, T].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
103
+ page_content=' In order to gain existence in [0, T] of Galerkin approximates we’ll use the following a priori estimates, which hold in a slightly more general situation with less regular initial data: Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
104
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
105
+ page_content=' Existence of weak approximate solutions in [0,T] Given (u0, z0) ∈ [L2(U)]2 and a solution (uN, zN) of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
106
+ page_content='6) in VN, it holds sup 0≤t≤T[||uN(t)||2 L2(U)] + � T 0 ||∇uN(s)||2 L2(U)ds ≤ C and sup 0≤t≤T[||zN(t)||2 L2(U)] + � T 0 ||∇zN(s)||2 L2(U)ds ≤ C with C a positive constant independent of N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
107
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
108
+ page_content=' Test the equation for uN in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
109
+ page_content='6) with uN itself, to get d dt||uN||2 L2(U) = − � U(ηǫ + φ(zN)2)|∇uN|2dx − � U uN(uN − gN)dx.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
110
+ page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
111
+ page_content='7) So d dt||uN||2 L2(U) ≤ ||gN||L2(U)||uN||L2(U) ≤ ||g||L2(U)(1 + ||uN||2 L2(U)) 5 and you get uniform boundedness of ||uN||L2(U) by Gronwall’s lemma.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
112
+ page_content=' Going back to (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
113
+ page_content='7) you easily conclude by integrating in time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
114
+ page_content=' The same holds if we test the equation in zN with zN itself, obtaining sup 0≤t≤T[||zN(t)||2 L2(U)] + 2ǫ � T 0 ≤ T 8ǫ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
115
+ page_content=' By orthogonality of the ei s this can be rewritten as ||uN(t)||2 L2(U) = N � i=1 [u(i)(t)]2 and ||zN(t)||2 L2(U) = N � i=1 [z(i)(t)]2 so we cannot have a blow-up in finite time and we have thus proved existence up to time T for every T > 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
116
+ page_content=' Now that we have existence of solutions of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
117
+ page_content='6) in [0, T] for every N, let’s prove stronger inequalities exploiting the variational characterization of the problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
118
+ page_content=' Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
119
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
120
+ page_content=' A priori energy estimates Assume (u0, z0) ∈ [H1(U)]2, then sup t∈[0,T] [ATǫ(uN(t), zN(t))] + � T 0 ||∂tuN(s)||2 L2(U) + ||∂tzN(s)||2 L2(U)ds = = sup t∈[0,T] [ATǫ(uN(t), zN(t))] + � T 0 ||πN[∇ATǫ(uN, zN)]||2 L2(U)ds ≤ ≲ ATǫ(u0, z0) + ||g||2 L2(U).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
121
+ page_content=' In particular we have (∂tuN, ∂tzN) ∈ [L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
122
+ page_content=' L2(U))]2 and (uN, zN) ∈ [L∞(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
123
+ page_content=' H1(U))]2, both uniformly bounded independently from N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
124
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
125
+ page_content=' Derive ATǫ(uN, zN) in time and get: d dtATǫ(uN, zN) = −||πN∇ATǫ||2 L2(U) = −||∂tuN||2 L2(U) − ||∂tzN||2 L2(U).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
126
+ page_content=' Integrating this equality: ATǫ(uN, zN) + � T 0 ||∂tuN(s)||2 L2(U) + ||∂tzN(s)||2 L2(U)ds = ATǫ(πNu0, πNz0) ≤ C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
127
+ page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
128
+ page_content='8) Notice that a priori we have no control in N for the quantity � U(ηǫ + πN[z0]2)|∇πN[u0]|2dx.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
129
+ page_content=' But since we truncated with |φ| ≤ 2 we have � U(ηǫ + 4)|∇πN[u0]|2dx ≤ � U(ηǫ + 4)|∇u0|2dx.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
130
+ page_content=' 6 At this point we still can’t prove the weak convergence of the non linear parts of the equation, in particular div((ηǫ+φ(zN)2)∇uN) and φ′(zN)φ(zN)|∇u|2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
131
+ page_content=' Stronger estimates are needed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
132
+ page_content=' In the next Proposition we’ll prove uniform L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
133
+ page_content=' H2(U)) boundedness of (uN, zN).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
134
+ page_content=' Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
135
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
136
+ page_content=' Uniform L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
137
+ page_content=' H2(U)) estimates Let U ⊂ R2 be a bounded Lipshitz domain and let (u0, z0) ∈ [H1(U)]2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
138
+ page_content=' Consider solutions (uN, zN) of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
139
+ page_content='6).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
140
+ page_content=' It holds that sup t∈[0,T] [||uN(t)||2 H1(U) + ||zN(t)||2 H1(U)] + � T 0 ||∆uN||2 L2(U) + ||∆zN||2 L2(U) ≤ C for some C > 0 independent of N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
141
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
142
+ page_content=' First of all, we want to prove an estimate like sup 0≤t≤T[ATǫ(uN(t), zN(t))] + � T 0 ||∆uN(s)||2 L2(U) + ||∆zN(s)||2 L2(U)ds ≲ ≲ C + � T 0 ||∇uN(s)||4 L4(U) + ||∇zN(s)||4 L4(U)ds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
143
+ page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
144
+ page_content='9) The idea is to expand the energy equality (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
145
+ page_content='8) obtained in Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
146
+ page_content='3 with ||πN∇ATǫ(uN, zN)||2 L2(U).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
147
+ page_content=' For the sake of readability we omit writing time dependence,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
148
+ page_content=' abbreviate φ(z) as φ and omit the subscripts too: |∇ATǫ(u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
149
+ page_content=' z)|2 = ∂uATǫ(u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
150
+ page_content=' z)2 + ∂zATǫ(u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
151
+ page_content=' z)2 = = [(η + φ2)∆u + 2φφ′⟨∇z,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
152
+ page_content=' ∇u⟩ − (u − g)]2 + � 2ǫ∆z − φφ′|∇u|2 + 1 − z 2ǫ �2 = = (η + φ2)2(∆u)2 � �� � 1 + 4φ2(φ′)2⟨∇z,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
153
+ page_content=' ∇u⟩2 � �� � 2 +(u − g)2 + 4φφ′(η + φ2)⟨∇z,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
154
+ page_content=' ∇u⟩∆u � �� � 3 − −4φφ′(u − g)⟨∇z,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
155
+ page_content=' ∇u⟩ � �� � 4 −2(u − g)(η + φ2)∆u � �� � 5 + 4ǫ2(∆z)2 � �� � 6 +(φ′)2φ2|∇u|4 + (1 − z)2 4ǫ2 − −4ǫφφ′|∇u|2∆z � �� � 7 −φφ′(1 − z)|∇u|2 ǫ � �� � 8 + 2(1 − z)∆z � �� � 9 ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
156
+ page_content=' where we highlighted all the terms we will manipulate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
157
+ page_content=' The strategy is very simple: use estimates like ab ≥ − 1 2δa2 − δ 2b2 (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
158
+ page_content='10) to get 7 C ≥ sup 0≤t≤T[ATǫ(u(t), z(t))] + � T 0 ||πN∇ATǫ(uN(s), zN(s))||2 L2(U)ds ≳ sup 0≤t≤T[ATǫ(u(t), z(t))] + � T 0 ||∆uN(s)||2 L2(U) + ||∆zN(s)||2 L2(U)ds− − � T 0 ||∇uN(s)||4 L4(U) + ||∇zN(s)||4 L4(U)ds − 1, and from this recover the desired inequality (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
159
+ page_content='9).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
160
+ page_content=' The idea is to use (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
161
+ page_content='10) with different suitable δ on all highlighted terms except 1 and 6 which will absorb squared laplacians.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
162
+ page_content=' You can easily see that each term can be estimated with a sum like in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
163
+ page_content='10) of two of the following quantities: A term −(∆u)2 and/or −(∆z)2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
164
+ page_content=' A term −|∇u|4 and/or −|∇z|4;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
165
+ page_content=' A term which by Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
166
+ page_content='3 we know to be uniformly bounded.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
167
+ page_content=' The only tedious part (which we skip) is to choose δ wisely each time so that in the end you remain with c1(∆u)2 + c2(∆z)2 − C1|∇u|4 − C2|∇z|4 + h with c1, c2 > 0 and h a sum of functions in L∞(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
168
+ page_content=' L2(U)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
169
+ page_content=' In fact we can reduce to estimating the L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
170
+ page_content=' H2(U)) norm of uN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
171
+ page_content=' Testing the equation in zN with −∆zN in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
172
+ page_content='6) we get 1 2 d dt||∇zN||2 L2(U) = −2ǫ||∆zN||2 L2(U) + � U φ′(zN)φ(zN)|∇uN|2∆zNdx − � U 1 − zN 2ǫ ∆zNdx and from this, using again ab ≤ (δ/2)a2 + (1/2δ)b2, it’s clear we can estimate the L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
173
+ page_content=' H2(U)) norm of zN with the L4(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
174
+ page_content=' L4(U)) norm of ∇uN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
175
+ page_content=' Moreover by Gagliardo-Niremberg inequality: ||∇zN||4 L4(U) ≤ C(1 + ||∇zN||2 L2(U)||∇2zN||2 L2(U)) ≤ C(1 + ||∇2zN||2 L2(U)) thanks to the L∞(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
176
+ page_content=' H1(U)) estimates on zN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
177
+ page_content=' Then: � T 0 ||uN(t)||2 H2(U)dt ≲ 1 + sup t∈[0,T] [ATǫ(uN(t), zN(t))]+ + � T 0 ||∆uN(t)||2 L2(U) + ||∆zN(t)||2 L2(U)dt ≲ 1 + � T 0 ||∇uN(t)||4 L4(U) + ||∇zN(t)||4 L4(U)dt ≲ ≲ 1 + � T 0 ||∇uN(t)||4 L4(U) + ||∇2zN(t)||2 L2(U)dt ≲ 1 + � T 0 ||∇uN(t)||4 L4(U)dt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
178
+ page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
179
+ page_content='11) 8 The goal will be to obtain an estimate like � T 0 ||∇uN(t)||4 L4(U) ≲ �� T 0 ||uN(t)||2 H2(U) �q/2 (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
180
+ page_content='12) for some q < 2 so that we can get uniform bounds in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
181
+ page_content='11) and conclude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
182
+ page_content=' Notice that in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
183
+ page_content='12) the estimate is non homogeneous, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
184
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
185
+ page_content=' we are estimating a fourth power with something of homogeneity q < 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
186
+ page_content=' The reason why this is possible is that the constants we are omitting in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
187
+ page_content='12) actually depend on uN in a way such that the homogeneity is preserved, as we will see later.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
188
+ page_content=' Considering the time fixed (we will thus omit writing the dependence on t for the moment) we focus on the first equation of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
189
+ page_content='6) and we consider uN the solution of: \uf8f1 \uf8f2 \uf8f3 −div((ηǫ + φ(zN)2)∇uN) = f ∂nuN = 0 where f = −∂tuN − (uN − gN).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
190
+ page_content=' Now we use a procedure used in [4] and suggested as a possible alternative proof in a footnote in [9], that is using Meyers theorem (see [15]) to get H2 esti- mates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
191
+ page_content=' Meyers theorem was originally proved for homogeneous Dirichlet boundary conditions on ∂U, but in [11] it has been generalised (among others) to the case of homogeneous Neumann boundary conditions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
192
+ page_content=' The proof in [11] for the Neu- mann case consists in a series of strategies (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
193
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
194
+ page_content=' partition of unity, extension of the functions, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
195
+ page_content=') in order to go back to the case of the original Meyers theorem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
196
+ page_content=' In particular we want to use Theorem 2 in [11], so we consider G : L2 m(U) �→ H1 m(U) such that G(f) = ϕ with \uf8f1 \uf8f2 \uf8f3 −∆ϕ = f in U ∂nϕ = 0 in ∂U (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
197
+ page_content='13) and f ∈ L2 m(U) = � g ∈ L2(U) ���� � U g = 0 � ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
198
+ page_content=' ϕ ∈ H1 m(U) = H1(U) ∩ L2 m(U).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
199
+ page_content=' Notice that problem (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
200
+ page_content='13) admits a unique solution in H1 m(U) if and only if � U f = 0 (see [8]), which is our case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
201
+ page_content=' In particular it holds ⟨∇G(f), ∇φ⟩L2 = ⟨f, φ⟩L2 for all φ ∈ H1(U).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
202
+ page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
203
+ page_content='14) By Theorem 2 in [11] (up to multiplicative constants we are neglecting): ||∇uN||Lp(U) ≤ ||∇G(f)||Lp(U) for some p ∈ (2, +∞).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
204
+ page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
205
+ page_content='15) 9 Remark 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
206
+ page_content=' Actually, the precise statement of Theorem 2 in [11] would give the estimate: ||uN||W 1,p(U) ≲ ||f||W 1,q(U)′, with 1 p + 1 q = 1, 2 < p < +∞ and W 1,q(U)′ denoting the dual.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
207
+ page_content=' But then we readily have: ||f||W 1,q(U)′ = sup ||φ||W 1,q(U)=1 ⟨f, φ⟩L2 = sup ||φ||W 1,q(U)=1 ⟨∇G(f), ∇φ⟩L2 ≤ ≤ sup ||φ||W 1,q(U)=1 ||∇G(f)||Lp(U)||∇φ||Lq(U) ≤ ||∇G(f)||Lp(U) and so we have the estimate (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
208
+ page_content='15).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
209
+ page_content=' The reason why we consider ∇G(f) instead of dealing with f is because we’ll make use of Gagliardo-Niremberg inequality and estimates from elliptic regularity theory on ∇G(f).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
210
+ page_content=' Using Gagliardo-Niremberg inequality we get ||∇uN||Lp(U) ≤ C(1 + ||∇G(f)||2/p L2(U)||∇2G(f)|| p−2 p L2(U)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
211
+ page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
212
+ page_content='16) Now notice that up to modification by an additive constant we can consider without loss of generality −∇G(f) = (ηǫ + φ(zN)2)∇uN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
213
+ page_content=' Indeed: −∆G(f) = div(−∇G(f)) = f = div((ηǫ + φ(zN)2)∇uN), so −∇G(f) = (ηǫ + φ(zN)2)∇uN ∈ L∞(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
214
+ page_content=' L2(U)) thanks to Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
215
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
216
+ page_content=' Integrate in time the inequality (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
217
+ page_content='16) raised to the power 2p/(p − 2) to get: � T 0 ||∇uN(t)|| 2p p−2 Lp(U)dt ≲ 1 + � T 0 ||∇2G(f)(t)||2 L2(U)dt ≲ 1 + � T 0 ||f(t)||2 L2(U)dt ≤ C, (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
218
+ page_content='17) where we used standard elliptic regularity theory to pass from the L2 norm of ∇2G(f) to the L2 norm of f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
219
+ page_content=' We can assume without loss of generality that 2 < p < 4, otherwise if we had p > 4 we could conclude directly by the above estimates, indeed 2p/(p − 2) < 4 and by Hölder, (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
220
+ page_content='17) and the uniform L∞(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
221
+ page_content=' L2(U)) bounds on ∇uN: � T 0 ||∇uN(t)||4 L4(U)dt = � T 0 �� U |∇uN(t)| 2p p−2|∇uN(t)| 2p−8 p−2 dx � dt ≤ ≤ � T 0 ||∇uN(t)||2p/(p−2) Lp(U) �� U |∇uN(t)|2dx � p−4 p−2 dt ≤ ≤ C � T 0 ||∇uN(t)||2p/(p−2) Lp(U) dt ≤ C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
222
+ page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
223
+ page_content='18) Of course we also assume p ̸= 4, or the thesis would follow trivially.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
224
+ page_content=' So assume 2 < p < 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
225
+ page_content=' Applying again Gagliardo-Nirenberg, Hölder and (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
226
+ page_content='17): 10 � T 0 ||∇uN(t)||4 L4(U)dt ≤ � T 0 ||∇uN(t)||p Lp(U)||uN(t)||4−p H2(U)dt ≤ ≤ �� T 0 ||∇uN|| 2p p−2 Lp(U) � p−2 p �� T 0 ||uN||2 H2(U) � 4−p 2 ≲ �� T 0 ||uN||2 H2(U) � 4−p 2 , (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
227
+ page_content='19) and we can conclude since (4 − p)/2 < 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
228
+ page_content=' Remark 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
229
+ page_content=' Notice the assumption n = 2 is needed in order to have the necessary Gagliardo-Niremberg estimates in the previous Proposition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
230
+ page_content=' Also, notice how the homogeneity of degree 4 is preserved both in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
231
+ page_content='18) and in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
232
+ page_content='19), where to conclude we uniformly bound some quantities depending on uN, namely ( � U |∇uN(t)|2dx) p−4 p−2 and �� T 0 ||∇uN|| 2p p−2 Lp(U) � p−2 p .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
233
+ page_content=' The estimates obtained in the previous Proposition actually yield uniform esti- mates of uN and zN in L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
234
+ page_content=' H2(U)) thanks to the classical fact that ||u||L2(U) + ||∆u||L2(U) is an equivalent norm for H2(U).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
235
+ page_content=' To recapitulate, we have (up to a subsequence we will not rename): \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 (uN, zN) weakly- ∗ converging in L∞(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
236
+ page_content=' H1(U)) (uN, zN) weakly converging in L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
237
+ page_content=' H2(U)) (∂tuN, ∂tzN) weakly converging in L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
238
+ page_content=' L2(U)) (uN, zN) converging in C(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
239
+ page_content=' L2(U)) (uN, zN) converging in the strong topology in L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
240
+ page_content=' H1(U)) (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
241
+ page_content='20) where the compact embeddings in C(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
242
+ page_content=' L2(U)) and L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
243
+ page_content=' H1(U)) are ob- tained by applying the Aubin-Lions lemma (see [3], [13], [18]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
244
+ page_content=' We are now ready to prove the main result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
245
+ page_content=' Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
246
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
247
+ page_content=' Existence and uniqueness of strong solutions Let U ⊂ R2 be a bounded Lipshitz domain and let (u0, z0) ∈ [H1(U)]2 with 0 ≤ z0 ≤ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
248
+ page_content=' Then there exists a unique strong solution (u, z) of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
249
+ page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
250
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
251
+ page_content=' Let (u, z) be the weak limit of (uN, zN) in L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
252
+ page_content=' H2(U)), let’s see how the pair is a solution of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
253
+ page_content='2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
254
+ page_content=' This will be sufficient to prove the thesis thanks to Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
255
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
256
+ page_content=' Let ψ ∈ VM = Span{e1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
257
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
258
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
259
+ page_content=' , eM} be a test function for (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
260
+ page_content='6) with N > M, so it holds: � U uN(t)ψ � �� � 1 = � U πN[u0]ψ − � t 0 � U(ηǫ + φ(zN)2)∇uN∇ψ � �� � 2 − � t 0 � U(uN − gN)ψ � U zN(t)ψ � �� � 3 = � U πN[z0]ψ − 2ǫ � t 0 � U ∇zN∇ψ − � t 0 � U φ′(zN)φ(zN)|∇uN|2ψ � �� � 4 + � t 0 � U 1 − zN 2ǫ ψ, (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
261
+ page_content='21) 11 and we want to show we can pass to the limit in every highlighted term, since for the others it’s trivial by weak convergence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
262
+ page_content=' As for 1 and 3 , we can pass to the limit thanks to the compactness in C(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
263
+ page_content=' L2(U)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
264
+ page_content=' For 2 , we have (by dominated convergence) strong convergence in L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
265
+ page_content=' L2(U)) of (ηǫ + φ(zN)2), and weak convergence of ∇uN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
266
+ page_content=' So their product weakly converges and we can pass to the limit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
267
+ page_content=' We already saw in Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
268
+ page_content='4 how ∇uN is uniformly bounded in L4(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
269
+ page_content=' L4(U)), which is the same as saying |∇uN|2 is uniformly bounded in L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
270
+ page_content=' L2(U)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
271
+ page_content=' Then, up to taking another subsequence, |∇uN|2 ⇀ |∇u|2 in L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
272
+ page_content=' L2(U)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
273
+ page_content=' Since φ′(zN)φ(zN) → φ′(z)φ(z) in the strong L2(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
274
+ page_content=' L2(U)) topology by dominated convergence, their product weakly converges and we can pass to the limit in 4 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
275
+ page_content=' It only remains to prove that (u, z) satisfy the homogeneous Neumann bound- ary conditions of (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
276
+ page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
277
+ page_content=' To do that we first have to make sense of ∂nu for any u ∈ H2(U).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
278
+ page_content=' We define ∂n : H2(U) → H1/2(∂U) as ∂nu(ψ) = � U ∆uΨdx + � U ∇u∇Ψdx, where ψ ∈ H1/2(∂U) and Ψ ∈ H1(U) is an extension of ψ to the whole U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
279
+ page_content=' In particular Ψ will be chosen according to the trace extension operator, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
280
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
281
+ page_content=' Ψ = Eψ, where E is defined as: Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
282
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
283
+ page_content=' Trace extension operator, see [17] Given a bounded, Lipshitz domain Ω ⊂ Rn and 1 < p < +∞, there exists a linear and bounded trace extension operator E : W 1− 1 p ,p(∂Ω) → W 1,p(Ω) such that Tr(Eu) = u for every u ∈ W 1− 1 p(∂Ω).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
284
+ page_content=' The operator ∂n just defined is continuous, indeed using ||Eψ||H1(U) ≤ C||ψ||H1/2(U): ||∂nu||H1/2(∂U) = sup ψ∈H1/2(∂U) � ∂nu(ψ) ||ψ||H1/2(∂U) � ≤ ≤ C sup ψ∈H1/2(∂U) � 1 ||Eψ||H1(U) � U ∆uEψdx + � U ∇u∇Eψdx � ≤ ≤ C(||∆u||L2(U) + ||∇u||L2(U)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
285
+ page_content=' Moreover it is known that W 1−1/p,p(∂U) compactly embeds into Lp(∂U) (see [7]), so 12 ∂n : H2(U) �→ L2(∂U) is weak-strong continuous, meaning it sends weakly converging sequences in strong converging ones.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
286
+ page_content=' We have to prove that ∂nu(t) = ∂nz(t) = 0 for almost every t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
287
+ page_content=' Since the argument is the same we’ll only show that the boundary conditions hold for u, moreover for simplicity we assume that u(t) ∈ H2(U) for every t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
288
+ page_content=' By weak-strong continuity of ∂n we have that for every t ∈ [0, T], modulo a subsequence (which depends on t): uN(t) H2(U) −−−⇀ u(t) =⇒ ∂nuN(t) L2(∂U) −−−−→ ∂nu(t) as N → +∞, but since ∂nuN(t) ≡ 0 for every N we have the thesis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
289
+ page_content=' References [1] Luigi Ambrosio, Nicola Fusco, and Diego Pallara.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
290
+ page_content=' Functions of bounded vari- ation and free discontinuity problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
291
+ page_content=' Courier Corporation, 2000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
292
+ page_content=' [2] Luigi Ambrosio and Vincenzo Maria Tortorelli.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
293
+ page_content=' Approximation of functional depending on jumps by elliptic functional via gamma-convergence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
294
+ page_content=' Commu- nications on Pure and Applied Mathematics, 43(8):999–1036, 1990.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
295
+ page_content=' [3] Jean-Pierre Aubin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
296
+ page_content=' Analyse mathematique-un theoreme de compacite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
297
+ page_content=' Comptes Rendus Hebdomadaires Des Seances De L Academie Des Sciences, 256(24):5042, 1963.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
298
+ page_content=' [4] John W Barrett, Xiaobing Feng, and Andreas Prohl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
299
+ page_content=' Convergence of a fully discrete finite element method for a degenerate parabolic system modelling nematic liquid crystals with variable degree of orientation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
300
+ page_content=' ESAIM: Mathe- matical Modelling and Numerical Analysis, 40(1):175–199, 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
301
+ page_content=' [5] Guy David.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
302
+ page_content=' Singular sets of minimizers for the Mumford-Shah functional, volume 233.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
303
+ page_content=' Springer Science & Business Media, 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
304
+ page_content=' [6] E De Giorgi, M Carriero, and A Leaci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
305
+ page_content=' Existence theorem for a minimum problem with free discontinuity set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
306
+ page_content=' Ennio De Giorgi, page 654, 1989.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
307
+ page_content=' [7] Eleonora Di Nezza, Giampiero Palatucci, and Enrico Valdinoci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
308
+ page_content=' Hitchhiker’s guide to the fractional sobolev spaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
309
+ page_content=' Bulletin des sciences mathématiques, 136(5):521–573, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
310
+ page_content=' [8] Lawrence C Evans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
311
+ page_content=' Partial differential equations, volume 19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
312
+ page_content=' American Math- ematical Soc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
313
+ page_content=', 2010.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
314
+ page_content=' [9] Xiaobing Feng and Andreas Prohl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
315
+ page_content=' Analysis of gradient flow of a regular- ized mumford-shah functional for image segmentation and image inpaint- ing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
316
+ page_content=' ESAIM: Mathematical Modelling and Numerical Analysis, 38(2):291–320, 2004.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
317
+ page_content=' 13 [10] Gilles A Francfort and J-J Marigo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
318
+ page_content=' Revisiting brittle fracture as an en- ergy minimization problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
319
+ page_content=' Journal of the Mechanics and Physics of Solids, 46(8):1319–1342, 1998.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
320
+ page_content=' [11] Thierry Gallouet and Alexis Monier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
321
+ page_content=' On the regularity of solutions to elliptic equations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
322
+ page_content=' Rend.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
323
+ page_content=' Mat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
324
+ page_content=' Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
325
+ page_content=' (7), 19(4):471–488, 1999.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
326
+ page_content=' [12] David Gilbarg, Neil S Trudinger, David Gilbarg, and NS Trudinger.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
327
+ page_content=' Elliptic partial differential equations of second order, volume 224.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
328
+ page_content=' Springer, 1977.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
329
+ page_content=' [13] Jacques-Louis Lions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
330
+ page_content=' Quelques méthodes de résolution de problemes aux limites non linéaires.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
331
+ page_content=' 1969.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
332
+ page_content=' [14] Jacques Louis Lions and Enrico Magenes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
333
+ page_content=' Non-homogeneous boundary value problems and applications: Vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
334
+ page_content=' 1, volume 181.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
335
+ page_content=' Springer Science & Business Media, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
336
+ page_content=' [15] Norman G Meyers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
337
+ page_content=' An Lp-estimate for the gradient of solutions of second order elliptic divergence equations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
338
+ page_content=' Annali della Scuola Normale Superiore di Pisa-Classe di Scienze, 17(3):189–206, 1963.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
339
+ page_content=' [16] David Bryant Mumford and Jayant Shah.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
340
+ page_content=' Optimal approximations by piece- wise smooth functions and associated variational problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
341
+ page_content=' Communications on pure and applied mathematics, 1989.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
342
+ page_content=' [17] Jindrich Necas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
343
+ page_content=' Les méthodes directes en théorie des équations elliptiques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
344
+ page_content=' 1967.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
345
+ page_content=' [18] Jacques Simon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
346
+ page_content=' Compact sets in the space Lp(0, T;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
347
+ page_content=' B).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
348
+ page_content=' Annali di Matematica pura ed applicata, 146(1):65–96, 1986.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
349
+ page_content=' [19] Michael Struwe.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
350
+ page_content=' Geometric evolution problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
351
+ page_content=' Nonlinear partial differential equations in differential geometry, 2:257–339, 1996.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
352
+ page_content=' 14' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdFRT4oBgHgl3EQfyTjG/content/2301.13645v1.pdf'}
FNAzT4oBgHgl3EQfG_ve/content/2301.01039v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:350856f94b90096539451654b8edb7d6007d12e9e58220b7c26483413afca626
3
+ size 181232
FNAzT4oBgHgl3EQfG_ve/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2413b839c4146c16b1fd2771e59687a36f42f15286f94be74f5c61fdc878195f
3
+ size 2162733